From 7a6fe1d1e1b48887963a73db06df78398839cbb5 Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 15:14:06 +0300
Subject: [PATCH 01/17] [churn] Fix test-on-docker should run integration tests
accidentally running only unit test locally after merge conflict resolved in commit f8de2fe5f280e16295deb44347887be48ad19861
---
Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index 13cb801f34..603328df1c 100644
--- a/Makefile
+++ b/Makefile
@@ -522,10 +522,10 @@ stop:
test: | start mvn-test-local stop
mvn-test-local:
- @TEST_ENV_PROVIDER=local mvn -Dwith-param-names=true -Dtest=${TEST} clean compile test
+ @TEST_ENV_PROVIDER=local mvn -Dwith-param-names=true -Dtest=${TEST} clean verify
mvn-test:
- mvn -Dwith-param-names=true -Dtest=${TEST} clean compile test
+ mvn -Dwith-param-names=true -Dtest=${TEST} clean verify
package: | start mvn-package stop
From 7abdc1e2dd39a9dbe8ca5504fc686d40c31ac80d Mon Sep 17 00:00:00 2001
From: atakavci
Date: Thu, 2 Oct 2025 13:32:58 +0300
Subject: [PATCH 02/17] [automatic failover] Set and test default values for
failover config&components (#4298)
* - set & test default values
* - format
* - fix tests failing due to changing defaults
---
pom.xml | 10 +--
.../jedis/MultiClusterClientConfig.java | 10 +--
.../redis/clients/jedis/mcf/EchoStrategy.java | 5 +-
.../jedis/mcf/HealthCheckStrategy.java | 17 ++--
.../clients/jedis/mcf/LagAwareStrategy.java | 15 +++-
.../clients/jedis/mcf/DefaultValuesTest.java | 77 +++++++++++++++++++
.../jedis/mcf/FailbackMechanismUnitTest.java | 4 +-
.../clients/jedis/mcf/HealthCheckTest.java | 2 +-
.../jedis/mcf/LagAwareStrategyUnitTest.java | 14 ++--
9 files changed, 119 insertions(+), 35 deletions(-)
create mode 100644 src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
diff --git a/pom.xml b/pom.xml
index 64ebc8ef3e..fd58331800 100644
--- a/pom.xml
+++ b/pom.xml
@@ -482,15 +482,7 @@
**/Endpoint.java
src/main/java/redis/clients/jedis/mcf/*.java
src/test/java/redis/clients/jedis/failover/*.java
- **/mcf/EchoStrategyIntegrationTest.java
- **/mcf/LagAwareStrategyUnitTest.java
- **/mcf/RedisRestAPI*.java
- **/mcf/ActiveActiveLocalFailoverTest*
- **/mcf/FailbackMechanism*.java
- **/mcf/PeriodicFailbackTest*.java
- **/mcf/AutomaticFailoverTest*.java
- **/mcf/MultiCluster*.java
- **/mcf/StatusTracker*.java
+ src/test/java/redis/clients/jedis/mcf/*.java
**/Health*.java
**/*IT.java
**/scenario/RestEndpointUtil.java
diff --git a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java b/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
index 17b239de5c..d8e321e041 100644
--- a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
+++ b/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
@@ -130,16 +130,16 @@ public static interface StrategySupplier {
.asList(JedisConnectionException.class);
/** Default failure rate threshold percentage for circuit breaker activation. */
- private static final float CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT = 50.0f;
+ private static final float CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT = 10.0f;
/** Default minimum number of calls required before circuit breaker can calculate failure rate. */
- private static final int CIRCUIT_BREAKER_SLIDING_WINDOW_MIN_CALLS_DEFAULT = 100;
+ private static final int CIRCUIT_BREAKER_SLIDING_WINDOW_MIN_CALLS_DEFAULT = 1000;
/** Default sliding window type for circuit breaker failure tracking. */
private static final SlidingWindowType CIRCUIT_BREAKER_SLIDING_WINDOW_TYPE_DEFAULT = SlidingWindowType.COUNT_BASED;
/** Default sliding window size for circuit breaker failure tracking. */
- private static final int CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT = 100;
+ private static final int CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT = 2;
/** Default slow call duration threshold in milliseconds. */
private static final int CIRCUIT_BREAKER_SLOW_CALL_DURATION_THRESHOLD_DEFAULT = 60000;
@@ -156,10 +156,10 @@ public static interface StrategySupplier {
.asList(CallNotPermittedException.class, ConnectionFailoverException.class);
/** Default interval in milliseconds for checking if failed clusters have recovered. */
- private static final long FAILBACK_CHECK_INTERVAL_DEFAULT = 5000;
+ private static final long FAILBACK_CHECK_INTERVAL_DEFAULT = 120000;
/** Default grace period in milliseconds to keep clusters disabled after they become unhealthy. */
- private static final long GRACE_PERIOD_DEFAULT = 10000;
+ private static final long GRACE_PERIOD_DEFAULT = 60000;
/** Default maximum number of failover attempts. */
private static final int MAX_NUM_FAILOVER_ATTEMPTS_DEFAULT = 10;
diff --git a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
index 512f1609fc..3c73e17d6f 100644
--- a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
@@ -11,18 +11,19 @@
import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
public class EchoStrategy implements HealthCheckStrategy {
+ private static final int MAX_HEALTH_CHECK_POOL_SIZE = 2;
private final UnifiedJedis jedis;
private final HealthCheckStrategy.Config config;
public EchoStrategy(HostAndPort hostAndPort, JedisClientConfig jedisClientConfig) {
- this(hostAndPort, jedisClientConfig, HealthCheckStrategy.Config.builder().build());
+ this(hostAndPort, jedisClientConfig, HealthCheckStrategy.Config.create());
}
public EchoStrategy(HostAndPort hostAndPort, JedisClientConfig jedisClientConfig,
HealthCheckStrategy.Config config) {
GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig<>();
- poolConfig.setMaxTotal(2);
+ poolConfig.setMaxTotal(MAX_HEALTH_CHECK_POOL_SIZE);
this.jedis = new JedisPooled(hostAndPort, jedisClientConfig, poolConfig);
this.config = config;
}
diff --git a/src/main/java/redis/clients/jedis/mcf/HealthCheckStrategy.java b/src/main/java/redis/clients/jedis/mcf/HealthCheckStrategy.java
index 7e2b43d6db..7d1e5292ca 100644
--- a/src/main/java/redis/clients/jedis/mcf/HealthCheckStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/HealthCheckStrategy.java
@@ -49,6 +49,11 @@ default void close() {
int getDelayInBetweenProbes();
public static class Config {
+ private static final int INTERVAL_DEFAULT = 5000;
+ private static final int TIMEOUT_DEFAULT = 1000;
+ private static final int NUM_PROBES_DEFAULT = 3;
+ private static final int DELAY_IN_BETWEEN_PROBES_DEFAULT = 500;
+
protected final int interval;
protected final int timeout;
protected final int numProbes;
@@ -97,14 +102,14 @@ public ProbingPolicy getPolicy() {
* @return a new Config instance
*/
public static Config create() {
- return new Builder<>().build();
+ return builder().build();
}
/**
* Create a new builder for HealthCheckStrategy.Config.
* @return a new Builder instance
*/
- public static Builder, Config> builder() {
+ public static Builder, ? extends Config> builder() {
return new Builder<>();
}
@@ -114,11 +119,11 @@ public static Builder, Config> builder() {
* @param the config type being built
*/
public static class Builder, C extends Config> {
- protected int interval = 1000;
- protected int timeout = 1000;
- protected int numProbes = 3;
+ protected int interval = INTERVAL_DEFAULT;
+ protected int timeout = TIMEOUT_DEFAULT;
+ protected int numProbes = NUM_PROBES_DEFAULT;
protected ProbingPolicy policy = ProbingPolicy.BuiltIn.ALL_SUCCESS;
- protected int delayInBetweenProbes = 100;
+ protected int delayInBetweenProbes = DELAY_IN_BETWEEN_PROBES_DEFAULT;
/**
* Set the interval between health checks in milliseconds.
diff --git a/src/main/java/redis/clients/jedis/mcf/LagAwareStrategy.java b/src/main/java/redis/clients/jedis/mcf/LagAwareStrategy.java
index fcbe0a11ec..03489b7be3 100644
--- a/src/main/java/redis/clients/jedis/mcf/LagAwareStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/LagAwareStrategy.java
@@ -94,7 +94,7 @@ public HealthStatus doHealthCheck(Endpoint endpoint) {
public static class Config extends HealthCheckStrategy.Config {
public static final boolean EXTENDED_CHECK_DEFAULT = true;
- public static final Duration AVAILABILITY_LAG_TOLERANCE_DEFAULT = Duration.ofMillis(100);
+ public static final Duration AVAILABILITY_LAG_TOLERANCE_DEFAULT = Duration.ofMillis(5000);
private final Endpoint restEndpoint;
private final Supplier credentialsSupplier;
@@ -102,7 +102,7 @@ public static class Config extends HealthCheckStrategy.Config {
// SSL configuration for HTTPS connections to Redis Enterprise REST API
private final SslOptions sslOptions;
- // Maximum acceptable lag in milliseconds (default: 100);
+ // Maximum acceptable lag in milliseconds (default: 5000);
private final Duration availability_lag_tolerance;
// Enable extended lag checking (default: true - performs lag validation in addition to standard
@@ -111,7 +111,7 @@ public static class Config extends HealthCheckStrategy.Config {
private final boolean extendedCheckEnabled;
public Config(Endpoint restEndpoint, Supplier credentialsSupplier) {
- this(builder(restEndpoint, credentialsSupplier).interval(1000).timeout(1000).numProbes(3)
+ this(builder(restEndpoint, credentialsSupplier)
.availabilityLagTolerance(AVAILABILITY_LAG_TOLERANCE_DEFAULT)
.extendedCheckEnabled(EXTENDED_CHECK_DEFAULT));
}
@@ -157,6 +157,15 @@ public static ConfigBuilder builder(Endpoint restEndpoint,
return new ConfigBuilder(restEndpoint, credentialsSupplier);
}
+ /**
+ * Use {@link LagAwareStrategy.Config#builder(Endpoint, Supplier)} instead.
+ * @return a new Builder instance
+ */
+ public static ConfigBuilder builder() {
+ throw new UnsupportedOperationException(
+ "Endpoint and credentials are required to build LagAwareStrategy.Config.");
+ }
+
/**
* Create a new Config instance with default values.
*
diff --git a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
new file mode 100644
index 0000000000..6939ef7069
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
@@ -0,0 +1,77 @@
+package redis.clients.jedis.mcf;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+import java.time.Duration;
+
+import org.junit.jupiter.api.Test;
+import redis.clients.jedis.DefaultJedisClientConfig;
+import redis.clients.jedis.HostAndPort;
+import redis.clients.jedis.JedisClientConfig;
+import redis.clients.jedis.MultiClusterClientConfig;
+
+public class DefaultValuesTest {
+
+ HostAndPort fakeEndpoint = new HostAndPort("fake", 6379);
+ JedisClientConfig config = DefaultJedisClientConfig.builder().build();
+
+ @Test
+ void testDefaultValuesInConfig() {
+
+ MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ .builder(fakeEndpoint, config).build();
+ MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(
+ new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+
+ // check for grace period
+ assertEquals(60000, multiConfig.getGracePeriod());
+
+ // check for cluster config
+ assertEquals(clusterConfig, multiConfig.getClusterConfigs()[0]);
+
+ // check healthchecks enabled
+ assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
+
+ // check default healthcheck strategy is echo
+ assertEquals(EchoStrategy.DEFAULT, clusterConfig.getHealthCheckStrategySupplier());
+
+ // check number of probes
+ assertEquals(3,
+ clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getNumProbes());
+
+ assertEquals(500, clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config)
+ .getDelayInBetweenProbes());
+
+ assertEquals(ProbingPolicy.BuiltIn.ALL_SUCCESS,
+ clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getPolicy());
+
+ // check health check interval
+ assertEquals(5000,
+ clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getInterval());
+
+ // check lag aware tolerance
+ LagAwareStrategy.Config lagAwareConfig = LagAwareStrategy.Config
+ .builder(fakeEndpoint, config.getCredentialsProvider()).build();
+ assertEquals(Duration.ofMillis(5000), lagAwareConfig.getAvailabilityLagTolerance());
+
+ // TODO: check CB number of failures threshold -- 1000
+ // assertEquals(1000, multiConfig.circuitBreakerMinNumOfFailures());
+
+ // check CB failure rate threshold
+ assertEquals(10, multiConfig.getCircuitBreakerFailureRateThreshold());
+
+ // check CB sliding window size
+ assertEquals(2, multiConfig.getCircuitBreakerSlidingWindowSize());
+
+ // check failback check interval
+ assertEquals(120000, multiConfig.getFailbackCheckInterval());
+
+ // check failover max attempts before give up
+ assertEquals(10, multiConfig.getMaxNumFailoverAttempts());
+
+ // check delay between failover attempts
+ assertEquals(12000, multiConfig.getDelayInBetweenFailoverAttempts());
+
+ }
+}
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
index 4e57b3c466..25de5a7c3e 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
@@ -32,7 +32,7 @@ void testFailbackCheckIntervalConfiguration() {
MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
- assertEquals(5000, defaultConfig.getFailbackCheckInterval());
+ assertEquals(120000, defaultConfig.getFailbackCheckInterval());
// Test custom value
MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder(
@@ -105,7 +105,7 @@ void testGracePeriodConfiguration() {
MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
- assertEquals(10000, defaultConfig.getGracePeriod()); // Default is 10 seconds
+ assertEquals(60000, defaultConfig.getGracePeriod()); // Default is 60 seconds
// Test custom value
MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder(
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
index d81e012c1e..d7b61dcd20 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
@@ -531,7 +531,7 @@ void testStrategySupplierPolymorphism() {
// Test without config
HealthCheckStrategy strategyWithoutConfig = supplier.get(testEndpoint, null);
assertNotNull(strategyWithoutConfig);
- assertEquals(1000, strategyWithoutConfig.getInterval()); // Default values
+ assertEquals(5000, strategyWithoutConfig.getInterval()); // Default values
assertEquals(1000, strategyWithoutConfig.getTimeout());
}
diff --git a/src/test/java/redis/clients/jedis/mcf/LagAwareStrategyUnitTest.java b/src/test/java/redis/clients/jedis/mcf/LagAwareStrategyUnitTest.java
index 459b3be8c3..c985c0d06d 100644
--- a/src/test/java/redis/clients/jedis/mcf/LagAwareStrategyUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/LagAwareStrategyUnitTest.java
@@ -50,7 +50,7 @@ void healthy_when_bdb_available_and_cached_uid_used_on_next_check() throws Excep
try (MockedConstruction mockedConstructor = mockConstruction(RedisRestAPI.class,
(mock, context) -> {
when(mock.getBdbs()).thenReturn(Arrays.asList(bdbInfo));
- when(mock.checkBdbAvailability("1", true, 100L)).thenReturn(true);
+ when(mock.checkBdbAvailability("1", true, 5000L)).thenReturn(true);
reference[0] = mock;
})) {
Config lagCheckConfig = Config.builder(endpoint, creds).interval(500).timeout(250)
@@ -61,7 +61,7 @@ void healthy_when_bdb_available_and_cached_uid_used_on_next_check() throws Excep
assertEquals(HealthStatus.HEALTHY, strategy.doHealthCheck(endpoint));
verify(api, times(1)).getBdbs(); // Should not call getBdbs again when cached
- verify(api, times(2)).checkBdbAvailability("1", true, 100L);
+ verify(api, times(2)).checkBdbAvailability("1", true, 5000L);
}
}
}
@@ -97,7 +97,7 @@ void exception_and_cache_reset_on_exception_then_recovers_next_time() throws Exc
// First call throws exception, second call returns bdbInfo
when(mock.getBdbs()).thenThrow(new RuntimeException("boom"))
.thenReturn(Arrays.asList(bdbInfo));
- when(mock.checkBdbAvailability("42", true, 100L)).thenReturn(true);
+ when(mock.checkBdbAvailability("42", true, 5000L)).thenReturn(true);
reference[0] = mock;
})) {
@@ -115,7 +115,7 @@ void exception_and_cache_reset_on_exception_then_recovers_next_time() throws Exc
// Verify getBdbs was called twice (once failed, once succeeded)
verify(api, times(2)).getBdbs();
// Verify availability check was called only once (on the successful attempt)
- verify(api, times(1)).checkBdbAvailability("42", true, 100L);
+ verify(api, times(1)).checkBdbAvailability("42", true, 5000L);
}
}
}
@@ -173,10 +173,10 @@ void exception_when_no_matching_host_found() throws Exception {
void config_builder_creates_config_with_default_values() {
Config config = Config.builder(endpoint, creds).build();
- assertEquals(1000, config.interval);
+ assertEquals(5000, config.interval);
assertEquals(1000, config.timeout);
assertEquals(3, config.numProbes);
- assertEquals(Duration.ofMillis(100), config.getAvailabilityLagTolerance());
+ assertEquals(Duration.ofMillis(5000), config.getAvailabilityLagTolerance());
assertEquals(endpoint, config.getRestEndpoint());
assertEquals(creds, config.getCredentialsSupplier());
}
@@ -288,7 +288,7 @@ void base_config_builder_factory_method_works() {
void base_config_create_factory_method_uses_defaults() {
HealthCheckStrategy.Config config = HealthCheckStrategy.Config.create();
- assertEquals(1000, config.getInterval());
+ assertEquals(5000, config.getInterval());
assertEquals(1000, config.getTimeout());
assertEquals(3, config.getNumProbes());
}
From 3c702e803e7314ff81e1ac0f87ac87ee541653d2 Mon Sep 17 00:00:00 2001
From: atakavci
Date: Fri, 3 Oct 2025 12:25:45 +0300
Subject: [PATCH 03/17] [automatic failover] Add dual thresholds (min num of
failures + failure rate) capability to circuit breaker (#4295)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* [automatic failover] Remove the check for 'GenericObjectPool.getNumWaiters()' in 'TrackingConnectionPool' (#4270)
- remove the check for number of waiters in TrackingConnectionPool
* [automatic failover] Configure max total connections for EchoStrategy (#4268)
- set maxtotal connections for echoStrategy
* [automatic failover] Replace 'CircuitBreaker' with 'Cluster' for 'CircuitBreakerFailoverBase.clusterFailover' (#4275)
* - replace CircuitBreaker with Cluster for CircuitBreakerFailoverBase.clusterFailover
- improve thread safety with provider initialization
* - formatting
* [automatic failover] Minor optimizations on fast failover (#4277)
* - minor optimizations on fail fast
* - volatile failfast
* [automatic failover] Implement health check retries (#4273)
* - replace minConsecutiveSuccessCount with numberOfRetries
- add retries into healthCheckImpl
- apply changes to strategy implementations config classes
- fix unit tests
* - fix typo
* - fix failing tests
* - add tests for retry logic
* - formatting
* - format
* - revisit numRetries for healthCheck, replace with numProbes and implement built-in policies
- new types probecontext, ProbePolicy, HealthProbeContext
- add delayer executor pool to healthCheckImpl
- adjustments on worker pool of healthCheckImpl for shared use of workers
* - format
* - expand comment with example case
* - drop pooled executor for delays
* - polish
* - fix tests
* - formatting
* - checking failing tests
* - fix test
* - fix flaky tests
* - fix flaky test
* - add tests for builtin probing policies
* - fix flaky test
* [automatic failover] Move failover provider to mcf (#4294)
* - move failover provider to mcf
* - make iterateActiveCluster package private
* [automatic failover] Add SSL configuration support to LagAwareStrategy (#4291)
* User-provided ssl config for lag-aware health check
* ssl scenario test for lag-aware healthcheck
* format
* format
* address review comments
- use getters instead of fields
* [automatic failover] Implement max number of failover attempts (#4293)
* - implement max failover attempt
- add tests
* - fix user receive the intended exception
* -clean+format
* - java doc for exceptions
* format
* - more tests on exception types in max failover attempts mechanism
* format
* fix failing timing in test
* disable health checks
* rename to switchToHealthyCluster
* format
* - Add dual-threshold (min failures + failure rate) failover to circuit breaker executor
- Map config to resilience4j via CircuitBreakerThresholdsAdapter
- clean up/simplify config: drop slow-call and window type
- Add thresholdMinNumOfFailures; update some of the defaults
- Update provider to use thresholds adapter
- Update docs; align examples with new defaults
- Add tests for 0% rate, edge thresholds
* polish
* Update src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* - fix typo
* - fix min total calls calculation
* format
* - merge issues fixed
* fix javadoc ref
* - move threshold evaluations to failoverbase
- simplify executor and cbfailoverconnprovider
- adjust config getters
- fix failing tests due to COUNT_BASED -> TIME_BASED
- new tests for thresholds calculations and impact on circuit state transitions
* - avoid facilitating actual CBConfig type in tests
* Update src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Trigger workflows
* - evaluate only in failure recorded and failover immediately
- add more test on threshold calculations
- enable command line arg for overwriting surefire.excludedGroups
* format
* check pom
* - fix error prone test
* [automatic failover] Set and test default values for failover config&components (#4298)
* - set & test default values
* - format
* - fix tests failing due to changing defaults
* - fix flaky test
* - remove unnecessary checks for failover attempt
* - clean and trim adapter class
- add docs and more explanation
* fix javadoc issue
* - switch to all_success to fix flaky timing
* - fix issue in CircuitBreakerFailoverConnectionProvider
* introduce ReflectionTestUtil
---------
Co-authored-by: Ivo Gaydazhiev
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
docs/failover.md | 14 +-
pom.xml | 4 +-
.../jedis/MultiClusterClientConfig.java | 306 ++++--------------
.../mcf/CircuitBreakerCommandExecutor.java | 24 +-
.../jedis/mcf/CircuitBreakerFailoverBase.java | 11 +
...cuitBreakerFailoverConnectionProvider.java | 11 +-
.../mcf/CircuitBreakerThresholdsAdapter.java | 84 +++++
.../MultiClusterPooledConnectionProvider.java | 61 +++-
.../failover/FailoverIntegrationTest.java | 34 +-
.../mcf/ActiveActiveLocalFailoverTest.java | 2 -
.../mcf/CircuitBreakerThresholdsTest.java | 249 ++++++++++++++
.../mcf/ClusterEvaluateThresholdsTest.java | 183 +++++++++++
.../jedis/mcf/FailbackMechanismUnitTest.java | 2 +-
.../jedis/mcf/HealthCheckIntegrationTest.java | 7 +-
.../clients/jedis/mcf/HealthCheckTest.java | 7 +-
...ultiClusterFailoverAttemptsConfigTest.java | 45 +--
...tiClusterPooledConnectionProviderTest.java | 10 +-
.../clients/jedis/mcf/StatusTrackerTest.java | 14 +-
.../jedis/misc/AutomaticFailoverTest.java | 26 +-
.../scenario/ActiveActiveFailoverTest.java | 2 -
.../jedis/util/ReflectionTestUtil.java | 91 ++++++
21 files changed, 812 insertions(+), 375 deletions(-)
create mode 100644 src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
create mode 100644 src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
create mode 100644 src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
create mode 100644 src/test/java/redis/clients/jedis/util/ReflectionTestUtil.java
diff --git a/docs/failover.md b/docs/failover.md
index 25ab21d6e3..632fba1a82 100644
--- a/docs/failover.md
+++ b/docs/failover.md
@@ -69,9 +69,8 @@ Then build a `MultiClusterPooledConnectionProvider`.
```java
MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clientConfigs);
-builder.circuitBreakerSlidingWindowSize(10); // Sliding window size in number of calls
-builder.circuitBreakerSlidingWindowMinCalls(1);
-builder.circuitBreakerFailureRateThreshold(50.0f); // percentage of failures to trigger circuit breaker
+builder.circuitBreakerSlidingWindowSize(2); // Sliding window size in number of calls
+builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit breaker
builder.failbackSupported(true); // Enable failback
builder.failbackCheckInterval(1000); // Check every second the unhealthy cluster to see if it has recovered
@@ -140,12 +139,9 @@ Jedis uses the following circuit breaker settings:
| Setting | Default value | Description |
|-----------------------------------------|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Sliding window type | `COUNT_BASED` | The type of sliding window used to record the outcome of calls. Options are `COUNT_BASED` and `TIME_BASED`. |
-| Sliding window size | 100 | The size of the sliding window. Units depend on sliding window type. When `COUNT_BASED`, the size represents number of calls. When `TIME_BASED`, the size represents seconds. |
-| Sliding window min calls | 100 | Minimum number of calls required (per sliding window period) before the CircuitBreaker will start calculating the error rate or slow call rate. |
-| Failure rate threshold | `50.0f` | Percentage of calls within the sliding window that must fail before the circuit breaker transitions to the `OPEN` state. |
-| Slow call duration threshold | 60000 ms | Duration threshold above which calls are classified as slow and added to the sliding window. |
-| Slow call rate threshold | `100.0f` | Percentage of calls within the sliding window that exceed the slow call duration threshold before circuit breaker transitions to the `OPEN` state. |
+| Sliding window size | 2 | The size of the sliding window. Units depend on sliding window type. The size represents seconds. |
+| Threshold min number of failures | 1000 | Minimum number of failures before circuit breaker is tripped. |
+| Failure rate threshold | `10.0f` | Percentage of calls within the sliding window that must fail before the circuit breaker transitions to the `OPEN` state. |
| Circuit breaker included exception list | [JedisConnectionException] | A list of Throwable classes that count as failures and add to the failure rate. |
| Circuit breaker ignored exception list | null | A list of Throwable classes to explicitly ignore for failure rate calculations. | |
diff --git a/pom.xml b/pom.xml
index fd58331800..f21927b66b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -62,6 +62,8 @@
5.13.4
+
+ integration,scenario
@@ -335,7 +337,7 @@
${redis-hosts}
- integration,scenario
+ ${excludedGroupsForUnitTests}
**/examples/*.java
**/scenario/*Test.java
diff --git a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java b/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
index d8e321e041..49a351c82e 100644
--- a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
+++ b/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
@@ -1,8 +1,6 @@
package redis.clients.jedis;
import io.github.resilience4j.circuitbreaker.CallNotPermittedException;
-import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
-
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
@@ -58,7 +56,7 @@
*
* // Build multi-cluster configuration
* MultiClusterClientConfig config = MultiClusterClientConfig.builder(primary, secondary)
- * .circuitBreakerFailureRateThreshold(50.0f).retryMaxAttempts(3).failbackSupported(true)
+ * .circuitBreakerFailureRateThreshold(10.0f).retryMaxAttempts(3).failbackSupported(true)
* .gracePeriod(10000).build();
*
* // Use with connection provider
@@ -132,21 +130,12 @@ public static interface StrategySupplier {
/** Default failure rate threshold percentage for circuit breaker activation. */
private static final float CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT = 10.0f;
- /** Default minimum number of calls required before circuit breaker can calculate failure rate. */
- private static final int CIRCUIT_BREAKER_SLIDING_WINDOW_MIN_CALLS_DEFAULT = 1000;
-
- /** Default sliding window type for circuit breaker failure tracking. */
- private static final SlidingWindowType CIRCUIT_BREAKER_SLIDING_WINDOW_TYPE_DEFAULT = SlidingWindowType.COUNT_BASED;
+ /** Minimum number of failures before circuit breaker is tripped. */
+ private static final int CIRCUITBREAKER_THRESHOLD_MIN_NUM_OF_FAILURES_DEFAULT = 1000;
/** Default sliding window size for circuit breaker failure tracking. */
private static final int CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT = 2;
- /** Default slow call duration threshold in milliseconds. */
- private static final int CIRCUIT_BREAKER_SLOW_CALL_DURATION_THRESHOLD_DEFAULT = 60000;
-
- /** Default slow call rate threshold percentage for circuit breaker activation. */
- private static final float CIRCUIT_BREAKER_SLOW_CALL_RATE_THRESHOLD_DEFAULT = 100.0f;
-
/** Default list of exceptions that are recorded as circuit breaker failures. */
private static final List CIRCUIT_BREAKER_INCLUDED_EXCEPTIONS_DEFAULT = Arrays
.asList(JedisConnectionException.class);
@@ -249,76 +238,43 @@ public static interface StrategySupplier {
private List retryIgnoreExceptionList;
// ============ Circuit Breaker Configuration ============
- // Based on Resilience4j Circuit Breaker: https://resilience4j.readme.io/docs/circuitbreaker
/**
- * Failure rate threshold percentage that triggers circuit breaker transition to OPEN state.
+ * Minimum number of failures before circuit breaker is tripped.
*
- * When the failure rate equals or exceeds this threshold, the circuit breaker transitions to the
- * OPEN state and starts short-circuiting calls, immediately failing them without attempting to
- * reach the Redis cluster. This prevents cascading failures and allows the system to fail over to
- * the next available cluster.
+ * When both this failure-count threshold and the failure rate threshold are exceeded, the
+ * circuit breaker will trip and prevent further requests from being sent to the cluster until it
+ * has recovered.
*
*
- * Range: 0.0 to 100.0 (percentage)
+ * Default: {@value #CIRCUITBREAKER_THRESHOLD_MIN_NUM_OF_FAILURES_DEFAULT}
*
- *
- * Default: {@value #CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT}%
- *
- * @see #getCircuitBreakerFailureRateThreshold()
- * @see #circuitBreakerSlidingWindowMinCalls
+ * @see #getCircuitBreakerMinNumOfFailures()
+ * @see #circuitBreakerFailureRateThreshold
*/
- private float circuitBreakerFailureRateThreshold;
+ private int circuitBreakerMinNumOfFailures;
/**
- * Minimum number of calls required per sliding window period before circuit breaker can calculate
- * failure rates.
- *
- * The circuit breaker needs a minimum number of calls to make statistically meaningful decisions
- * about failure rates. Until this minimum is reached, the circuit breaker remains in the CLOSED
- * state regardless of failure rate.
- *
- *
- * Example: If set to 10, at least 10 calls must be recorded before the failure
- * rate can be calculated. If only 9 calls have been recorded, the circuit breaker will not
- * transition to OPEN even if all 9 calls failed.
- *
+ * Failure rate threshold percentage that triggers circuit breaker transition to OPEN state.
*
- * Default: {@value #CIRCUIT_BREAKER_SLIDING_WINDOW_MIN_CALLS_DEFAULT}
+ * When the failure rate exceeds both this threshold and the minimum number of failures, the
+ * circuit breaker transitions to the OPEN state and starts short-circuiting calls, immediately
+ * failing them without attempting to reach the Redis cluster. This prevents cascading failures
+ * and allows the system to fail over to the next available cluster.
*
- * @see #getCircuitBreakerSlidingWindowMinCalls()
- * @see #circuitBreakerFailureRateThreshold
- */
- private int circuitBreakerSlidingWindowMinCalls;
-
- /**
- * Type of sliding window used to record call outcomes for circuit breaker calculations.
*
- * Available Types:
+ * Range: 0.0 to 100.0 (percentage)
*
- *
- * - COUNT_BASED: Records the last N calls (where N = slidingWindowSize)
- * - TIME_BASED: Records calls from the last N seconds (where N =
- * slidingWindowSize)
- *
*
- * Default: {@link SlidingWindowType#COUNT_BASED}
+ * Default: {@value #CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT}%
*
- * @see #getCircuitBreakerSlidingWindowType()
- * @see #circuitBreakerSlidingWindowSize
+ * @see #getCircuitBreakerFailureRateThreshold()
+ * @see #circuitBreakerMinNumOfFailures
*/
- private SlidingWindowType circuitBreakerSlidingWindowType;
+ private float circuitBreakerFailureRateThreshold;
/**
* Size of the sliding window used to record call outcomes when the circuit breaker is CLOSED.
- *
- * The interpretation of this value depends on the {@link #circuitBreakerSlidingWindowType}:
- *
- *
- * - COUNT_BASED: Number of calls to track
- * - TIME_BASED: Number of seconds to track
- *
- *
* Default: {@value #CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT}
*
* @see #getCircuitBreakerSlidingWindowSize()
@@ -326,44 +282,6 @@ public static interface StrategySupplier {
*/
private int circuitBreakerSlidingWindowSize;
- /**
- * Duration threshold above which calls are considered slow and contribute to slow call rate.
- *
- * Calls that take longer than this threshold are classified as "slow calls" and are tracked
- * separately from failed calls. This allows the circuit breaker to open based on performance
- * degradation even when calls are technically successful.
- *
- *
- * Default: {@value #CIRCUIT_BREAKER_SLOW_CALL_DURATION_THRESHOLD_DEFAULT}
- * milliseconds
- *
- * @see #getCircuitBreakerSlowCallDurationThreshold()
- * @see #circuitBreakerSlowCallRateThreshold
- */
- private Duration circuitBreakerSlowCallDurationThreshold;
-
- /**
- * Slow call rate threshold percentage that triggers circuit breaker transition to OPEN state.
- *
- * When the percentage of slow calls equals or exceeds this threshold, the circuit breaker
- * transitions to the OPEN state. A call is considered slow when its duration exceeds the
- * {@link #circuitBreakerSlowCallDurationThreshold}.
- *
- *
- * This mechanism allows the circuit breaker to open based on performance degradation rather than
- * just failures, enabling proactive failover when a cluster becomes slow.
- *
- *
- * Range: 0.0 to 100.0 (percentage)
- *
- *
- * Default: {@value #CIRCUIT_BREAKER_SLOW_CALL_RATE_THRESHOLD_DEFAULT}%
- *
- * @see #getCircuitBreakerSlowCallRateThreshold()
- * @see #circuitBreakerSlowCallDurationThreshold
- */
- private float circuitBreakerSlowCallRateThreshold;
-
/**
* List of exception classes that are recorded as circuit breaker failures and increase the
* failure rate.
@@ -577,50 +495,34 @@ public int getRetryWaitDurationExponentialBackoffMultiplier() {
}
/**
- * Returns the failure rate threshold percentage for circuit breaker activation.
+ * Returns the failure rate threshold percentage for circuit breaker activation. 0.0f means
+ * failure rate is ignored, and only minimum number of failures is considered.
* @return failure rate threshold as a percentage (0.0 to 100.0)
* @see #circuitBreakerFailureRateThreshold
+ * @see #getCircuitBreakerMinNumOfFailures
*/
public float getCircuitBreakerFailureRateThreshold() {
return circuitBreakerFailureRateThreshold;
}
- /**
- * Returns the minimum number of calls required before circuit breaker can calculate failure
- * rates.
- * @return minimum number of calls for failure rate calculation
- * @see #circuitBreakerSlidingWindowMinCalls
- */
- public int getCircuitBreakerSlidingWindowMinCalls() {
- return circuitBreakerSlidingWindowMinCalls;
- }
-
/**
* Returns the size of the sliding window used for circuit breaker calculations.
* @return sliding window size (calls or seconds depending on window type)
* @see #circuitBreakerSlidingWindowSize
- * @see #getCircuitBreakerSlidingWindowType()
*/
public int getCircuitBreakerSlidingWindowSize() {
return circuitBreakerSlidingWindowSize;
}
/**
- * Returns the duration threshold above which calls are considered slow.
- * @return slow call duration threshold
- * @see #circuitBreakerSlowCallDurationThreshold
+ * Returns the minimum number of failures before circuit breaker is tripped. 0 means minimum
+ * number of failures is ignored, and only failure rate is considered.
+ * @return minimum number of failures before circuit breaker is tripped
+ * @see #circuitBreakerMinNumOfFailures
+ * @see #getCircuitBreakerFailureRateThreshold
*/
- public Duration getCircuitBreakerSlowCallDurationThreshold() {
- return circuitBreakerSlowCallDurationThreshold;
- }
-
- /**
- * Returns the slow call rate threshold percentage for circuit breaker activation.
- * @return slow call rate threshold as a percentage (0.0 to 100.0)
- * @see #circuitBreakerSlowCallRateThreshold
- */
- public float getCircuitBreakerSlowCallRateThreshold() {
- return circuitBreakerSlowCallRateThreshold;
+ public int getCircuitBreakerMinNumOfFailures() {
+ return circuitBreakerMinNumOfFailures;
}
/**
@@ -659,15 +561,6 @@ public List getCircuitBreakerIgnoreExceptionList() {
return circuitBreakerIgnoreExceptionList;
}
- /**
- * Returns the type of sliding window used for circuit breaker calculations.
- * @return sliding window type (COUNT_BASED or TIME_BASED)
- * @see #circuitBreakerSlidingWindowType
- */
- public SlidingWindowType getCircuitBreakerSlidingWindowType() {
- return circuitBreakerSlidingWindowType;
- }
-
/**
* Returns the list of exception classes that trigger immediate fallback to next cluster.
* @return list of exception classes that trigger fallback, never null
@@ -1103,21 +996,9 @@ public static class Builder {
/** Failure rate threshold percentage for circuit breaker activation. */
private float circuitBreakerFailureRateThreshold = CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT;
- /** Minimum number of calls required before circuit breaker can calculate failure rates. */
- private int circuitBreakerSlidingWindowMinCalls = CIRCUIT_BREAKER_SLIDING_WINDOW_MIN_CALLS_DEFAULT;
-
- /** Type of sliding window for circuit breaker calculations. */
- private SlidingWindowType circuitBreakerSlidingWindowType = CIRCUIT_BREAKER_SLIDING_WINDOW_TYPE_DEFAULT;
-
/** Size of the sliding window for circuit breaker calculations. */
private int circuitBreakerSlidingWindowSize = CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT;
- /** Duration threshold above which calls are considered slow. */
- private int circuitBreakerSlowCallDurationThreshold = CIRCUIT_BREAKER_SLOW_CALL_DURATION_THRESHOLD_DEFAULT;
-
- /** Slow call rate threshold percentage for circuit breaker activation. */
- private float circuitBreakerSlowCallRateThreshold = CIRCUIT_BREAKER_SLOW_CALL_RATE_THRESHOLD_DEFAULT;
-
/** List of exception classes that are recorded as circuit breaker failures. */
private List circuitBreakerIncludedExceptionList = CIRCUIT_BREAKER_INCLUDED_EXCEPTIONS_DEFAULT;
@@ -1131,6 +1012,9 @@ public static class Builder {
/** Whether to retry failed commands during failover. */
private boolean retryOnFailover = false;
+ /** Minimum number of failures before circuit breaker is tripped. */
+ private int circuitBreakerMinNumOfFailures = CIRCUITBREAKER_THRESHOLD_MIN_NUM_OF_FAILURES_DEFAULT;
+
/** Whether automatic failback to higher-priority clusters is supported. */
private boolean isFailbackSupported = true;
@@ -1281,9 +1165,9 @@ public Builder retryIgnoreExceptionList(List retryIgnoreExceptionList) {
/**
* Sets the failure rate threshold percentage that triggers circuit breaker activation.
*
- * When the failure rate equals or exceeds this threshold, the circuit breaker transitions to
- * the OPEN state and starts short-circuiting calls, enabling immediate failover to the next
- * available cluster.
+ * When both the failure rate threshold and the minimum number of failures are exceeded, the
+ * circuit breaker transitions to the OPEN state and starts short-circuiting calls, enabling
+ * immediate failover to the next available cluster.
*
*
* Typical Values:
@@ -1295,74 +1179,16 @@ public Builder retryIgnoreExceptionList(List retryIgnoreExceptionList) {
*
* @param circuitBreakerFailureRateThreshold failure rate threshold as percentage (0.0 to 100.0)
* @return this builder instance for method chaining
+ * @see #circuitBreakerMinNumOfFailures(int)
*/
public Builder circuitBreakerFailureRateThreshold(float circuitBreakerFailureRateThreshold) {
+ checkThresholds(circuitBreakerMinNumOfFailures, circuitBreakerFailureRateThreshold);
this.circuitBreakerFailureRateThreshold = circuitBreakerFailureRateThreshold;
return this;
}
- /**
- * Sets the minimum number of calls required before circuit breaker can calculate failure rates.
- *
- * The circuit breaker needs sufficient data to make statistically meaningful decisions. Until
- * this minimum is reached, the circuit breaker remains CLOSED regardless of failure rate.
- *
- *
- * Considerations:
- *
- *
- * - Low values (5-10): Faster failure detection, higher chance of false
- * positives
- * - Medium values (50-100): Balanced approach (default: 100)
- * - High values (200+): More stable decisions, slower failure detection
- *
- * @param circuitBreakerSlidingWindowMinCalls minimum number of calls for failure rate
- * calculation
- * @return this builder instance for method chaining
- */
- public Builder circuitBreakerSlidingWindowMinCalls(int circuitBreakerSlidingWindowMinCalls) {
- this.circuitBreakerSlidingWindowMinCalls = circuitBreakerSlidingWindowMinCalls;
- return this;
- }
-
- /**
- * Sets the type of sliding window used for circuit breaker calculations.
- *
- * Available Types:
- *
- *
- * - COUNT_BASED: Tracks the last N calls (default)
- * - TIME_BASED: Tracks calls from the last N seconds
- *
- *
- * COUNT_BASED is generally preferred for consistent load patterns, while TIME_BASED works
- * better for variable load scenarios.
- *
- * @param circuitBreakerSlidingWindowType sliding window type
- * @return this builder instance for method chaining
- */
- public Builder circuitBreakerSlidingWindowType(
- SlidingWindowType circuitBreakerSlidingWindowType) {
- this.circuitBreakerSlidingWindowType = circuitBreakerSlidingWindowType;
- return this;
- }
-
/**
* Sets the size of the sliding window for circuit breaker calculations.
- *
- * The interpretation depends on the sliding window type:
- *
- *
- * - COUNT_BASED: Number of calls to track
- * - TIME_BASED: Number of seconds to track
- *
- *
- * Typical Values:
- *
- *
- * - COUNT_BASED: 50-200 calls (default: 100)
- * - TIME_BASED: 30-300 seconds
- *
* @param circuitBreakerSlidingWindowSize sliding window size
* @return this builder instance for method chaining
*/
@@ -1372,47 +1198,30 @@ public Builder circuitBreakerSlidingWindowSize(int circuitBreakerSlidingWindowSi
}
/**
- * Sets the duration threshold above which calls are considered slow.
+ * Sets the minimum number of failures before circuit breaker is tripped.
*
- * Calls exceeding this threshold contribute to the slow call rate, allowing the circuit breaker
- * to open based on performance degradation rather than just failures. This enables proactive
- * failover when clusters become slow.
+ * When both this minimum number of failures and the failure rate threshold are exceeded, the
+ * circuit breaker will trip and prevent further requests from being sent to the cluster until it
+ * has recovered.
*
*
- * Typical Values:
+ * Default: 1000
*
- *
- * - 1-5 seconds: For low-latency applications
- * - 10-30 seconds: For standard applications
- * - 60+ seconds: For batch or long-running operations (default: 60s)
- *
- * @param circuitBreakerSlowCallDurationThreshold slow call threshold in milliseconds
+ * @param circuitBreakerMinNumOfFailures minimum number of failures before circuit breaker is
+ * tripped
* @return this builder instance for method chaining
+ * @see #circuitBreakerFailureRateThreshold(float)
*/
- public Builder circuitBreakerSlowCallDurationThreshold(
- int circuitBreakerSlowCallDurationThreshold) {
- this.circuitBreakerSlowCallDurationThreshold = circuitBreakerSlowCallDurationThreshold;
+ public Builder circuitBreakerMinNumOfFailures(int circuitBreakerMinNumOfFailures) {
+ checkThresholds(circuitBreakerMinNumOfFailures, circuitBreakerFailureRateThreshold);
+ this.circuitBreakerMinNumOfFailures = circuitBreakerMinNumOfFailures;
return this;
}
- /**
- * Sets the slow call rate threshold percentage that triggers circuit breaker activation.
- *
- * When the percentage of slow calls equals or exceeds this threshold, the circuit breaker
- * opens. This allows failover based on performance degradation even when calls are technically
- * successful.
- *
- *
- * Note: Default value of 100% means only failures trigger the circuit breaker,
- * not slow calls. Lower values enable performance-based failover.
- *
- * @param circuitBreakerSlowCallRateThreshold slow call rate threshold as percentage (0.0 to
- * 100.0)
- * @return this builder instance for method chaining
- */
- public Builder circuitBreakerSlowCallRateThreshold(float circuitBreakerSlowCallRateThreshold) {
- this.circuitBreakerSlowCallRateThreshold = circuitBreakerSlowCallRateThreshold;
- return this;
+ private void checkThresholds(int failures, float rate) {
+ if (failures == 0 && rate == 0) {
+ throw new JedisValidationException(
+ "Both circuitBreakerMinNumOfFailures and circuitBreakerFailureRateThreshold can not be 0 at the same time!");
+ }
}
/**
@@ -1654,13 +1463,9 @@ public MultiClusterClientConfig build() {
config.retryIgnoreExceptionList = this.retryIgnoreExceptionList;
// Copy circuit breaker configuration
+ config.circuitBreakerMinNumOfFailures = this.circuitBreakerMinNumOfFailures;
config.circuitBreakerFailureRateThreshold = this.circuitBreakerFailureRateThreshold;
- config.circuitBreakerSlidingWindowMinCalls = this.circuitBreakerSlidingWindowMinCalls;
- config.circuitBreakerSlidingWindowType = this.circuitBreakerSlidingWindowType;
config.circuitBreakerSlidingWindowSize = this.circuitBreakerSlidingWindowSize;
- config.circuitBreakerSlowCallDurationThreshold = Duration
- .ofMillis(this.circuitBreakerSlowCallDurationThreshold);
- config.circuitBreakerSlowCallRateThreshold = this.circuitBreakerSlowCallRateThreshold;
config.circuitBreakerIncludedExceptionList = this.circuitBreakerIncludedExceptionList;
config.circuitBreakerIgnoreExceptionList = this.circuitBreakerIgnoreExceptionList;
@@ -1676,6 +1481,7 @@ public MultiClusterClientConfig build() {
return config;
}
+
}
}
\ No newline at end of file
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
index 06c97a4e90..90f269bd70 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
@@ -1,6 +1,6 @@
package redis.clients.jedis.mcf;
-import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+import io.github.resilience4j.circuitbreaker.CircuitBreaker.State;
import io.github.resilience4j.decorators.Decorators;
import io.github.resilience4j.decorators.Decorators.DecorateSupplier;
@@ -39,8 +39,14 @@ public T executeCommand(CommandObject commandObject) {
supplier.withRetry(cluster.getRetry());
supplier.withFallback(provider.getFallbackExceptionList(),
e -> this.handleClusterFailover(commandObject, cluster));
-
- return supplier.decorate().get();
+ try {
+ return supplier.decorate().get();
+ } catch (Exception e) {
+ if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveCluster(cluster)) {
+ clusterFailover(cluster);
+ }
+ throw e;
+ }
}
/**
@@ -58,26 +64,16 @@ private T handleExecuteCommand(CommandObject commandObject, Cluster clust
return connection.executeCommand(commandObject);
} catch (Exception e) {
if (cluster.retryOnFailover() && !isActiveCluster(cluster)
- && isCircuitBreakerTrackedException(e, cluster.getCircuitBreaker())) {
+ && isCircuitBreakerTrackedException(e, cluster)) {
throw new ConnectionFailoverException(
"Command failed during failover: " + cluster.getCircuitBreaker().getName(), e);
}
-
throw e;
} finally {
connection.close();
}
}
- private boolean isCircuitBreakerTrackedException(Exception e, CircuitBreaker cb) {
- return cb.getCircuitBreakerConfig().getRecordExceptionPredicate().test(e);
- }
-
- private boolean isActiveCluster(Cluster cluster) {
- Cluster activeCluster = provider.getCluster();
- return activeCluster != null && activeCluster.equals(cluster);
- }
-
/**
* Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
* failure scenarios
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
index 462a5ff427..40141fb009 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
@@ -1,8 +1,10 @@
package redis.clients.jedis.mcf;
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
+
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
import redis.clients.jedis.util.IOUtils;
@@ -79,4 +81,13 @@ else if (cluster == provider.getCluster()) {
}
}
+ boolean isActiveCluster(Cluster cluster) {
+ Cluster activeCluster = provider.getCluster();
+ return activeCluster != null && activeCluster.equals(cluster);
+ }
+
+ static boolean isCircuitBreakerTrackedException(Exception e, Cluster cluster) {
+ return cluster.getCircuitBreaker().getCircuitBreakerConfig().getRecordExceptionPredicate()
+ .test(e);
+ }
}
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
index 24e41022b4..51a5d35788 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
@@ -1,6 +1,6 @@
package redis.clients.jedis.mcf;
-import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+import io.github.resilience4j.circuitbreaker.CircuitBreaker.State;
import io.github.resilience4j.decorators.Decorators;
import io.github.resilience4j.decorators.Decorators.DecorateSupplier;
@@ -32,7 +32,14 @@ public Connection getConnection() {
supplier.withFallback(provider.getFallbackExceptionList(),
e -> this.handleClusterFailover(cluster));
- return supplier.decorate().get();
+ try {
+ return supplier.decorate().get();
+ } catch (Exception e) {
+ if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveCluster(cluster)) {
+ clusterFailover(cluster);
+ }
+ throw e;
+ }
}
/**
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
new file mode 100644
index 0000000000..a103613ba0
--- /dev/null
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
@@ -0,0 +1,84 @@
+package redis.clients.jedis.mcf;
+
+import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
+import redis.clients.jedis.MultiClusterClientConfig;
+
+/**
+ * Adapter that disables Resilience4j's built-in circuit breaker evaluation and helps delegate
+ * threshold decisions to Jedis's custom dual-threshold logic.
+ *
+ * This adapter sets maximum values for failure rate (100%) and minimum calls (Integer.MAX_VALUE) to
+ * effectively disable Resilience4j's automatic circuit breaker transitions, allowing
+ * {@link MultiClusterPooledConnectionProvider.Cluster#evaluateThresholds(boolean)} to control when
+ * the circuit breaker opens based on both minimum failure count AND failure rate.
+ *
+ * @see MultiClusterPooledConnectionProvider.Cluster#evaluateThresholds(boolean)
+ */
+class CircuitBreakerThresholdsAdapter {
+ /** Maximum failure rate threshold (100%) to disable Resilience4j evaluation */
+ private static final float FAILURE_RATE_TRESHOLD_MAX = 100.0f;
+
+ /** Always set to 100% to disable Resilience4j's rate-based evaluation */
+ private float failureRateThreshold;
+
+ /** Always set to Integer.MAX_VALUE to disable Resilience4j's call-count evaluation */
+ private int minimumNumberOfCalls;
+
+ /** Sliding window size from configuration for metrics collection */
+ private int slidingWindowSize;
+
+ /**
+ * Returns Integer.MAX_VALUE to disable Resilience4j's minimum call evaluation.
+ * @return Integer.MAX_VALUE to prevent automatic circuit breaker evaluation
+ */
+ int getMinimumNumberOfCalls() {
+ return minimumNumberOfCalls;
+ }
+
+ /**
+ * Returns 100% to disable Resilience4j's failure rate evaluation.
+ * @return 100.0f to prevent automatic circuit breaker evaluation
+ */
+ float getFailureRateThreshold() {
+ return failureRateThreshold;
+ }
+
+ /**
+ * Returns TIME_BASED sliding window type for metrics collection.
+ * @return SlidingWindowType.TIME_BASED
+ */
+ SlidingWindowType getSlidingWindowType() {
+ return SlidingWindowType.TIME_BASED;
+ }
+
+ /**
+ * Returns the sliding window size for metrics collection.
+ * @return sliding window size in seconds
+ */
+ int getSlidingWindowSize() {
+ return slidingWindowSize;
+ }
+
+ /**
+ * Creates an adapter that disables Resilience4j's circuit breaker evaluation.
+ *
+ * Sets failure rate to 100% and minimum calls to Integer.MAX_VALUE to ensure Resilience4j never
+ * automatically opens the circuit breaker. Instead, Jedis's custom {@code evaluateThresholds()}
+ * method controls circuit breaker state based on the original configuration's dual-threshold
+ * logic.
+ *
+ * @param multiClusterClientConfig configuration containing sliding window size
+ */
+ CircuitBreakerThresholdsAdapter(MultiClusterClientConfig multiClusterClientConfig) {
+
+ // IMPORTANT: failureRateThreshold is set to max to effectively disable Resilience4j's
+ // evaluation and rely on our custom evaluateThresholds() logic.
+ failureRateThreshold = FAILURE_RATE_TRESHOLD_MAX;
+
+ // IMPORTANT: minimumNumberOfCalls is set to max to effectively disable Resilience4j's
+ // evaluation and rely on our custom evaluateThresholds() logic.
+ minimumNumberOfCalls = Integer.MAX_VALUE;
+
+ slidingWindowSize = multiClusterClientConfig.getCircuitBreakerSlidingWindowSize();
+ }
+}
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
index 50d7cfd30c..4cf044bbe6 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
@@ -1,6 +1,7 @@
package redis.clients.jedis.mcf;
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+import io.github.resilience4j.circuitbreaker.CircuitBreaker.Metrics;
import io.github.resilience4j.circuitbreaker.CircuitBreaker.State;
import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;
import io.github.resilience4j.circuitbreaker.CircuitBreakerRegistry;
@@ -39,7 +40,6 @@
import redis.clients.jedis.mcf.JedisFailoverException.*;
import redis.clients.jedis.providers.ConnectionProvider;
import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
-
import redis.clients.jedis.util.Pool;
/**
@@ -54,7 +54,6 @@
* Support for manual failback is provided by way of {@link #setActiveCluster(Endpoint)}
*
*/
-// TODO: move?
@Experimental
public class MultiClusterPooledConnectionProvider implements ConnectionProvider {
@@ -133,18 +132,14 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste
////////////// Configure Circuit Breaker ////////////////////
CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom();
- circuitBreakerConfigBuilder
- .failureRateThreshold(multiClusterClientConfig.getCircuitBreakerFailureRateThreshold());
- circuitBreakerConfigBuilder
- .slowCallRateThreshold(multiClusterClientConfig.getCircuitBreakerSlowCallRateThreshold());
- circuitBreakerConfigBuilder.slowCallDurationThreshold(
- multiClusterClientConfig.getCircuitBreakerSlowCallDurationThreshold());
- circuitBreakerConfigBuilder
- .minimumNumberOfCalls(multiClusterClientConfig.getCircuitBreakerSlidingWindowMinCalls());
- circuitBreakerConfigBuilder
- .slidingWindowType(multiClusterClientConfig.getCircuitBreakerSlidingWindowType());
- circuitBreakerConfigBuilder
- .slidingWindowSize(multiClusterClientConfig.getCircuitBreakerSlidingWindowSize());
+
+ CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(
+ multiClusterClientConfig);
+ circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls());
+ circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold());
+ circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize());
+ circuitBreakerConfigBuilder.slidingWindowType(adapter.getSlidingWindowType());
+
circuitBreakerConfigBuilder.recordExceptions(multiClusterClientConfig
.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
circuitBreakerConfigBuilder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State
@@ -334,6 +329,12 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi
}
multiClusterMap.put(config.getHostAndPort(), cluster);
+
+ // this is the place where we listen tracked errors and check if
+ // thresholds are exceeded for the cluster
+ circuitBreakerEventPublisher.onError(event -> {
+ cluster.evaluateThresholds(false);
+ });
}
/**
@@ -800,6 +801,14 @@ public boolean retryOnFailover() {
return multiClusterClientConfig.isRetryOnFailover();
}
+ public int getCircuitBreakerMinNumOfFailures() {
+ return multiClusterClientConfig.getCircuitBreakerMinNumOfFailures();
+ }
+
+ public float getCircuitBreakerFailureRateThreshold() {
+ return multiClusterClientConfig.getCircuitBreakerFailureRateThreshold();
+ }
+
public boolean isDisabled() {
return disabled;
}
@@ -847,6 +856,30 @@ public void close() {
connectionPool.close();
}
+ void evaluateThresholds(boolean lastFailRecorded) {
+ if (getCircuitBreaker().getState() == State.CLOSED
+ && isThresholdsExceeded(this, lastFailRecorded)) {
+ getCircuitBreaker().transitionToOpenState();
+ }
+ }
+
+ private static boolean isThresholdsExceeded(Cluster cluster, boolean lastFailRecorded) {
+ Metrics metrics = cluster.getCircuitBreaker().getMetrics();
+ // ATTENTION: this is to increment fails in regard to the current call that is failing,
+ // DO NOT remove the increment, it will change the behaviour in case of initial requests to
+ // cluster fail
+ int fails = metrics.getNumberOfFailedCalls() + (lastFailRecorded ? 0 : 1);
+ int succ = metrics.getNumberOfSuccessfulCalls();
+ if (fails >= cluster.getCircuitBreakerMinNumOfFailures()) {
+ float ratePercentThreshold = cluster.getCircuitBreakerFailureRateThreshold();// 0..100
+ int total = fails + succ;
+ if (total == 0) return false;
+ float failureRatePercent = (fails * 100.0f) / total;
+ return failureRatePercent >= ratePercentThreshold;
+ }
+ return false;
+ }
+
@Override
public String toString() {
return circuitBreaker.getName() + "{" + "connectionPool=" + connectionPool + ", retry="
diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
index 902ed73aa5..6eb047f6c7 100644
--- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
@@ -30,12 +30,12 @@
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
-import static io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType.COUNT_BASED;
import static org.awaitility.Awaitility.await;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.instanceOf;
@@ -150,7 +150,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception
assertThrows(JedisConnectionException.class, () -> failoverClient.info("server"));
assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
- equalTo(CircuitBreaker.State.OPEN));
+ equalTo(CircuitBreaker.State.FORCED_OPEN));
// Check that the failoverClient is now using Endpoint 2
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID));
@@ -161,7 +161,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception
// Endpoint1 and Endpoint2 are NOT available,
assertThrows(JedisConnectionException.class, () -> failoverClient.info("server"));
assertThat(provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker().getState(),
- equalTo(CircuitBreaker.State.OPEN));
+ equalTo(CircuitBreaker.State.FORCED_OPEN));
// and since no other nodes are available, it should propagate the errors to the caller
// subsequent calls
@@ -183,8 +183,11 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru
  private List<MultiClusterClientConfig.ClusterConfig> getClusterConfigs(
JedisClientConfig clientConfig, EndpointConfig... endpoints) {
- return Arrays.stream(endpoints).map(
- e -> MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig).build())
+ int weight = endpoints.length;
+ AtomicInteger weightCounter = new AtomicInteger(weight);
+ return Arrays.stream(endpoints)
+ .map(e -> MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig)
+ .weight(1.0f / weightCounter.getAndIncrement()).healthCheckEnabled(false).build())
.collect(Collectors.toList());
}
@@ -264,10 +267,9 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(),
endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1)
- .circuitBreakerSlidingWindowType(COUNT_BASED).circuitBreakerSlidingWindowSize(3)
- .circuitBreakerFailureRateThreshold(50) // 50% failure
- // rate threshold
- .circuitBreakerSlidingWindowMinCalls(3).build();
+ .circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2)
+          .circuitBreakerFailureRateThreshold(50f) // 50% failure rate
+ .build();
MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
failoverConfig);
@@ -297,7 +299,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc
// Circuit breaker should be open after just one command with retries
assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
- equalTo(CircuitBreaker.State.OPEN));
+ equalTo(CircuitBreaker.State.FORCED_OPEN));
// Next command should be routed to the second endpoint
// Command 2
@@ -375,7 +377,7 @@ public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception {
// Check that the circuit breaker for Endpoint 1 is open
assertThat(
customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
- equalTo(CircuitBreaker.State.OPEN));
+ equalTo(CircuitBreaker.State.FORCED_OPEN));
// Disable redisProxy1 to enforce the current blpop command failure
redisProxy1.disable();
@@ -425,9 +427,8 @@ private MultiClusterPooledConnectionProvider createProvider() {
MultiClusterClientConfig failoverConfig = new MultiClusterClientConfig.Builder(
getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
- .retryWaitDuration(1).circuitBreakerSlidingWindowType(COUNT_BASED)
- .circuitBreakerSlidingWindowSize(1).circuitBreakerFailureRateThreshold(100)
- .circuitBreakerSlidingWindowMinCalls(1).build();
+ .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
+ .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build();
return new MultiClusterPooledConnectionProvider(failoverConfig);
}
@@ -444,9 +445,8 @@ private MultiClusterPooledConnectionProvider createProvider(
MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
- .retryWaitDuration(1).circuitBreakerSlidingWindowType(COUNT_BASED)
- .circuitBreakerSlidingWindowSize(1).circuitBreakerFailureRateThreshold(100)
- .circuitBreakerSlidingWindowMinCalls(1);
+ .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
+ .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f);
if (configCustomizer != null) {
builder = configCustomizer.apply(builder);
diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
index f2134f0ea3..bc00caf8ed 100644
--- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
@@ -109,9 +109,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio
MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfig);
- builder.circuitBreakerSlidingWindowType(CircuitBreakerConfig.SlidingWindowType.TIME_BASED);
builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS
- builder.circuitBreakerSlidingWindowMinCalls(1);
builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit
// breaker
diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
new file mode 100644
index 0000000000..755325c705
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
@@ -0,0 +1,249 @@
+package redis.clients.jedis.mcf;
+
+import static org.junit.jupiter.api.Assertions.*;
+import static org.mockito.ArgumentMatchers.*;
+import static org.mockito.Mockito.*;
+
+import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import redis.clients.jedis.BuilderFactory;
+import redis.clients.jedis.CommandArguments;
+import redis.clients.jedis.CommandObject;
+import redis.clients.jedis.Connection;
+import redis.clients.jedis.DefaultJedisClientConfig;
+import redis.clients.jedis.HostAndPort;
+import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.Protocol;
+import redis.clients.jedis.exceptions.JedisConnectionException;
+import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.util.ReflectionTestUtil;
+
+/**
+ * Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures
+ * must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the
+ * provider and cluster wiring to avoid network I/O.
+ */
+public class CircuitBreakerThresholdsTest {
+
+ private MultiClusterPooledConnectionProvider realProvider;
+ private MultiClusterPooledConnectionProvider spyProvider;
+ private Cluster cluster;
+ private CircuitBreakerCommandExecutor executor;
+  private CommandObject<String> dummyCommand;
+ private TrackingConnectionPool poolMock;
+ private HostAndPort fakeEndpoint = new HostAndPort("fake", 6379);
+ private HostAndPort fakeEndpoint2 = new HostAndPort("fake2", 6379);
+ private ClusterConfig[] fakeClusterConfigs;
+
+ @BeforeEach
+ public void setup() throws Exception {
+
+ ClusterConfig[] clusterConfigs = new ClusterConfig[] {
+ ClusterConfig.builder(fakeEndpoint, DefaultJedisClientConfig.builder().build())
+ .healthCheckEnabled(false).weight(1.0f).build(),
+ ClusterConfig.builder(fakeEndpoint2, DefaultJedisClientConfig.builder().build())
+ .healthCheckEnabled(false).weight(0.5f).build() };
+ fakeClusterConfigs = clusterConfigs;
+
+ MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig.builder(clusterConfigs)
+ .circuitBreakerFailureRateThreshold(50.0f).circuitBreakerMinNumOfFailures(3)
+ .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false);
+
+ MultiClusterClientConfig mcc = cfgBuilder.build();
+
+ realProvider = new MultiClusterPooledConnectionProvider(mcc);
+ spyProvider = spy(realProvider);
+
+ cluster = spyProvider.getCluster();
+
+ executor = new CircuitBreakerCommandExecutor(spyProvider);
+
+ dummyCommand = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
+ BuilderFactory.STRING);
+
+ // Replace the cluster's pool with a mock to avoid real network I/O
+ poolMock = mock(TrackingConnectionPool.class);
+ ReflectionTestUtil.setField(cluster, "connectionPool", poolMock);
+ }
+
+ /**
+ * Below minimum failures; even if all calls are failures, failover should NOT trigger.
+ */
+ @Test
+ public void belowMinFailures_doesNotFailover() {
+ // Always failing connections
+ Connection failing = mock(Connection.class);
+    when(failing.executeCommand(org.mockito.Mockito.<CommandObject<?>> any()))
+ .thenThrow(new JedisConnectionException("fail"));
+ doNothing().when(failing).close();
+ when(poolMock.getResource()).thenReturn(failing);
+
+ for (int i = 0; i < 2; i++) {
+ assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand));
+ }
+
+ // Below min failures; CB remains CLOSED
+ assertEquals(CircuitBreaker.State.CLOSED, spyProvider.getClusterCircuitBreaker().getState());
+ }
+
+ /**
+ * Reaching minFailures and exceeding failure rate threshold should trigger failover.
+ */
+ @Test
+ public void minFailuresAndRateExceeded_triggersFailover() {
+ // Always failing connections
+ Connection failing = mock(Connection.class);
+    when(failing.executeCommand(org.mockito.Mockito.<CommandObject<?>> any()))
+ .thenThrow(new JedisConnectionException("fail"));
+ doNothing().when(failing).close();
+ when(poolMock.getResource()).thenReturn(failing);
+
+ // Reach min failures and exceed rate threshold
+ for (int i = 0; i < 3; i++) {
+ assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand));
+ }
+
+ // Next call should hit open CB (CallNotPermitted) and trigger failover
+ assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand));
+
+ verify(spyProvider, atLeastOnce()).switchToHealthyCluster(eq(SwitchReason.CIRCUIT_BREAKER),
+ any());
+ assertEquals(CircuitBreaker.State.FORCED_OPEN,
+ spyProvider.getCluster(fakeEndpoint).getCircuitBreaker().getState());
+ }
+
+ /**
+ * Even after reaching minFailures, if failure rate is below threshold, do not failover.
+ */
+ @Test
+ public void rateBelowThreshold_doesNotFailover() throws Exception {
+ // Use local provider with higher threshold (80%) and no retries
+ MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig
+ .builder(fakeClusterConfigs).circuitBreakerFailureRateThreshold(80.0f)
+ .circuitBreakerMinNumOfFailures(3).circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1)
+ .retryOnFailover(false);
+ MultiClusterPooledConnectionProvider rp = new MultiClusterPooledConnectionProvider(
+ cfgBuilder.build());
+ MultiClusterPooledConnectionProvider sp = spy(rp);
+ Cluster c = sp.getCluster();
+ try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(sp)) {
+      CommandObject<String> cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
+ BuilderFactory.STRING);
+
+ TrackingConnectionPool pool = mock(TrackingConnectionPool.class);
+ ReflectionTestUtil.setField(c, "connectionPool", pool);
+
+ // 3 successes
+ Connection success = mock(Connection.class);
+      when(success.executeCommand(org.mockito.Mockito.<CommandObject<?>> any()))
+ .thenReturn("PONG");
+ doNothing().when(success).close();
+ when(pool.getResource()).thenReturn(success);
+ for (int i = 0; i < 3; i++) {
+ assertEquals("PONG", ex.executeCommand(cmd));
+ }
+
+ // 3 failures -> total 6 calls, 50% failure rate; threshold 80% means stay CLOSED
+ Connection failing = mock(Connection.class);
+      when(failing.executeCommand(org.mockito.Mockito.<CommandObject<?>> any()))
+ .thenThrow(new JedisConnectionException("fail"));
+ doNothing().when(failing).close();
+ when(pool.getResource()).thenReturn(failing);
+ for (int i = 0; i < 3; i++) {
+ assertThrows(JedisConnectionException.class, () -> ex.executeCommand(cmd));
+ }
+
+ assertEquals(CircuitBreaker.State.CLOSED, sp.getClusterCircuitBreaker().getState());
+ }
+ }
+
+ @Test
+ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
+ MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig
+ .builder(fakeClusterConfigs);
+ cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3)
+ .circuitBreakerSlidingWindowSize(10);
+ MultiClusterClientConfig mcc = cfgBuilder.build();
+
+ CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc);
+
+ assertEquals(100.0f, adapter.getFailureRateThreshold(), 0.0001f);
+ assertEquals(Integer.MAX_VALUE, adapter.getMinimumNumberOfCalls());
+ }
+
+ @ParameterizedTest
+ @CsvSource({
+ // minFailures, ratePercent, successes, failures, expectFailoverOnNext
+ "0, 1.0, 0, 1, true", //
+ "1, 1.0, 0, 1, true", //
+ "3, 50.0, 0, 3, true", //
+ "1, 100.0, 0, 1, true", //
+ "0, 100.0, 99, 1, false", //
+ "0, 1.0, 99, 1, true", //
+ // additional edge cases
+ "1, 0.0, 0, 1, true", //
+ "3, 50.0, 3, 2, false", //
+ "1000, 1.0, 198, 2, false", })
+ public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures,
+ boolean expectFailoverOnNext) throws Exception {
+
+ MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig
+ .builder(fakeClusterConfigs).circuitBreakerFailureRateThreshold(ratePercent)
+ .circuitBreakerMinNumOfFailures(minFailures)
+ .circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1)
+ .retryOnFailover(false);
+
+ MultiClusterPooledConnectionProvider real = new MultiClusterPooledConnectionProvider(
+ cfgBuilder.build());
+ MultiClusterPooledConnectionProvider spy = spy(real);
+ Cluster c = spy.getCluster();
+ try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(spy)) {
+
+      CommandObject<String> cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
+ BuilderFactory.STRING);
+
+ TrackingConnectionPool pool = mock(TrackingConnectionPool.class);
+ ReflectionTestUtil.setField(c, "connectionPool", pool);
+
+ if (successes > 0) {
+ Connection ok = mock(Connection.class);
+        when(ok.executeCommand(org.mockito.Mockito.<CommandObject<?>> any()))
+ .thenReturn("PONG");
+ doNothing().when(ok).close();
+ when(pool.getResource()).thenReturn(ok);
+ for (int i = 0; i < successes; i++) {
+ ex.executeCommand(cmd);
+ }
+ }
+
+ if (failures > 0) {
+ Connection bad = mock(Connection.class);
+        when(bad.executeCommand(org.mockito.Mockito.<CommandObject<?>> any()))
+ .thenThrow(new JedisConnectionException("fail"));
+ doNothing().when(bad).close();
+ when(pool.getResource()).thenReturn(bad);
+ for (int i = 0; i < failures; i++) {
+ try {
+ ex.executeCommand(cmd);
+ } catch (Exception ignore) {
+ }
+ }
+ }
+
+ if (expectFailoverOnNext) {
+ assertThrows(Exception.class, () -> ex.executeCommand(cmd));
+ verify(spy, atLeastOnce()).switchToHealthyCluster(eq(SwitchReason.CIRCUIT_BREAKER), any());
+ assertEquals(CircuitBreaker.State.FORCED_OPEN, c.getCircuitBreaker().getState());
+ } else {
+ CircuitBreaker.State st = c.getCircuitBreaker().getState();
+ assertTrue(st == CircuitBreaker.State.CLOSED || st == CircuitBreaker.State.HALF_OPEN);
+ }
+ }
+ }
+
+}
diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
new file mode 100644
index 0000000000..c603509f32
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
@@ -0,0 +1,183 @@
+package redis.clients.jedis.mcf;
+
+import static org.junit.jupiter.api.Assertions.*;
+import static org.mockito.ArgumentMatchers.*;
+import static org.mockito.Mockito.*;
+
+import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import redis.clients.jedis.DefaultJedisClientConfig;
+import redis.clients.jedis.HostAndPort;
+import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+
+/**
+ * Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures
+ * must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the
+ * provider and cluster wiring to avoid network I/O.
+ */
+public class ClusterEvaluateThresholdsTest {
+
+ private MultiClusterPooledConnectionProvider provider;
+ private Cluster cluster;
+ private CircuitBreaker circuitBreaker;
+ private CircuitBreaker.Metrics metrics;
+
+ @BeforeEach
+ public void setup() {
+ provider = mock(MultiClusterPooledConnectionProvider.class);
+ cluster = mock(Cluster.class);
+
+ circuitBreaker = mock(CircuitBreaker.class);
+ metrics = mock(CircuitBreaker.Metrics.class);
+
+ when(cluster.getCircuitBreaker()).thenReturn(circuitBreaker);
+ when(circuitBreaker.getMetrics()).thenReturn(metrics);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ // Configure the mock to call the real evaluateThresholds method
+ doCallRealMethod().when(cluster).evaluateThresholds(anyBoolean());
+
+ }
+
+ /**
+ * Below minimum failures; even if all calls are failures, failover should NOT trigger. Note: The
+ * isThresholdsExceeded method adds +1 to account for the current failing call, so we set
+ * failures=1 which becomes 2 with +1, still below minFailures=3.
+ */
+ @Test
+ public void belowMinFailures_doesNotFailover() {
+ when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(metrics.getNumberOfFailedCalls()).thenReturn(1); // +1 becomes 2, still < 3
+ when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0);
+ when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ cluster.evaluateThresholds(false);
+ verify(circuitBreaker, never()).transitionToOpenState();
+ verify(provider, never()).switchToHealthyCluster(any(), any());
+ }
+
+ /**
+ * Reaching minFailures and exceeding failure rate threshold should trigger circuit breaker to
+ * OPEN state. Note: The isThresholdsExceeded method adds +1 to account for the current failing
+ * call, so we set failures=2 which becomes 3 with +1, reaching minFailures=3.
+ */
+ @Test
+ public void minFailuresAndRateExceeded_triggersOpenState() {
+ when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, reaching minFailures
+ when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0);
+ when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ cluster.evaluateThresholds(false);
+ verify(circuitBreaker, times(1)).transitionToOpenState();
+ }
+
+ /**
+ * Even after reaching minFailures, if failure rate is below threshold, do not failover. Note: The
+ * isThresholdsExceeded method adds +1 to account for the current failing call, so we set
+ * failures=2 which becomes 3 with +1, reaching minFailures=3. Rate calculation: (3 failures) / (3
+ * failures + 3 successes) = 50% < 80% threshold.
+ */
+ @Test
+ public void rateBelowThreshold_doesNotFailover() {
+ when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(metrics.getNumberOfSuccessfulCalls()).thenReturn(3);
+ when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, rate = 3/(3+3) = 50%
+ when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(80.0f);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ cluster.evaluateThresholds(false);
+
+ verify(circuitBreaker, never()).transitionToOpenState();
+ verify(provider, never()).switchToHealthyCluster(any(), any());
+ }
+
+ @Test
+ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
+ MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig
+ .builder(java.util.Arrays.asList(MultiClusterClientConfig.ClusterConfig
+ .builder(new HostAndPort("localhost", 6379), DefaultJedisClientConfig.builder().build())
+ .healthCheckEnabled(false).build()));
+ cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3)
+ .circuitBreakerSlidingWindowSize(10);
+ MultiClusterClientConfig mcc = cfgBuilder.build();
+
+ CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc);
+
+ assertEquals(100.0f, adapter.getFailureRateThreshold(), 0.0001f);
+ assertEquals(Integer.MAX_VALUE, adapter.getMinimumNumberOfCalls());
+ }
+
+ @ParameterizedTest
+ @CsvSource({
+ // Format: "minFails, rate%, success, fails, lastFailRecorded, expected"
+
+ // === Basic threshold crossing cases ===
+ "0, 1.0, 0, 1, false, true", // +1 = 2 fails, rate=100% >= 1%, min=0 -> trigger
+ "0, 1.0, 0, 1, true, true", // +0 = 1 fails, rate=100% >= 1%, min=0 -> trigger
+
+ "1, 1.0, 0, 0, false, true", // +1 = 1 fails, rate=100% >= 1%, min=1 -> trigger
+ "1, 1.0, 0, 0, true, false", // +0 = 0 fails, 0 < 1 min -> no trigger
+
+ "3, 50.0, 0, 2, false, true", // +1 = 3 fails, rate=100% >= 50%, min=3 -> trigger
+ "3, 50.0, 0, 2, true, false", // +0 = 2 fails, 2 < 3 min -> no trigger
+
+ // === Rate threshold boundary cases ===
+ "1, 100.0, 0, 0, false, true", // +1 = 1 fails, rate=100% >= 100%, min=1 -> trigger
+ "1, 100.0, 0, 0, true, false", // +0 = 0 fails, 0 < 1 min -> no trigger
+
+ "0, 100.0, 99, 1, false, false", // +1 = 2 fails, rate=1.98% < 100% -> no trigger
+ "0, 100.0, 99, 1, true, false", // +0 = 1 fails, rate=1.0% < 100% -> no trigger
+
+ "0, 1.0, 99, 1, false, true", // +1 = 2 fails, rate=1.98% >= 1%, min=0 -> trigger
+ "0, 1.0, 99, 1, true, true", // +0 = 1 fails, rate=1.0% >= 1%, min=0 -> trigger
+
+ // === Zero rate threshold (always trigger if min failures met) ===
+ "1, 0.0, 0, 0, false, true", // +1 = 1 fails, rate=100% >= 0%, min=1 -> trigger
+ "1, 0.0, 0, 0, true, false", // +0 = 0 fails, 0 < 1 min -> no trigger
+ "1, 0.0, 100, 0, false, true", // +1 = 1 fails, rate=0.99% >= 0%, min=1 -> trigger
+ "1, 0.0, 100, 0, true, false", // +0 = 0 fails, 0 < 1 min -> no trigger
+
+ // === High minimum failures cases ===
+ "3, 50.0, 3, 1, false, false", // +1 = 2 fails, 2 < 3 min -> no trigger
+ "3, 50.0, 3, 1, true, false", // +0 = 1 fails, 1 < 3 min -> no trigger
+ "1000, 1.0, 198, 2, false, false", // +1 = 3 fails, 3 < 1000 min -> no trigger
+ "1000, 1.0, 198, 2, true, false", // +0 = 2 fails, 2 < 1000 min -> no trigger
+
+ // === Corner cases ===
+ "0, 50.0, 0, 0, false, true", // +1 = 1 fails, rate=100% >= 50%, min=0 -> trigger
+ "0, 50.0, 0, 0, true, false", // +0 = 0 fails, no calls -> no trigger
+ "1, 50.0, 1, 1, false, true", // +1 = 2 fails, rate=66.7% >= 50%, min=1 -> trigger
+ "1, 50.0, 1, 1, true, true", // +0 = 1 fails, rate=50% >= 50%, min=1 -> trigger
+ "2, 33.0, 2, 1, false, true", // +1 = 2 fails, rate=50% >= 33%, min=2 -> trigger
+ "2, 33.0, 2, 1, true, false", // +0 = 1 fails, 1 < 2 min -> no trigger
+ "5, 20.0, 20, 4, false, true", // +1 = 5 fails, rate=20% >= 20%, min=5 -> trigger
+ "5, 20.0, 20, 4, true, false", // +0 = 4 fails, 4 < 5 min -> no trigger
+ "3, 75.0, 1, 2, false, true", // +1 = 3 fails, rate=75% >= 75%, min=3 -> trigger
+ "3, 75.0, 1, 2, true, false", // +0 = 2 fails, 2 < 3 min -> no trigger
+ })
+ public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures,
+ boolean lastFailRecorded, boolean expectOpenState) {
+
+ when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(minFailures);
+ when(metrics.getNumberOfSuccessfulCalls()).thenReturn(successes);
+ when(metrics.getNumberOfFailedCalls()).thenReturn(failures);
+ when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(ratePercent);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ cluster.evaluateThresholds(lastFailRecorded);
+
+ if (expectOpenState) {
+ verify(circuitBreaker, times(1)).transitionToOpenState();
+ } else {
+ verify(circuitBreaker, never()).transitionToOpenState();
+ }
+ }
+
+}
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
index 25de5a7c3e..fee216f2be 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
@@ -105,7 +105,7 @@ void testGracePeriodConfiguration() {
MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
- assertEquals(60000, defaultConfig.getGracePeriod()); // Default is 10 seconds
+ assertEquals(60000, defaultConfig.getGracePeriod());
// Test custom value
MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder(
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
index 835a655df3..d1cb8b90e9 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
@@ -12,7 +12,6 @@
import org.junit.jupiter.api.Test;
-import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
@@ -94,10 +93,8 @@ private MultiClusterPooledConnectionProvider getMCCF(
.collect(Collectors.toList());
MultiClusterClientConfig mccf = new MultiClusterClientConfig.Builder(clusterConfigs)
- .retryMaxAttempts(1).retryWaitDuration(1)
- .circuitBreakerSlidingWindowType(SlidingWindowType.COUNT_BASED)
- .circuitBreakerSlidingWindowSize(1).circuitBreakerFailureRateThreshold(100)
- .circuitBreakerSlidingWindowMinCalls(1).build();
+ .retryMaxAttempts(1).retryWaitDuration(1).circuitBreakerSlidingWindowSize(1)
+ .circuitBreakerFailureRateThreshold(100).build();
return new MultiClusterPooledConnectionProvider(mccf);
}
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
index d7b61dcd20..b7205fd808 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
@@ -280,8 +280,11 @@ void testHealthStatusManagerLifecycle() throws InterruptedException {
// Register listener before adding health check to capture the initial event
manager.registerListener(testEndpoint, listener);
+ HealthCheckStrategy delayedStrategy = new TestHealthCheckStrategy(2000, 1000, 3,
+ BuiltIn.ALL_SUCCESS, 100, e -> HealthStatus.HEALTHY);
+
// Add health check - this will start async health checking
- manager.add(testEndpoint, alwaysHealthyStrategy);
+ manager.add(testEndpoint, delayedStrategy);
// Initially should still be UNKNOWN until first check completes
assertEquals(HealthStatus.UNKNOWN, manager.getHealthStatus(testEndpoint));
@@ -785,7 +788,7 @@ void testPolicy_Majority_EarlyFailStopsAtTwo() throws Exception {
CountDownLatch unhealthyLatch = new CountDownLatch(1);
TestHealthCheckStrategy strategy = new TestHealthCheckStrategy(
- HealthCheckStrategy.Config.builder().interval(5).timeout(200).numProbes(4)
+ HealthCheckStrategy.Config.builder().interval(5000).timeout(200).numProbes(4)
.policy(BuiltIn.MAJORITY_SUCCESS).delayInBetweenProbes(5).build(),
e -> {
int c = callCount.incrementAndGet();
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
index 3d0f114c18..e4992fb92b 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
@@ -12,7 +12,8 @@
import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException;
import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException;
-import java.lang.reflect.Field;
+import redis.clients.jedis.util.ReflectionTestUtil;
+
import java.time.Duration;
import java.util.concurrent.atomic.AtomicInteger;
@@ -147,53 +148,35 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex
private static void setBuilderFailoverConfig(MultiClusterClientConfig.Builder builder,
int maxAttempts, int delayMs) throws Exception {
- Field fMax = builder.getClass().getDeclaredField("maxNumFailoverAttempts");
- fMax.setAccessible(true);
- fMax.setInt(builder, maxAttempts);
+ ReflectionTestUtil.setField(builder, "maxNumFailoverAttempts", maxAttempts);
- Field fDelay = builder.getClass().getDeclaredField("delayInBetweenFailoverAttempts");
- fDelay.setAccessible(true);
- fDelay.setInt(builder, delayMs);
+ ReflectionTestUtil.setField(builder, "delayInBetweenFailoverAttempts", delayMs);
}
private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception {
// Access the underlying MultiClusterClientConfig inside provider and adjust fields for this
// test
- Field cfgField = provider.getClass().getDeclaredField("multiClusterClientConfig");
- cfgField.setAccessible(true);
- Object cfg = cfgField.get(provider);
+ Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig");
- Field fMax = cfg.getClass().getDeclaredField("maxNumFailoverAttempts");
- fMax.setAccessible(true);
- fMax.setInt(cfg, maxAttempts);
+ ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts);
- Field fDelay = cfg.getClass().getDeclaredField("delayInBetweenFailoverAttempts");
- fDelay.setAccessible(true);
- fDelay.setInt(cfg, delayMs);
+ ReflectionTestUtil.setField(cfg, "delayInBetweenFailoverAttempts", delayMs);
}
private int getProviderMaxAttempts() throws Exception {
- Field cfgField = provider.getClass().getDeclaredField("multiClusterClientConfig");
- cfgField.setAccessible(true);
- Object cfg = cfgField.get(provider);
- Field fMax = cfg.getClass().getDeclaredField("maxNumFailoverAttempts");
- fMax.setAccessible(true);
- return fMax.getInt(cfg);
+ Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig");
+
+ return ReflectionTestUtil.getField(cfg, "maxNumFailoverAttempts");
}
private int getProviderDelayMs() throws Exception {
- Field cfgField = provider.getClass().getDeclaredField("multiClusterClientConfig");
- cfgField.setAccessible(true);
- Object cfg = cfgField.get(provider);
- Field fDelay = cfg.getClass().getDeclaredField("delayInBetweenFailoverAttempts");
- fDelay.setAccessible(true);
- return fDelay.getInt(cfg);
+ Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig");
+
+ return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts");
}
private int getProviderAttemptCount() throws Exception {
- Field f = provider.getClass().getDeclaredField("failoverAttemptCount");
- f.setAccessible(true);
- AtomicInteger val = (AtomicInteger) f.get(provider);
+ AtomicInteger val = ReflectionTestUtil.getField(provider, "failoverAttemptCount");
return val.get();
}
}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java
index 3b429a1436..88b2948016 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java
@@ -1,8 +1,6 @@
package redis.clients.jedis.mcf;
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
-import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
-
import org.awaitility.Awaitility;
import org.awaitility.Durations;
import org.junit.jupiter.api.*;
@@ -113,8 +111,8 @@ public void testRunClusterFailoverPostProcessor() {
MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs);
// Configures a single failed command to trigger an open circuit on the next subsequent failure
- builder.circuitBreakerSlidingWindowSize(1);
- builder.circuitBreakerSlidingWindowMinCalls(1);
+ builder.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(1)
+ .circuitBreakerFailureRateThreshold(0);
AtomicBoolean isValidTest = new AtomicBoolean(false);
@@ -283,9 +281,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
// and open to impact from other defaulted values withing the components in use.
MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider(
new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100)
- .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowMinCalls(3)
- .circuitBreakerSlidingWindowSize(5)
- .circuitBreakerSlidingWindowType(SlidingWindowType.TIME_BASED)
+ .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5)
.circuitBreakerFailureRateThreshold(60).build()) {
};
diff --git a/src/test/java/redis/clients/jedis/mcf/StatusTrackerTest.java b/src/test/java/redis/clients/jedis/mcf/StatusTrackerTest.java
index 95cd541631..c17fa2ff51 100644
--- a/src/test/java/redis/clients/jedis/mcf/StatusTrackerTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/StatusTrackerTest.java
@@ -5,6 +5,7 @@
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -184,15 +185,20 @@ void testWaitForHealthStatus_IgnoresOtherEndpoints() throws InterruptedException
void testWaitForHealthStatus_InterruptHandling() {
// Given: Health status is initially UNKNOWN and will stay that way
when(mockHealthStatusManager.getHealthStatus(testEndpoint)).thenReturn(HealthStatus.UNKNOWN);
+ when(mockHealthStatusManager.getMaxWaitFor(any())).thenReturn(3000L);
- // When: Interrupt the waiting thread
+ AtomicReference<String> interruptedThreadName = new AtomicReference<>();
+ AtomicReference<Exception> thrownException = new AtomicReference<>();
+ AtomicReference<Boolean> isInterrupted = new AtomicReference<>();
+ // When: Interrupt the waiting thread
Thread testThread = new Thread(() -> {
try {
statusTracker.waitForHealthStatus(testEndpoint);
fail("Should have thrown JedisConnectionException due to interrupt");
} catch (Exception e) {
- assertTrue(e.getMessage().contains("Interrupted while waiting"));
- assertTrue(Thread.currentThread().isInterrupted());
+ interruptedThreadName.set(Thread.currentThread().getName());
+ thrownException.set(e);
+ isInterrupted.set(Thread.currentThread().isInterrupted());
}
});
@@ -215,6 +221,8 @@ void testWaitForHealthStatus_InterruptHandling() {
}
assertFalse(testThread.isAlive(), "Test thread should have completed");
+ assertTrue(thrownException.get().getMessage().contains("Interrupted while waiting"));
+ assertTrue(isInterrupted.get(), "Thread should be interrupted");
}
@Test
diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
index b6f34106f2..3be7d29656 100644
--- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
@@ -108,15 +108,15 @@ public void transactionWithSwitch() {
@Test
public void commandFailoverUnresolvableHost() {
- int slidingWindowMinCalls = 2;
+ int slidingWindowMinFails = 2;
int slidingWindowSize = 2;
HostAndPort unresolvableHostAndPort = new HostAndPort("unresolvable", 6379);
MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
getClusterConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort()))
.retryWaitDuration(1).retryMaxAttempts(1)
- .circuitBreakerSlidingWindowMinCalls(slidingWindowMinCalls)
- .circuitBreakerSlidingWindowSize(slidingWindowSize);
+ .circuitBreakerSlidingWindowSize(slidingWindowSize)
+ .circuitBreakerMinNumOfFailures(slidingWindowMinFails);
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider(
@@ -129,16 +129,16 @@ public void commandFailoverUnresolvableHost() {
log.info("Starting calls to Redis");
assertFalse(failoverReporter.failedOver);
- for (int attempt = 0; attempt < slidingWindowMinCalls; attempt++) {
+ for (int attempt = 0; attempt < slidingWindowMinFails; attempt++) {
+ assertFalse(failoverReporter.failedOver);
Throwable thrown = assertThrows(JedisConnectionException.class,
() -> jedis.hset(key, "f1", "v1"));
assertThat(thrown.getCause(), instanceOf(UnknownHostException.class));
- assertFalse(failoverReporter.failedOver);
}
- // should failover now
- jedis.hset(key, "f1", "v1");
+ // already failed over now
assertTrue(failoverReporter.failedOver);
+ jedis.hset(key, "f1", "v1");
assertEquals(Collections.singletonMap("f1", "v1"), jedis.hgetAll(key));
jedis.flushAll();
@@ -148,7 +148,7 @@ public void commandFailoverUnresolvableHost() {
@Test
public void commandFailover() {
- int slidingWindowMinCalls = 6;
+ int slidingWindowMinFails = 6;
int slidingWindowSize = 6;
int retryMaxAttempts = 3;
@@ -157,7 +157,8 @@ public void commandFailover() {
.retryMaxAttempts(retryMaxAttempts) // Default
// is
// 3
- .circuitBreakerSlidingWindowMinCalls(slidingWindowMinCalls)
+ .circuitBreakerFailureRateThreshold(50)
+ .circuitBreakerMinNumOfFailures(slidingWindowMinFails)
.circuitBreakerSlidingWindowSize(slidingWindowSize);
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
@@ -191,12 +192,10 @@ public void commandFailover() {
@Test
public void pipelineFailover() {
- int slidingWindowMinCalls = 10;
int slidingWindowSize = 10;
MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
- .circuitBreakerSlidingWindowMinCalls(slidingWindowMinCalls)
.circuitBreakerSlidingWindowSize(slidingWindowSize)
.fallbackExceptionList(Collections.singletonList(JedisConnectionException.class));
@@ -225,14 +224,11 @@ public void pipelineFailover() {
@Test
public void failoverFromAuthError() {
- int slidingWindowMinCalls = 10;
int slidingWindowSize = 10;
MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
getClusterConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(),
- workingEndpoint.getHostAndPort()))
- .circuitBreakerSlidingWindowMinCalls(slidingWindowMinCalls)
- .circuitBreakerSlidingWindowSize(slidingWindowSize)
+ workingEndpoint.getHostAndPort())).circuitBreakerSlidingWindowSize(slidingWindowSize)
.fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class));
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index 6531cc5490..59a62ed7e0 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -70,9 +70,7 @@ public void testFailover() {
MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfig);
- builder.circuitBreakerSlidingWindowType(CircuitBreakerConfig.SlidingWindowType.TIME_BASED);
builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS
- builder.circuitBreakerSlidingWindowMinCalls(1);
builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit breaker
builder.failbackSupported(true);
diff --git a/src/test/java/redis/clients/jedis/util/ReflectionTestUtil.java b/src/test/java/redis/clients/jedis/util/ReflectionTestUtil.java
new file mode 100644
index 0000000000..50b6448429
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/util/ReflectionTestUtil.java
@@ -0,0 +1,91 @@
+package redis.clients.jedis.util;
+
+import java.lang.reflect.Field;
+
+/**
+ * Simple utility for accessing private fields in tests using reflection.
+ *
+ * This utility is intended for testing purposes only to access internal state that is not exposed
+ * through public APIs.
+ *
+ */
+public class ReflectionTestUtil {
+
+ /**
+ * Gets the value of a private field from an object.
+ * @param target the object containing the field
+ * @param fieldName the name of the field to access
+ * @param <T> the expected type of the field value
+ * @return the value of the field
+ * @throws RuntimeException if the field cannot be accessed
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> T getField(Object target, String fieldName) {
+ if (target == null) {
+ throw new IllegalArgumentException("Target object cannot be null");
+ }
+ if (fieldName == null || fieldName.isEmpty()) {
+ throw new IllegalArgumentException("Field name cannot be null or empty");
+ }
+
+ try {
+ Field field = findField(target.getClass(), fieldName);
+ field.setAccessible(true);
+ return (T) field.get(target);
+ } catch (NoSuchFieldException e) {
+ throw new RuntimeException(
+ "Field '" + fieldName + "' not found in class " + target.getClass().getName(), e);
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(
+ "Cannot access field '" + fieldName + "' in class " + target.getClass().getName(), e);
+ }
+ }
+
+ /**
+ * Sets the value of a private field in an object.
+ * @param target the object containing the field
+ * @param fieldName the name of the field to set
+ * @param value the value to set
+ * @throws RuntimeException if the field cannot be accessed
+ */
+ public static void setField(Object target, String fieldName, Object value) {
+ if (target == null) {
+ throw new IllegalArgumentException("Target object cannot be null");
+ }
+ if (fieldName == null || fieldName.isEmpty()) {
+ throw new IllegalArgumentException("Field name cannot be null or empty");
+ }
+
+ try {
+ Field field = findField(target.getClass(), fieldName);
+ field.setAccessible(true);
+ field.set(target, value);
+ } catch (NoSuchFieldException e) {
+ throw new RuntimeException(
+ "Field '" + fieldName + "' not found in class " + target.getClass().getName(), e);
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(
+ "Cannot access field '" + fieldName + "' in class " + target.getClass().getName(), e);
+ }
+ }
+
+ /**
+ * Finds a field in the class hierarchy.
+ * @param clazz the class to search
+ * @param fieldName the name of the field
+ * @return the field
+ * @throws NoSuchFieldException if the field is not found
+ */
+ private static Field findField(Class<?> clazz, String fieldName) throws NoSuchFieldException {
+ Class<?> current = clazz;
+ while (current != null) {
+ try {
+ return current.getDeclaredField(fieldName);
+ } catch (NoSuchFieldException e) {
+ current = current.getSuperclass();
+ }
+ }
+ throw new NoSuchFieldException(
+ "Field '" + fieldName + "' not found in class hierarchy of " + clazz.getName());
+ }
+}
From 03387cfb0fd89f2dbf8b9d8f234d39b76148feaa Mon Sep 17 00:00:00 2001
From: Ivo Gaydazhiev
Date: Fri, 3 Oct 2025 15:40:29 +0300
Subject: [PATCH 04/17] [automatic failover] feat: Add MultiDbClient with
multi-endpoint failover and circuit breaker support (#4300)
* feat: introduce ResilientRedisClient with multi-endpoint failover support
Add ResilientRedisClient extending UnifiedJedis with automatic failover
capabilities across multiple weighted Redis endpoints. Includes circuit
breaker pattern, health monitoring, and configurable retry logic for
high-availability Redis deployments.
* format
* mark ResilientRedisClientTest as integration one
* fix test
- make sure endpoint is healthy before activating it
* Rename ResilientClient to align with design
- ResilientClient -> MultiDbClient (builder, tests, etc)
* Rename setActiveEndpoint to setActiveDatabaseEndpoint
* Rename clusterSwitchListener to databaseSwitchListener
* Rename multiClusterConfig to multiDbConfig
* fix api doc's error
* fix compilation error after rebase
* format
* fix example in javadoc
* Update ActiveActiveFailoverTest scenario test to use builder's
# Conflicts:
# src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
* rename setActiveDatabaseEndpoint -> setActiveDatabase
* isHealthy: throw exception if cluster does not exist
* format
---
pom.xml | 3 +
.../jedis/MultiClusterClientConfig.java | 77 ++++-
.../redis/clients/jedis/MultiDbClient.java | 288 ++++++++++++++++++
.../jedis/builders/MultiDbClientBuilder.java | 139 +++++++++
.../jedis/mcf/ClusterSwitchEventArgs.java | 1 +
.../MultiClusterPooledConnectionProvider.java | 65 +++-
.../clients/jedis/MultiDbClientTest.java | 206 +++++++++++++
.../scenario/ActiveActiveFailoverTest.java | 47 ++-
.../clients/jedis/util/ClientTestUtil.java | 11 +
9 files changed, 798 insertions(+), 39 deletions(-)
create mode 100644 src/main/java/redis/clients/jedis/MultiDbClient.java
create mode 100644 src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
create mode 100644 src/test/java/redis/clients/jedis/MultiDbClientTest.java
create mode 100644 src/test/java/redis/clients/jedis/util/ClientTestUtil.java
diff --git a/pom.xml b/pom.xml
index f21927b66b..fd98539365 100644
--- a/pom.xml
+++ b/pom.xml
@@ -491,6 +491,9 @@
src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
src/main/java/redis/clients/jedis/HostAndPort.java
**/builders/*.java
+ **/MultiDb*.java
+ **/ClientTestUtil.java
+ **/ReflectionTestUtil.java
diff --git a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java b/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
index 49a351c82e..9788f8efab 100644
--- a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
+++ b/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
@@ -2,6 +2,7 @@
import io.github.resilience4j.circuitbreaker.CallNotPermittedException;
import java.time.Duration;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -450,8 +451,10 @@ public static interface StrategySupplier {
* @see Builder#Builder(ClusterConfig[])
*/
public MultiClusterClientConfig(ClusterConfig[] clusterConfigs) {
+
if (clusterConfigs == null || clusterConfigs.length < 1) throw new JedisValidationException(
"ClusterClientConfigs are required for MultiClusterPooledConnectionProvider");
+
for (ClusterConfig clusterConfig : clusterConfigs) {
if (clusterConfig == null)
throw new IllegalArgumentException("ClusterClientConfigs must not contain null elements");
@@ -634,6 +637,20 @@ public boolean isFastFailover() {
return fastFailover;
}
+ /**
+ * Creates a new Builder instance for configuring MultiClusterClientConfig.
+ *
+ * At least one cluster configuration must be added to the builder before calling build(). Use the
+ * endpoint() methods to add cluster configurations.
+ *
+ * @return new Builder instance
+ * @throws JedisValidationException if no cluster configuration has been added when build() is called
+ * @see Builder#Builder(ClusterConfig[])
+ */
+ public static Builder builder() {
+ return new Builder();
+ }
+
/**
* Creates a new Builder instance for configuring MultiClusterClientConfig.
* @param clusterConfigs array of cluster configurations defining available Redis endpoints
@@ -751,6 +768,7 @@ public HostAndPort getHostAndPort() {
* @return new Builder instance
* @throws IllegalArgumentException if hostAndPort or clientConfig is null
*/
+ // TODO : Replace HostAndPort with Endpoint
public static Builder builder(HostAndPort hostAndPort, JedisClientConfig clientConfig) {
return new Builder(hostAndPort, clientConfig);
}
@@ -974,7 +992,7 @@ public ClusterConfig build() {
public static class Builder {
/** Array of cluster configurations defining available Redis endpoints. */
- private ClusterConfig[] clusterConfigs;
+ private final List<ClusterConfig> clusterConfigs = new ArrayList<>();
// ============ Retry Configuration Fields ============
/** Maximum number of retry attempts including the initial call. */
@@ -1033,6 +1051,12 @@ public static class Builder {
/** Delay in milliseconds between failover attempts. */
private int delayInBetweenFailoverAttempts = DELAY_IN_BETWEEN_FAILOVER_ATTEMPTS_DEFAULT;
+ /**
+ * Constructs a new Builder with the specified cluster configurations.
+ */
+ public Builder() {
+ }
+
/**
* Constructs a new Builder with the specified cluster configurations.
* @param clusterConfigs array of cluster configurations defining available Redis endpoints
@@ -1040,10 +1064,7 @@ public static class Builder {
*/
public Builder(ClusterConfig[] clusterConfigs) {
- if (clusterConfigs == null || clusterConfigs.length < 1) throw new JedisValidationException(
- "ClusterClientConfigs are required for MultiClusterPooledConnectionProvider");
-
- this.clusterConfigs = clusterConfigs;
+ this(Arrays.asList(clusterConfigs));
}
/**
@@ -1052,7 +1073,47 @@ public Builder(ClusterConfig[] clusterConfigs) {
* @throws JedisValidationException if clusterConfigs is null or empty
*/
public Builder(List<ClusterConfig> clusterConfigs) {
- this(clusterConfigs.toArray(new ClusterConfig[0]));
+ this.clusterConfigs.addAll(clusterConfigs);
+ }
+
+ /**
+ * Adds a pre-configured endpoint configuration.
+ *
+ * This method allows adding a fully configured ClusterConfig instance, providing maximum
+ * flexibility for advanced configurations including custom health check strategies, connection
+ * pool settings, etc.
+ *
+ * @param clusterConfig the pre-configured cluster configuration
+ * @return this builder
+ */
+ public Builder endpoint(ClusterConfig clusterConfig) {
+ this.clusterConfigs.add(clusterConfig);
+ return this;
+ }
+
+ /**
+ * Adds a Redis endpoint with custom client configuration.
+ *
+ * This method allows specifying endpoint-specific configuration such as authentication, SSL
+ * settings, timeouts, etc. This configuration will override the default client configuration
+ * for this specific endpoint.
+ *
+ * @param endpoint the Redis server endpoint
+ * @param weight the weight for this endpoint (higher values = higher priority)
+ * @param clientConfig the client configuration for this endpoint
+ * @return this builder
+ */
+ public Builder endpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) {
+ // Convert Endpoint to HostAndPort for ClusterConfig
+ // TODO : Refactor ClusterConfig to accept Endpoint directly
+ HostAndPort hostAndPort = (endpoint instanceof HostAndPort) ? (HostAndPort) endpoint
+ : new HostAndPort(endpoint.getHost(), endpoint.getPort());
+
+ ClusterConfig clusterConfig = ClusterConfig.builder(hostAndPort, clientConfig).weight(weight)
+ .build();
+
+ this.clusterConfigs.add(clusterConfig);
+ return this;
}
// ============ Retry Configuration Methods ============
@@ -1453,7 +1514,9 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts
* @return a new MultiClusterClientConfig instance with the configured settings
*/
public MultiClusterClientConfig build() {
- MultiClusterClientConfig config = new MultiClusterClientConfig(this.clusterConfigs);
+
+ MultiClusterClientConfig config = new MultiClusterClientConfig(
+ this.clusterConfigs.toArray(new ClusterConfig[0]));
// Copy retry configuration
config.retryMaxAttempts = this.retryMaxAttempts;
diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java
new file mode 100644
index 0000000000..74943e2bed
--- /dev/null
+++ b/src/main/java/redis/clients/jedis/MultiDbClient.java
@@ -0,0 +1,288 @@
+package redis.clients.jedis;
+
+import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.annots.Experimental;
+import redis.clients.jedis.builders.MultiDbClientBuilder;
+import redis.clients.jedis.csc.Cache;
+import redis.clients.jedis.executors.CommandExecutor;
+import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
+import redis.clients.jedis.mcf.MultiClusterPipeline;
+import redis.clients.jedis.mcf.MultiClusterTransaction;
+import redis.clients.jedis.providers.ConnectionProvider;
+import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+
+import java.util.Set;
+
+/**
+ * MultiDbClient provides high-availability Redis connectivity with automatic failover and failback
+ * capabilities across multiple weighted endpoints.
+ *
+ * This client extends UnifiedJedis to support resilient operations with:
+ *
+ * - Multi-Endpoint Support: Configure multiple Redis endpoints with individual
+ * weights
+ * - Automatic Failover: Seamless switching to backup endpoints when primary
+ * becomes unavailable
+ * - Circuit Breaker Pattern: Built-in circuit breaker to prevent cascading
+ * failures
+ * - Weight-Based Selection: Intelligent endpoint selection based on configured
+ * weights
+ * - Health Monitoring: Continuous health checks with automatic failback to
+ * recovered endpoints
+ * - Retry Logic: Configurable retry mechanisms with exponential backoff
+ *
+ *
+ * Usage Example:
+ *
+ *
+ *
+ * // Create multi-db client with multiple endpoints
+ * HostAndPort primary = new HostAndPort("localhost", 29379);
+ * HostAndPort secondary = new HostAndPort("localhost", 29380);
+ *
+ *
+ * MultiDbClient client = MultiDbClient.builder()
+ * .multiDbConfig(
+ * MultiClusterClientConfig.builder()
+ * .endpoint(
+ * ClusterConfig.builder(
+ * primary,
+ * DefaultJedisClientConfig.builder().build())
+ * .weight(100.0f)
+ * .build())
+ * .endpoint(ClusterConfig.builder(
+ * secondary,
+ * DefaultJedisClientConfig.builder().build())
+ * .weight(50.0f).build())
+ * .circuitBreakerFailureRateThreshold(50.0f)
+ * .retryMaxAttempts(3)
+ * .build()
+ * )
+ * .databaseSwitchListener(event ->
+ * System.out.println("Switched to: " + event.getEndpoint()))
+ * .build();
+ *
+ * // Use like any other Jedis client
+ * client.set("key", "value");
+ * String value = client.get("key");
+ *
+ * // Automatic failover happens transparently
+ * client.close();
+ *
+ *
+ * The client automatically handles endpoint failures and recoveries, providing transparent high
+ * availability for Redis operations. All standard Jedis operations are supported with the added
+ * resilience features.
+ *
+ * @author Ivo Gaydazhiev
+ * @since 5.2.0
+ * @see MultiClusterPooledConnectionProvider
+ * @see CircuitBreakerCommandExecutor
+ * @see MultiClusterClientConfig
+ */
+@Experimental
+public class MultiDbClient extends UnifiedJedis {
+
+ /**
+ * Creates a MultiDbClient with custom components.
+ *
+ * This constructor allows full customization of the client components and is primarily used by
+ * the builder pattern for advanced configurations. For most use cases, prefer using
+ * {@link #builder()} to create instances.
+ *
+ * @param commandExecutor the command executor (typically CircuitBreakerCommandExecutor)
+ * @param connectionProvider the connection provider (typically
+ * MultiClusterPooledConnectionProvider)
+ * @param commandObjects the command objects
+ * @param redisProtocol the Redis protocol version
+ * @param cache the client-side cache (may be null)
+ */
+ MultiDbClient(CommandExecutor commandExecutor, ConnectionProvider connectionProvider,
+ CommandObjects commandObjects, RedisProtocol redisProtocol, Cache cache) {
+ super(commandExecutor, connectionProvider, commandObjects, redisProtocol, cache);
+ }
+
+ /**
+ * Returns the underlying MultiClusterPooledConnectionProvider.
+ *
+ * This provides access to multi-cluster specific operations like manual failover, health status
+ * monitoring, and cluster switch event handling.
+ *
+ * @return the multi-cluster connection provider
+ * @throws ClassCastException if the provider is not a MultiClusterPooledConnectionProvider
+ */
+ private MultiClusterPooledConnectionProvider getMultiClusterProvider() {
+ return (MultiClusterPooledConnectionProvider) this.provider;
+ }
+
+ /**
+ * Manually switches to the specified endpoint.
+ *
+ * This method allows manual failover to a specific endpoint, bypassing the automatic weight-based
+ * selection. The switch will only succeed if the target endpoint is healthy.
+ *
+ * @param endpoint the endpoint to switch to
+ */
+ public void setActiveDatabase(Endpoint endpoint) {
+ getMultiClusterProvider().setActiveCluster(endpoint);
+ }
+
+ /**
+ * Adds a pre-configured cluster configuration.
+ *
+ * This method allows adding a fully configured ClusterConfig instance, providing maximum
+ * flexibility for advanced configurations including custom health check strategies, connection
+ * pool settings, etc.
+ *
+ * @param clusterConfig the pre-configured cluster configuration
+ */
+ public void addEndpoint(ClusterConfig clusterConfig) {
+ getMultiClusterProvider().add(clusterConfig);
+ }
+
+ /**
+ * Dynamically adds a new cluster endpoint to the resilient client.
+ *
+ * This allows adding new endpoints at runtime without recreating the client. The new endpoint
+ * will be available for failover operations immediately after being added and passing health
+ * checks (if configured).
+ *
+ * @param endpoint the Redis server endpoint
+ * @param weight the weight for this endpoint (higher values = higher priority)
+ * @param clientConfig the client configuration for this endpoint
+ * @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint already exists
+ */
+ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) {
+ // Convert Endpoint to HostAndPort for ClusterConfig
+ HostAndPort hostAndPort = (endpoint instanceof HostAndPort) ? (HostAndPort) endpoint
+ : new HostAndPort(endpoint.getHost(), endpoint.getPort());
+
+ ClusterConfig clusterConfig = ClusterConfig.builder(hostAndPort, clientConfig).weight(weight)
+ .build();
+
+ getMultiClusterProvider().add(clusterConfig);
+ }
+
+ /**
+ * Returns the set of all configured endpoints.
+ *
+ * This method provides a view of all endpoints currently configured in the resilient client.
+ *
+ * @return the set of all configured endpoints
+ */
+ public Set<Endpoint> getEndpoints() {
+ return getMultiClusterProvider().getEndpoints();
+ }
+
+ /**
+ * Returns the health status of the specified endpoint.
+ *
+ * This method provides the current health status of a specific endpoint.
+ *
+ * @param endpoint the endpoint to check
+ * @return the health status of the endpoint
+ */
+ public boolean isHealthy(Endpoint endpoint) {
+ return getMultiClusterProvider().isHealthy(endpoint);
+ }
+
+ /**
+ * Dynamically removes a cluster endpoint from the resilient client.
+ *
+ * This allows removing endpoints at runtime. If the removed endpoint is currently active, the
+ * client will automatically failover to the next available healthy endpoint based on weight
+ * priority.
+ *
+ * @param endpoint the endpoint to remove
+ * @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint doesn't exist
+ * @throws redis.clients.jedis.exceptions.JedisException if removing the endpoint would leave no
+ * healthy clusters available
+ */
+ public void removeEndpoint(Endpoint endpoint) {
+ getMultiClusterProvider().remove(endpoint);
+ }
+
+ /**
+ * Forces the client to switch to a specific endpoint for a duration.
+ *
+ * This method forces the client to use the specified endpoint and puts all other endpoints in a
+ * grace period, preventing automatic failover for the specified duration. This is useful for
+ * maintenance scenarios or testing specific endpoints.
+ *
+ * @param endpoint the endpoint to force as active
+ * @param forcedActiveDurationMs the duration in milliseconds to keep this endpoint forced
+ * @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint is not healthy
+ * or doesn't exist
+ */
+ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) {
+ getMultiClusterProvider().forceActiveCluster(endpoint, forcedActiveDurationMs);
+ }
+
+ /**
+ * Creates a new pipeline for batch operations with multi-cluster support.
+ *
+ * The returned pipeline supports the same resilience features as the main client, including
+ * automatic failover during batch execution.
+ *
+ * @return a new MultiClusterPipeline instance
+ */
+ @Override
+ public MultiClusterPipeline pipelined() {
+ return new MultiClusterPipeline(getMultiClusterProvider(), commandObjects);
+ }
+
+ /**
+ * Creates a new transaction with multi-cluster support.
+ *
+ * The returned transaction supports the same resilience features as the main client, including
+ * automatic failover during transaction execution.
+ *
+ * @return a new MultiClusterTransaction instance
+ */
+ @Override
+ public MultiClusterTransaction multi() {
+ return new MultiClusterTransaction((MultiClusterPooledConnectionProvider) provider, true,
+ commandObjects);
+ }
+
+ /**
+ * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
+ * @return transaction object
+ */
+ @Override
+ public MultiClusterTransaction transaction(boolean doMulti) {
+ if (provider == null) {
+ throw new IllegalStateException(
+ "It is not allowed to create Transaction from this " + getClass());
+ }
+
+ return new MultiClusterTransaction(getMultiClusterProvider(), doMulti, commandObjects);
+ }
+
+ public Endpoint getActiveEndpoint() {
+ return getMultiClusterProvider().getCluster().getEndpoint();
+ }
+
+ /**
+ * Fluent builder for {@link MultiDbClient}.
+ *
+ * Obtain an instance via {@link #builder()}.
+ *
+ */
+ public static class Builder extends MultiDbClientBuilder {
+
+ @Override
+ protected MultiDbClient createClient() {
+ return new MultiDbClient(commandExecutor, connectionProvider, commandObjects,
+ clientConfig.getRedisProtocol(), cache);
+ }
+ }
+
+ /**
+ * Create a new builder for configuring MultiDbClient instances.
+ * @return a new {@link MultiDbClient.Builder} instance
+ */
+ public static Builder builder() {
+ return new Builder();
+ }
+}
diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
new file mode 100644
index 0000000000..df3c1f86d6
--- /dev/null
+++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
@@ -0,0 +1,139 @@
+package redis.clients.jedis.builders;
+
+import java.util.function.Consumer;
+
+import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.annots.Experimental;
+import redis.clients.jedis.executors.CommandExecutor;
+import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
+import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
+import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.providers.ConnectionProvider;
+
+/**
+ * Builder for creating multi-db Redis clients with multi-endpoint support.
+ *
+ * This builder provides methods specific to multi-db Redis deployments, including multiple weighted
+ * endpoints, circuit breaker configuration, health checks, and automatic failover/failback
+ * capabilities.
+ *
+ *
+ * Key Features:
+ *
+ *
+ * - Multi-Endpoint Configuration: Add multiple Redis endpoints with individual
+ * weights
+ * - Circuit Breaker Integration: Built-in circuit breaker with configurable
+ * thresholds
+ * - Health Monitoring: Automatic health checks with configurable strategies
+ * - Event Handling: Listen to cluster switch events for monitoring and
+ * alerting
+ * - Flexible Configuration: Support for both simple and advanced multi-cluster
+ * configurations
+ *
+ *
+ * Usage Examples:
+ *
+ *
+ *
+ * MultiDbClient client = MultiDbClient.builder()
+ * .multiDbConfig(
+ * MultiClusterClientConfig.builder()
+ * .endpoint(
+ * ClusterConfig.builder(
+ * east,
+ * DefaultJedisClientConfig.builder().credentials(credentialsEast).build())
+ * .weight(100.0f)
+ * .build())
+ * .endpoint(ClusterConfig.builder(
+ * west,
+ * DefaultJedisClientConfig.builder().credentials(credentialsWest).build())
+ * .weight(50.0f).build())
+ * .circuitBreakerFailureRateThreshold(50.0f)
+ * .retryMaxAttempts(3)
+ * .build()
+ * )
+ * .databaseSwitchListener(event ->
+ * System.out.println("Switched to: " + event.getEndpoint()))
+ * .build();
+ *
+ *
+ * @param <C> the client type that this builder creates
+ * @author Ivo Gaydazhiev
+ * @since 5.2.0
+ */
+@Experimental
+public abstract class MultiDbClientBuilder
+ extends AbstractClientBuilder, C> {
+
+ // Multi-db specific configuration fields
+ private MultiClusterClientConfig multiDbConfig = null;
+ private Consumer databaseSwitchListener = null;
+
+ /**
+ * Sets the multi-database configuration.
+ *
+ * This configuration controls circuit breaker behavior, retry logic, health checks, failback
+ * settings, and other resilience features. If not provided, default configuration will be used.
+ *
+ * @param config the multi-database configuration
+ * @return this builder
+ */
+ public MultiDbClientBuilder multiDbConfig(MultiClusterClientConfig config) {
+ this.multiDbConfig = config;
+ return this;
+ }
+
+ /**
+ * Sets a listener for database switch events.
+ *
+ * The listener will be called whenever the client switches from one endpoint to another,
+ * providing information about the switch reason and the new active endpoint. This is useful for
+ * monitoring, alerting, and logging purposes.
+ *
+ * @param listener the database switch event listener
+ * @return this builder
+ */
+ public MultiDbClientBuilder databaseSwitchListener(Consumer listener) {
+ this.databaseSwitchListener = listener;
+ return this;
+ }
+
+ @Override
+ protected MultiDbClientBuilder self() {
+ return this;
+ }
+
+ @Override
+ protected ConnectionProvider createDefaultConnectionProvider() {
+
+ if (this.multiDbConfig == null || this.multiDbConfig.getClusterConfigs() == null
+ || this.multiDbConfig.getClusterConfigs().length < 1) {
+ throw new IllegalArgumentException("At least one endpoint must be specified");
+ }
+
+ // Create the multi-cluster connection provider
+ MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ multiDbConfig);
+
+ // Set database switch listener if provided
+ if (this.databaseSwitchListener != null) {
+ provider.setClusterSwitchListener(this.databaseSwitchListener);
+ }
+
+ return provider;
+ }
+
+ @Override
+ protected CommandExecutor createDefaultCommandExecutor() {
+ // For multi-db clients, we always use CircuitBreakerCommandExecutor
+ return new CircuitBreakerCommandExecutor(
+ (MultiClusterPooledConnectionProvider) this.connectionProvider);
+ }
+
+ @Override
+ protected void validateSpecificConfiguration() {
+
+ }
+
+}
diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
index 8000e616c4..1fe6cebe4d 100644
--- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
+++ b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
@@ -11,6 +11,7 @@ public class ClusterSwitchEventArgs {
public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Cluster cluster) {
this.reason = reason;
+ // TODO: @ggivo do we need cluster name?
this.ClusterName = cluster.getCircuitBreaker().getName();
this.Endpoint = endpoint;
}
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
index 4cf044bbe6..a85e490899 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
@@ -12,8 +12,10 @@
import java.util.Collections;
import java.util.Comparator;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@@ -49,7 +51,7 @@
* isolated connection pool. With this ConnectionProvider users can seamlessly failover to
* Disaster Recovery (DR), Backup, and Active-Active cluster(s) by using simple
* configuration which is passed through from Resilience4j -
- * https://resilience4j.readme.io/docs
+ * docs
*
* Support for manual failback is provided by way of {@link #setActiveCluster(Endpoint)}
*
@@ -321,11 +323,11 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi
// Register listeners BEFORE adding clusters to avoid missing events
healthStatusManager.registerListener(config.getHostAndPort(), this::onHealthStatusChange);
HealthCheck hc = healthStatusManager.add(config.getHostAndPort(), hcs);
- cluster = new Cluster(pool, retry, hc, circuitBreaker, config.getWeight(),
- multiClusterClientConfig);
+ cluster = new Cluster(config.getHostAndPort(), pool, retry, hc, circuitBreaker,
+ config.getWeight(), multiClusterClientConfig);
} else {
- cluster = new Cluster(pool, retry, circuitBreaker, config.getWeight(),
- multiClusterClientConfig);
+ cluster = new Cluster(config.getHostAndPort(), pool, retry, circuitBreaker,
+ config.getWeight(), multiClusterClientConfig);
}
multiClusterMap.put(config.getHostAndPort(), cluster);
@@ -564,6 +566,14 @@ private void validateTargetConnection(Cluster cluster) {
}
}
+ /**
+ * Returns the set of all configured endpoints.
+ * @return a snapshot set of the configured endpoints (a defensive copy; modifying it does not affect the provider)
+ */
+ public Set getEndpoints() {
+ return new HashSet<>(multiClusterMap.keySet());
+ }
+
public void setActiveCluster(Endpoint endpoint) {
if (endpoint == null) {
throw new JedisValidationException(
@@ -581,6 +591,12 @@ public void setActiveCluster(Endpoint endpoint) {
public void forceActiveCluster(Endpoint endpoint, long forcedActiveDuration) {
Cluster cluster = multiClusterMap.get(endpoint);
+
+ if (cluster == null) {
+ throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within "
+ + "the configured endpoints. Please use one from the configuration");
+ }
+
cluster.clearGracePeriod();
if (!cluster.isHealthy()) {
throw new JedisValidationException("Provided endpoint: " + endpoint
@@ -685,6 +701,31 @@ public Cluster getCluster(Endpoint endpoint) {
return multiClusterMap.get(endpoint);
}
+ /**
+ * Returns the active endpoint
+ *
+ * Active endpoint is the one which is currently being used for all operations. It can change at
+ * any time due to health checks, failover, failback, etc.
+ * @return the active cluster endpoint
+ */
+ public Endpoint getActiveEndpoint() {
+ return activeCluster.getEndpoint();
+ }
+
+ /**
+ * Returns the health state of the given endpoint
+ * @param endpoint the endpoint to check
+ * @return the health status of the endpoint
+ */
+ public boolean isHealthy(Endpoint endpoint) {
+ Cluster cluster = getCluster(endpoint);
+ if (cluster == null) {
+ throw new JedisValidationException(
+ "Endpoint " + endpoint + " does not exist in the provider");
+ }
+ return cluster.isHealthy();
+ }
+
public CircuitBreaker getClusterCircuitBreaker() {
return activeCluster.getCircuitBreaker();
}
@@ -723,15 +764,17 @@ public static class Cluster {
private final HealthCheck healthCheck;
private final MultiClusterClientConfig multiClusterClientConfig;
private boolean disabled = false;
+ private final Endpoint endpoint;
// Grace period tracking
private volatile long gracePeriodEndsAt = 0;
private final Logger log = LoggerFactory.getLogger(getClass());
- private Cluster(TrackingConnectionPool connectionPool, Retry retry,
+ private Cluster(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
CircuitBreaker circuitBreaker, float weight,
MultiClusterClientConfig multiClusterClientConfig) {
+ this.endpoint = endpoint;
this.connectionPool = connectionPool;
this.retry = retry;
this.circuitBreaker = circuitBreaker;
@@ -740,10 +783,11 @@ private Cluster(TrackingConnectionPool connectionPool, Retry retry,
this.healthCheck = null;
}
- private Cluster(TrackingConnectionPool connectionPool, Retry retry, HealthCheck hc,
- CircuitBreaker circuitBreaker, float weight,
+ private Cluster(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
+ HealthCheck hc, CircuitBreaker circuitBreaker, float weight,
MultiClusterClientConfig multiClusterClientConfig) {
+ this.endpoint = endpoint;
this.connectionPool = connectionPool;
this.retry = retry;
this.circuitBreaker = circuitBreaker;
@@ -752,6 +796,10 @@ private Cluster(TrackingConnectionPool connectionPool, Retry retry, HealthCheck
this.healthCheck = hc;
}
+ public Endpoint getEndpoint() {
+ return endpoint;
+ }
+
public Connection getConnection() {
if (!isHealthy()) throw new JedisConnectionException("Cluster is not healthy");
if (connectionPool.isClosed()) {
@@ -886,5 +934,6 @@ public String toString() {
+ retry + ", circuitBreaker=" + circuitBreaker + ", weight=" + weight + ", healthStatus="
+ getHealthStatus() + ", multiClusterClientConfig=" + multiClusterClientConfig + '}';
}
+
}
}
\ No newline at end of file
diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
new file mode 100644
index 0000000000..9c0126a5af
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
@@ -0,0 +1,206 @@
+package redis.clients.jedis;
+
+import eu.rekawek.toxiproxy.Proxy;
+import eu.rekawek.toxiproxy.ToxiproxyClient;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.AfterEach;
+
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItems;
+import static org.hamcrest.Matchers.not;
+import static org.junit.jupiter.api.Assertions.*;
+
+import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.exceptions.JedisValidationException;
+import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
+import redis.clients.jedis.mcf.SwitchReason;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;
+
+/**
+ * Basic tests for MultiDbClient functionality.
+ */
+@Tag("integration")
+public class MultiDbClientTest {
+
+ private MultiDbClient client;
+ private static final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("redis-failover-1");
+ private static final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("redis-failover-2");
+
+ private static final ToxiproxyClient tp = new ToxiproxyClient("localhost", 8474);
+ private static Proxy redisProxy1;
+ private static Proxy redisProxy2;
+
+ @BeforeAll
+ public static void setupAdminClients() throws IOException {
+ if (tp.getProxyOrNull("redis-1") != null) {
+ tp.getProxy("redis-1").delete();
+ }
+ if (tp.getProxyOrNull("redis-2") != null) {
+ tp.getProxy("redis-2").delete();
+ }
+
+ redisProxy1 = tp.createProxy("redis-1", "0.0.0.0:29379", "redis-failover-1:9379");
+ redisProxy2 = tp.createProxy("redis-2", "0.0.0.0:29380", "redis-failover-2:9380");
+ }
+
+ @BeforeEach
+ void setUp() {
+ // Create a simple resilient client with mock endpoints for testing
+ MultiClusterClientConfig clientConfig = MultiClusterClientConfig.builder()
+ .endpoint(endpoint1.getHostAndPort(), 100.0f, endpoint1.getClientConfigBuilder().build())
+ .endpoint(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build())
+ .build();
+
+ client = MultiDbClient.builder().multiDbConfig(clientConfig).build();
+ }
+
+ @AfterEach
+ void tearDown() {
+ if (client != null) {
+ client.close();
+ }
+ }
+
+ @Test
+ void testAddRemoveEndpointWithEndpointInterface() {
+ Endpoint newEndpoint = new HostAndPort("unavailable", 6381);
+
+ assertDoesNotThrow(
+ () -> client.addEndpoint(newEndpoint, 25.0f, DefaultJedisClientConfig.builder().build()));
+
+ assertThat(client.getEndpoints(), hasItems(newEndpoint));
+
+ assertDoesNotThrow(() -> client.removeEndpoint(newEndpoint));
+
+ assertThat(client.getEndpoints(), not(hasItems(newEndpoint)));
+ }
+
+ @Test
+ void testAddRemoveEndpointWithClusterConfig() {
+ // todo : (@ggivo) Replace HostAndPort with Endpoint
+ HostAndPort newEndpoint = new HostAndPort("unavailable", 6381);
+
+ ClusterConfig newConfig = ClusterConfig
+ .builder(newEndpoint, DefaultJedisClientConfig.builder().build()).weight(25.0f).build();
+
+ assertDoesNotThrow(() -> client.addEndpoint(newConfig));
+
+ assertThat(client.getEndpoints(), hasItems(newEndpoint));
+
+ assertDoesNotThrow(() -> client.removeEndpoint(newEndpoint));
+
+ assertThat(client.getEndpoints(), not(hasItems(newEndpoint)));
+ }
+
+ @Test
+ void testSetActiveDatabase() {
+ Endpoint endpoint = client.getActiveEndpoint();
+
+ awaitIsHealthy(endpoint1.getHostAndPort());
+ awaitIsHealthy(endpoint2.getHostAndPort());
+ // Ensure we have a healthy endpoint to switch to
+ // NOTE(review): the filter below uses e.equals(endpoint), which selects the *current*
+ // active endpoint rather than a different one — likely intended !e.equals(endpoint); confirm
+ Endpoint newEndpoint = client.getEndpoints().stream()
+ .filter(e -> e.equals(endpoint) && client.isHealthy(e)).findFirst().orElse(null);
+ assertNotNull(newEndpoint);
+
+ // Switch to the new endpoint
+ client.setActiveDatabase(newEndpoint);
+
+ assertEquals(newEndpoint, client.getActiveEndpoint());
+ }
+
+ @Test
+ void testBuilderWithMultipleEndpointTypes() {
+ MultiClusterClientConfig clientConfig = MultiClusterClientConfig.builder()
+ .endpoint(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build())
+ .endpoint(ClusterConfig
+ .builder(endpoint2.getHostAndPort(), DefaultJedisClientConfig.builder().build())
+ .weight(50.0f).build())
+ .build();
+
+ try (MultiDbClient testClient = MultiDbClient.builder().multiDbConfig(clientConfig).build()) {
+ assertThat(testClient.getEndpoints().size(), equalTo(2));
+ assertThat(testClient.getEndpoints(),
+ hasItems(endpoint1.getHostAndPort(), endpoint2.getHostAndPort()));
+ }
+ }
+
+ @Test
+ public void testForceActiveEndpoint() {
+ Endpoint endpoint = client.getActiveEndpoint();
+
+ // Ensure we have a healthy endpoint to switch to
+ awaitIsHealthy(endpoint1.getHostAndPort());
+ awaitIsHealthy(endpoint2.getHostAndPort());
+ Endpoint newEndpoint = client.getEndpoints().stream()
+ .filter(e -> e.equals(endpoint) && client.isHealthy(e)).findFirst().orElse(null);
+ assertNotNull(newEndpoint);
+
+ // Force switch to the new endpoint for 100 milliseconds
+ client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis());
+
+ // Verify the active endpoint has changed
+ assertEquals(newEndpoint, client.getActiveEndpoint());
+ }
+
+ @Test
+ public void testForceActiveEndpointWithNonHealthyEndpoint() {
+ Endpoint newEndpoint = new HostAndPort("unavailable", 6381);
+ client.addEndpoint(newEndpoint, 25.0f, DefaultJedisClientConfig.builder().build());
+
+ assertThrows(JedisValidationException.class,
+ () -> client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis()));
+ }
+
+ @Test
+ public void testForceActiveEndpointWithNonExistingEndpoint() {
+ Endpoint newEndpoint = new HostAndPort("unavailable", 6381);
+ assertThrows(JedisValidationException.class,
+ () -> client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis()));
+ }
+
+ @Test
+ public void testWithDatabaseSwitchListener() {
+
+ MultiClusterClientConfig endpointsConfig = MultiClusterClientConfig.builder()
+ .endpoint(ClusterConfig
+ .builder(endpoint1.getHostAndPort(), endpoint1.getClientConfigBuilder().build())
+ .weight(100.0f).build())
+ .endpoint(ClusterConfig
+ .builder(endpoint2.getHostAndPort(), endpoint2.getClientConfigBuilder().build())
+ .weight(50.0f).build())
+ .build();
+
+ Consumer eventConsumer;
+ List events = new ArrayList<>();
+ eventConsumer = events::add;
+
+ try (MultiDbClient testClient = MultiDbClient.builder().databaseSwitchListener(eventConsumer)
+ .multiDbConfig(endpointsConfig).build()) {
+
+ assertThat(events.size(), equalTo(0));
+
+ awaitIsHealthy(endpoint2.getHostAndPort());
+ testClient.setActiveDatabase(endpoint2.getHostAndPort());
+
+ assertThat(events.size(), equalTo(1));
+ assertThat(events.get(0).getEndpoint(), equalTo(endpoint2.getHostAndPort()));
+ assertThat(events.get(0).getReason(), equalTo(SwitchReason.FORCED));
+ }
+ }
+
+ private void awaitIsHealthy(HostAndPort hostAndPort) {
+ await().atMost(Duration.ofSeconds(1)).until(() -> client.isHealthy(hostAndPort));
+ }
+
+}
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index 59a62ed7e0..a6deb256eb 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -13,6 +13,7 @@
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.util.ClientTestUtil;
import java.io.IOException;
import java.time.Duration;
@@ -57,32 +58,30 @@ public static void beforeClass() {
@Test
public void testFailover() {
- MultiClusterClientConfig.ClusterConfig[] clusterConfig = new MultiClusterClientConfig.ClusterConfig[2];
-
JedisClientConfig config = endpoint.getClientConfigBuilder()
.socketTimeoutMillis(SOCKET_TIMEOUT_MS)
.connectionTimeoutMillis(CONNECTION_TIMEOUT_MS).build();
- clusterConfig[0] = ClusterConfig.builder(endpoint.getHostAndPort(0), config)
+ ClusterConfig primary = ClusterConfig.builder(endpoint.getHostAndPort(0), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build();
- clusterConfig[1] = ClusterConfig.builder(endpoint.getHostAndPort(1), config)
- .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
-
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfig);
-
- builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS
- builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit breaker
- builder.failbackSupported(true);
- builder.failbackCheckInterval(1000);
- builder.gracePeriod(2000);
-
- builder.retryWaitDuration(10);
- builder.retryMaxAttempts(1);
- builder.retryWaitDurationExponentialBackoffMultiplier(1);
- builder.fastFailover(true);
- builder.retryOnFailover(false);
+ ClusterConfig secondary = ClusterConfig.builder(endpoint.getHostAndPort(1), config)
+ .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
+ MultiClusterClientConfig multiConfig = MultiClusterClientConfig.builder()
+ .endpoint(primary)
+ .endpoint(secondary)
+ .circuitBreakerSlidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS
+ .circuitBreakerFailureRateThreshold(10.0f) // percentage of failures to trigger circuit breaker
+ .failbackSupported(true)
+ .failbackCheckInterval(1000)
+ .gracePeriod(2000)
+ .retryWaitDuration(10)
+ .retryMaxAttempts(1)
+ .retryWaitDurationExponentialBackoffMultiplier(1)
+ .fastFailover(true)
+ .retryOnFailover(false)
+ .build();
class FailoverReporter implements Consumer {
String currentClusterName = "not set";
@@ -115,12 +114,12 @@ public void accept(ClusterSwitchEventArgs e) {
}
}
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(builder.build());
FailoverReporter reporter = new FailoverReporter();
- provider.setClusterSwitchListener(reporter);
- provider.setActiveCluster(endpoint.getHostAndPort(0));
- UnifiedJedis client = new UnifiedJedis(provider);
+ MultiDbClient client = MultiDbClient.builder()
+ .multiDbConfig(multiConfig)
+ .databaseSwitchListener(reporter)
+ .build();
AtomicLong executedCommands = new AtomicLong(0);
AtomicLong retryingThreadsCounter = new AtomicLong(0);
@@ -209,7 +208,7 @@ public void accept(ClusterSwitchEventArgs e) {
throw new RuntimeException(e);
}
-
+ MultiClusterPooledConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
ConnectionPool pool1 = provider.getCluster(endpoint.getHostAndPort(0)).getConnectionPool();
ConnectionPool pool2 = provider.getCluster(endpoint.getHostAndPort(1)).getConnectionPool();
diff --git a/src/test/java/redis/clients/jedis/util/ClientTestUtil.java b/src/test/java/redis/clients/jedis/util/ClientTestUtil.java
new file mode 100644
index 0000000000..ecc33d3d9c
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/util/ClientTestUtil.java
@@ -0,0 +1,11 @@
+package redis.clients.jedis.util;
+
+import redis.clients.jedis.UnifiedJedis;
+import redis.clients.jedis.providers.ConnectionProvider;
+
+public class ClientTestUtil {
+
+ public static T getConnectionProvider(UnifiedJedis jedis) {
+ return ReflectionTestUtil.getField(jedis, "provider");
+ }
+}
From 4af3df1eeb71a775bd0439275f1e3991565f1e63 Mon Sep 17 00:00:00 2001
From: Ivo Gaydazhiev
Date: Fri, 3 Oct 2025 17:25:05 +0300
Subject: [PATCH 05/17] [automatic failover] Use Endpoint interface instead of
 HostAndPort in multi db (#4302)
[clean up] Use Endpoint interface where possible
---
.../jedis/MultiClusterClientConfig.java | 53 +++++++++----------
.../redis/clients/jedis/MultiDbClient.java | 6 +--
.../MultiClusterPooledConnectionProvider.java | 28 +++++-----
3 files changed, 41 insertions(+), 46 deletions(-)
diff --git a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java b/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
index 9788f8efab..35e51ee600 100644
--- a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
+++ b/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
@@ -279,7 +279,6 @@ public static interface StrategySupplier {
* Default: {@value #CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT}
*
* @see #getCircuitBreakerSlidingWindowSize()
- * @see #circuitBreakerSlidingWindowType
*/
private int circuitBreakerSlidingWindowSize;
@@ -687,10 +686,10 @@ public static Builder builder(List clusterConfigs) {
public static class ClusterConfig {
/** The Redis endpoint (host and port) for this cluster. */
- private HostAndPort hostAndPort;
+ private final Endpoint endpoint;
/** Jedis client configuration containing connection settings and authentication. */
- private JedisClientConfig jedisClientConfig;
+ private final JedisClientConfig jedisClientConfig;
/** Optional connection pool configuration for managing connections to this cluster. */
private GenericObjectPoolConfig connectionPoolConfig;
@@ -714,12 +713,12 @@ public static class ClusterConfig {
* EchoStrategy for health checks. Use the {@link Builder} for more advanced configuration
* options.
*
- * @param hostAndPort the Redis endpoint (host and port)
+ * @param endpoint the Redis endpoint (host and port)
* @param clientConfig the Jedis client configuration
- * @throws IllegalArgumentException if hostAndPort or clientConfig is null
+ * @throws IllegalArgumentException if endpoint or clientConfig is null
*/
- public ClusterConfig(HostAndPort hostAndPort, JedisClientConfig clientConfig) {
- this.hostAndPort = hostAndPort;
+ public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig) {
+ this.endpoint = endpoint;
this.jedisClientConfig = clientConfig;
}
@@ -729,14 +728,14 @@ public ClusterConfig(HostAndPort hostAndPort, JedisClientConfig clientConfig) {
* This constructor allows specification of connection pool settings in addition to basic
* endpoint configuration. Default weight of 1.0f and EchoStrategy for health checks are used.
*
- * @param hostAndPort the Redis endpoint (host and port)
+ * @param endpoint the Redis endpoint (host and port)
* @param clientConfig the Jedis client configuration
* @param connectionPoolConfig the connection pool configuration
- * @throws IllegalArgumentException if hostAndPort or clientConfig is null
+ * @throws IllegalArgumentException if endpoint or clientConfig is null
*/
- public ClusterConfig(HostAndPort hostAndPort, JedisClientConfig clientConfig,
+ public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig,
GenericObjectPoolConfig connectionPoolConfig) {
- this.hostAndPort = hostAndPort;
+ this.endpoint = endpoint;
this.jedisClientConfig = clientConfig;
this.connectionPoolConfig = connectionPoolConfig;
}
@@ -746,7 +745,7 @@ public ClusterConfig(HostAndPort hostAndPort, JedisClientConfig clientConfig,
* @param builder the builder containing configuration values
*/
private ClusterConfig(Builder builder) {
- this.hostAndPort = builder.hostAndPort;
+ this.endpoint = builder.endpoint;
this.jedisClientConfig = builder.jedisClientConfig;
this.connectionPoolConfig = builder.connectionPoolConfig;
this.weight = builder.weight;
@@ -757,20 +756,20 @@ private ClusterConfig(Builder builder) {
* Returns the Redis endpoint (host and port) for this cluster.
* @return the host and port information
*/
- public HostAndPort getHostAndPort() {
- return hostAndPort;
+ public Endpoint getEndpoint() {
+ return endpoint;
}
/**
* Creates a new Builder instance for configuring a ClusterConfig.
- * @param hostAndPort the Redis endpoint (host and port)
+ * @param endpoint the Redis endpoint (host and port)
* @param clientConfig the Jedis client configuration
* @return new Builder instance
- * @throws IllegalArgumentException if hostAndPort or clientConfig is null
+ * @throws IllegalArgumentException if endpoint or clientConfig is null
*/
- // TODO : Replace HostAndPort with Endpoint
- public static Builder builder(HostAndPort hostAndPort, JedisClientConfig clientConfig) {
- return new Builder(hostAndPort, clientConfig);
+
+ public static Builder builder(Endpoint endpoint, JedisClientConfig clientConfig) {
+ return new Builder(endpoint, clientConfig);
}
/**
@@ -833,7 +832,7 @@ public StrategySupplier getHealthCheckStrategySupplier() {
*/
public static class Builder {
/** The Redis endpoint for this cluster configuration. */
- private HostAndPort hostAndPort;
+ private Endpoint endpoint;
/** The Jedis client configuration. */
private JedisClientConfig jedisClientConfig;
@@ -849,12 +848,12 @@ public static class Builder {
/**
* Constructs a new Builder with required endpoint and client configuration.
- * @param hostAndPort the Redis endpoint (host and port)
+ * @param endpoint the Redis endpoint (host and port)
* @param clientConfig the Jedis client configuration
- * @throws IllegalArgumentException if hostAndPort or clientConfig is null
+ * @throws IllegalArgumentException if endpoint or clientConfig is null
*/
- public Builder(HostAndPort hostAndPort, JedisClientConfig clientConfig) {
- this.hostAndPort = hostAndPort;
+ public Builder(Endpoint endpoint, JedisClientConfig clientConfig) {
+ this.endpoint = endpoint;
this.jedisClientConfig = clientConfig;
}
@@ -1104,12 +1103,8 @@ public Builder endpoint(ClusterConfig clusterConfig) {
* @return this builder
*/
public Builder endpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) {
- // Convert Endpoint to HostAndPort for ClusterConfig
- // TODO : Refactor ClusterConfig to accept Endpoint directly
- HostAndPort hostAndPort = (endpoint instanceof HostAndPort) ? (HostAndPort) endpoint
- : new HostAndPort(endpoint.getHost(), endpoint.getPort());
- ClusterConfig clusterConfig = ClusterConfig.builder(hostAndPort, clientConfig).weight(weight)
+ ClusterConfig clusterConfig = ClusterConfig.builder(endpoint, clientConfig).weight(weight)
.build();
this.clusterConfigs.add(clusterConfig);
diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java
index 74943e2bed..9df888651c 100644
--- a/src/main/java/redis/clients/jedis/MultiDbClient.java
+++ b/src/main/java/redis/clients/jedis/MultiDbClient.java
@@ -153,11 +153,7 @@ public void addEndpoint(ClusterConfig clusterConfig) {
* @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint already exists
*/
public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) {
- // Convert Endpoint to HostAndPort for ClusterConfig
- HostAndPort hostAndPort = (endpoint instanceof HostAndPort) ? (HostAndPort) endpoint
- : new HostAndPort(endpoint.getHost(), endpoint.getPort());
-
- ClusterConfig clusterConfig = ClusterConfig.builder(hostAndPort, clientConfig).weight(weight)
+ ClusterConfig clusterConfig = ClusterConfig.builder(endpoint, clientConfig).weight(weight)
.build();
getMultiClusterProvider().add(clusterConfig);
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
index a85e490899..a389a0d7b4 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
@@ -206,7 +206,7 @@ public void add(ClusterConfig clusterConfig) {
throw new JedisValidationException("ClusterConfig must not be null");
}
- Endpoint endpoint = clusterConfig.getHostAndPort();
+ Endpoint endpoint = clusterConfig.getEndpoint();
if (multiClusterMap.containsKey(endpoint)) {
throw new JedisValidationException(
"Endpoint " + endpoint + " already exists in the provider");
@@ -289,12 +289,12 @@ public void remove(Endpoint endpoint) {
*/
private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfig,
ClusterConfig config) {
- if (multiClusterMap.containsKey(config.getHostAndPort())) {
+ if (multiClusterMap.containsKey(config.getEndpoint())) {
throw new JedisValidationException(
- "Endpoint " + config.getHostAndPort() + " already exists in the provider");
+ "Endpoint " + config.getEndpoint() + " already exists in the provider");
}
- String clusterId = "cluster:" + config.getHostAndPort();
+ String clusterId = "cluster:" + config.getEndpoint();
Retry retry = RetryRegistry.of(retryConfig).retry(clusterId);
@@ -312,25 +312,25 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi
circuitBreakerEventPublisher.onSlowCallRateExceeded(event -> log.error(String.valueOf(event)));
TrackingConnectionPool pool = TrackingConnectionPool.builder()
- .hostAndPort(config.getHostAndPort()).clientConfig(config.getJedisClientConfig())
+ .hostAndPort(hostPort(config.getEndpoint())).clientConfig(config.getJedisClientConfig())
.poolConfig(config.getConnectionPoolConfig()).build();
Cluster cluster;
StrategySupplier strategySupplier = config.getHealthCheckStrategySupplier();
if (strategySupplier != null) {
- HealthCheckStrategy hcs = strategySupplier.get(config.getHostAndPort(),
+ HealthCheckStrategy hcs = strategySupplier.get(hostPort(config.getEndpoint()),
config.getJedisClientConfig());
// Register listeners BEFORE adding clusters to avoid missing events
- healthStatusManager.registerListener(config.getHostAndPort(), this::onHealthStatusChange);
- HealthCheck hc = healthStatusManager.add(config.getHostAndPort(), hcs);
- cluster = new Cluster(config.getHostAndPort(), pool, retry, hc, circuitBreaker,
+ healthStatusManager.registerListener(config.getEndpoint(), this::onHealthStatusChange);
+ HealthCheck hc = healthStatusManager.add(config.getEndpoint(), hcs);
+ cluster = new Cluster(config.getEndpoint(), pool, retry, hc, circuitBreaker,
config.getWeight(), multiClusterClientConfig);
} else {
- cluster = new Cluster(config.getHostAndPort(), pool, retry, circuitBreaker,
- config.getWeight(), multiClusterClientConfig);
+ cluster = new Cluster(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(),
+ multiClusterClientConfig);
}
- multiClusterMap.put(config.getHostAndPort(), cluster);
+ multiClusterMap.put(config.getEndpoint(), cluster);
// this is the place where we listen tracked errors and check if
// thresholds are exceeded for the cluster
@@ -339,6 +339,10 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi
});
}
+ private HostAndPort hostPort(Endpoint endpoint) {
+ return new HostAndPort(endpoint.getHost(), endpoint.getPort());
+ }
+
/**
* Handles health status changes for clusters. This method is called by the health status manager
* when the health status of a cluster changes.
From 504c4b8cef029ef16180102d2515cf217d190d7d Mon Sep 17 00:00:00 2001
From: ggivo
Date: Fri, 3 Oct 2025 19:43:24 +0300
Subject: [PATCH 06/17] Rename MultiDb classes -
MultiClusterPooledConnectionProvider -> MultiDatabaseConnectionProvider -
Cluster -> Database - MultiClusterClientConfig -> MultiDatabaseConfig -
ClusterConfig -> DatabaseConfig
---
pom.xml | 2 +-
...ntConfig.java => MultiDatabaseConfig.java} | 215 +++++----
.../redis/clients/jedis/MultiDbClient.java | 56 +--
.../redis/clients/jedis/UnifiedJedis.java | 12 +-
.../jedis/builders/MultiDbClientBuilder.java | 25 +-
.../mcf/CircuitBreakerCommandExecutor.java | 24 +-
.../jedis/mcf/CircuitBreakerFailoverBase.java | 40 +-
...cuitBreakerFailoverConnectionProvider.java | 15 +-
.../mcf/CircuitBreakerThresholdsAdapter.java | 14 +-
.../jedis/mcf/ClusterSwitchEventArgs.java | 6 +-
.../redis/clients/jedis/mcf/EchoStrategy.java | 2 +-
.../jedis/mcf/JedisFailoverException.java | 10 +-
.../jedis/mcf/MultiClusterPipeline.java | 4 +-
.../jedis/mcf/MultiClusterTransaction.java | 6 +-
...a => MultiDatabaseConnectionProvider.java} | 415 +++++++++---------
.../clients/jedis/MultiDbClientTest.java | 18 +-
...UnifiedJedisConstructorReflectionTest.java | 3 +-
.../failover/FailoverIntegrationTest.java | 85 ++--
.../mcf/ActiveActiveLocalFailoverTest.java | 25 +-
.../mcf/CircuitBreakerThresholdsTest.java | 71 ++-
.../mcf/ClusterEvaluateThresholdsTest.java | 22 +-
.../clients/jedis/mcf/DefaultValuesTest.java | 10 +-
.../mcf/FailbackMechanismIntegrationTest.java | 193 ++++----
.../jedis/mcf/FailbackMechanismUnitTest.java | 64 +--
.../jedis/mcf/HealthCheckIntegrationTest.java | 34 +-
.../clients/jedis/mcf/HealthCheckTest.java | 57 ++-
.../MultiClusterDynamicEndpointUnitTest.java | 78 ++--
...ultiClusterFailoverAttemptsConfigTest.java | 56 +--
.../mcf/MultiClusterInitializationTest.java | 67 ++-
...ClusterPooledConnectionProviderHelper.java | 20 -
...MultiDatabaseConnectionProviderHelper.java | 20 +
... MultiDatabaseConnectionProviderTest.java} | 120 ++---
.../jedis/mcf/PeriodicFailbackTest.java | 103 +++--
.../jedis/misc/AutomaticFailoverTest.java | 60 +--
...erProviderHealthStatusChangeEventTest.java | 142 +++---
.../scenario/ActiveActiveFailoverTest.java | 16 +-
36 files changed, 1036 insertions(+), 1074 deletions(-)
rename src/main/java/redis/clients/jedis/{MultiClusterClientConfig.java => MultiDatabaseConfig.java} (88%)
rename src/main/java/redis/clients/jedis/mcf/{MultiClusterPooledConnectionProvider.java => MultiDatabaseConnectionProvider.java} (65%)
delete mode 100644 src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java
create mode 100644 src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java
rename src/test/java/redis/clients/jedis/mcf/{MultiClusterPooledConnectionProviderTest.java => MultiDatabaseConnectionProviderTest.java} (69%)
diff --git a/pom.xml b/pom.xml
index fd98539365..957561819d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -488,7 +488,7 @@
**/Health*.java
**/*IT.java
**/scenario/RestEndpointUtil.java
- src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
+ src/main/java/redis/clients/jedis/MultiDatabaseConfig.java
src/main/java/redis/clients/jedis/HostAndPort.java
**/builders/*.java
**/MultiDb*.java
diff --git a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java b/src/main/java/redis/clients/jedis/MultiDatabaseConfig.java
similarity index 88%
rename from src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
rename to src/main/java/redis/clients/jedis/MultiDatabaseConfig.java
index 35e51ee600..96ab7d7971 100644
--- a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java
+++ b/src/main/java/redis/clients/jedis/MultiDatabaseConfig.java
@@ -21,8 +21,8 @@
* This configuration enables seamless failover between multiple Redis clusters, databases, or
* endpoints by providing comprehensive settings for retry logic, circuit breaker behavior, health
* checks, and failback mechanisms. It is designed to work with
- * {@link redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider} to provide high availability
- * and disaster recovery capabilities.
+ * {@link redis.clients.jedis.mcf.MultiDatabaseConnectionProvider} to provide high availability and
+ * disaster recovery capabilities.
*
*
* Key Features:
@@ -49,27 +49,26 @@
* {
* @code
* // Configure individual clusters
- * ClusterConfig primary = ClusterConfig.builder(primaryEndpoint, clientConfig).weight(1.0f)
+ * DatabaseConfig primary = DatabaseConfig.builder(primaryEndpoint, clientConfig).weight(1.0f)
* .build();
*
- * ClusterConfig secondary = ClusterConfig.builder(secondaryEndpoint, clientConfig).weight(0.5f)
+ * DatabaseConfig secondary = DatabaseConfig.builder(secondaryEndpoint, clientConfig).weight(0.5f)
* .healthCheckEnabled(true).build();
*
* // Build multi-cluster configuration
- * MultiClusterClientConfig config = MultiClusterClientConfig.builder(primary, secondary)
+ * MultiDatabaseConfig config = MultiDatabaseConfig.builder(primary, secondary)
* .circuitBreakerFailureRateThreshold(10.0f).retryMaxAttempts(3).failbackSupported(true)
* .gracePeriod(10000).build();
*
* // Use with connection provider
- * MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- * config);
+ * MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config);
* }
*
*
* The configuration leverages Resilience4j for
* circuit breaker and retry implementations, providing battle-tested fault tolerance patterns.
*
- * @see redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider
+ * @see redis.clients.jedis.mcf.MultiDatabaseConnectionProvider
* @see redis.clients.jedis.mcf.HealthCheckStrategy
* @see redis.clients.jedis.mcf.EchoStrategy
* @see redis.clients.jedis.mcf.LagAwareStrategy
@@ -77,7 +76,7 @@
*/
// TODO: move
@Experimental
-public final class MultiClusterClientConfig {
+public final class MultiDatabaseConfig {
/**
* Functional interface for creating {@link HealthCheckStrategy} instances for specific Redis
@@ -158,7 +157,7 @@ public static interface StrategySupplier {
private static final int DELAY_IN_BETWEEN_FAILOVER_ATTEMPTS_DEFAULT = 12000;
/** Array of cluster configurations defining the available Redis endpoints and their settings. */
- private final ClusterConfig[] clusterConfigs;
+ private final DatabaseConfig[] databaseConfigs;
// ============ Retry Configuration ============
// Based on Resilience4j Retry: https://resilience4j.readme.io/docs/retry
@@ -438,35 +437,35 @@ public static interface StrategySupplier {
private int delayInBetweenFailoverAttempts;
/**
- * Constructs a new MultiClusterClientConfig with the specified cluster configurations.
+ * Constructs a new MultiDatabaseConfig with the specified cluster configurations.
*
* This constructor validates that at least one cluster configuration is provided and that all
* configurations are non-null. Use the {@link Builder} class for more convenient configuration
* with default values.
*
- * @param clusterConfigs array of cluster configurations defining the available Redis endpoints
- * @throws JedisValidationException if clusterConfigs is null or empty
+ * @param databaseConfigs array of database configurations defining the available Redis endpoints
+ * @throws JedisValidationException if databaseConfigs is null or empty
* @throws IllegalArgumentException if any cluster configuration is null
- * @see Builder#Builder(ClusterConfig[])
+ * @see Builder#Builder(DatabaseConfig[])
*/
- public MultiClusterClientConfig(ClusterConfig[] clusterConfigs) {
+ public MultiDatabaseConfig(DatabaseConfig[] databaseConfigs) {
- if (clusterConfigs == null || clusterConfigs.length < 1) throw new JedisValidationException(
- "ClusterClientConfigs are required for MultiClusterPooledConnectionProvider");
+ if (databaseConfigs == null || databaseConfigs.length < 1) throw new JedisValidationException(
+ "DatabaseClientConfigs are required for MultiDatabaseConnectionProvider");
- for (ClusterConfig clusterConfig : clusterConfigs) {
- if (clusterConfig == null)
- throw new IllegalArgumentException("ClusterClientConfigs must not contain null elements");
+ for (DatabaseConfig databaseConfig : databaseConfigs) {
+ if (databaseConfig == null)
+ throw new IllegalArgumentException("DatabaseClientConfigs must not contain null elements");
}
- this.clusterConfigs = clusterConfigs;
+ this.databaseConfigs = databaseConfigs;
}
/**
* Returns the array of cluster configurations defining available Redis endpoints.
* @return array of cluster configurations, never null or empty
*/
- public ClusterConfig[] getClusterConfigs() {
- return clusterConfigs;
+ public DatabaseConfig[] getDatabaseConfigs() {
+ return databaseConfigs;
}
/**
@@ -637,79 +636,79 @@ public boolean isFastFailover() {
}
/**
- * Creates a new Builder instance for configuring MultiClusterClientConfig.
+ * Creates a new Builder instance for configuring MultiDatabaseConfig.
*
* At least one cluster configuration must be added to the builder before calling build(). Use the
* endpoint() methods to add cluster configurations.
*
* @return new Builder instance
- * @throws JedisValidationException if clusterConfigs is null or empty
- * @see Builder#Builder(ClusterConfig[])
+ * @throws JedisValidationException if databaseConfigs is null or empty
+ * @see Builder#Builder(DatabaseConfig[])
*/
public static Builder builder() {
return new Builder();
}
/**
- * Creates a new Builder instance for configuring MultiClusterClientConfig.
- * @param clusterConfigs array of cluster configurations defining available Redis endpoints
+ * Creates a new Builder instance for configuring MultiDatabaseConfig.
+ * @param databaseConfigs array of database configurations defining available Redis endpoints
* @return new Builder instance
- * @throws JedisValidationException if clusterConfigs is null or empty
- * @see Builder#Builder(ClusterConfig[])
+ * @throws JedisValidationException if databaseConfigs is null or empty
+ * @see Builder#Builder(DatabaseConfig[])
*/
- public static Builder builder(ClusterConfig[] clusterConfigs) {
- return new Builder(clusterConfigs);
+ public static Builder builder(DatabaseConfig[] databaseConfigs) {
+ return new Builder(databaseConfigs);
}
/**
- * Creates a new Builder instance for configuring MultiClusterClientConfig.
- * @param clusterConfigs list of cluster configurations defining available Redis endpoints
+ * Creates a new Builder instance for configuring MultiDatabaseConfig.
+ * @param databaseConfigs list of database configurations defining available Redis endpoints
* @return new Builder instance
- * @throws JedisValidationException if clusterConfigs is null or empty
+ * @throws JedisValidationException if databaseConfigs is null or empty
* @see Builder#Builder(List)
*/
- public static Builder builder(List clusterConfigs) {
- return new Builder(clusterConfigs);
+ public static Builder builder(List databaseConfigs) {
+ return new Builder(databaseConfigs);
}
/**
* Configuration class for individual Redis cluster endpoints within a multi-cluster setup.
*
- * Each ClusterConfig represents a single Redis endpoint that can participate in the multi-cluster
- * failover system. It encapsulates the connection details, weight for priority-based selection,
- * and health check configuration for that endpoint.
+ * Each DatabaseConfig represents a single Redis endpoint that can participate in the
+ * multi-cluster failover system. It encapsulates the connection details, weight for
+ * priority-based selection, and health check configuration for that endpoint.
*
* @see Builder
* @see StrategySupplier
* @see redis.clients.jedis.mcf.HealthCheckStrategy
*/
- public static class ClusterConfig {
+ public static class DatabaseConfig {
- /** The Redis endpoint (host and port) for this cluster. */
+ /** The Redis endpoint (host and port) for this database. */
private final Endpoint endpoint;
/** Jedis client configuration containing connection settings and authentication. */
private final JedisClientConfig jedisClientConfig;
- /** Optional connection pool configuration for managing connections to this cluster. */
+ /** Optional connection pool configuration for managing connections to this database. */
private GenericObjectPoolConfig connectionPoolConfig;
/**
- * Weight value for cluster selection priority. Higher weights indicate higher priority. Default
- * value is 1.0f.
+ * Weight value for database selection priority. Higher weights indicate higher priority.
+ * Default value is 1.0f.
*/
private float weight = 1.0f;
/**
- * Strategy supplier for creating health check instances for this cluster. Default is
+ * Strategy supplier for creating health check instances for this database. Default is
* EchoStrategy.DEFAULT.
*/
private StrategySupplier healthCheckStrategySupplier;
/**
- * Constructs a ClusterConfig with basic endpoint and client configuration.
+ * Constructs a DatabaseConfig with basic endpoint and client configuration.
*
- * This constructor creates a cluster configuration with default settings: weight of 1.0f and
+ * This constructor creates a database configuration with default settings: weight of 1.0f and
* EchoStrategy for health checks. Use the {@link Builder} for more advanced configuration
* options.
*
@@ -717,13 +716,13 @@ public static class ClusterConfig {
* @param clientConfig the Jedis client configuration
* @throws IllegalArgumentException if endpoint or clientConfig is null
*/
- public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig) {
+ public DatabaseConfig(Endpoint endpoint, JedisClientConfig clientConfig) {
this.endpoint = endpoint;
this.jedisClientConfig = clientConfig;
}
/**
- * Constructs a ClusterConfig with endpoint, client, and connection pool configuration.
+ * Constructs a DatabaseConfig with endpoint, client, and connection pool configuration.
*
* This constructor allows specification of connection pool settings in addition to basic
* endpoint configuration. Default weight of 1.0f and EchoStrategy for health checks are used.
@@ -733,7 +732,7 @@ public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig) {
* @param connectionPoolConfig the connection pool configuration
* @throws IllegalArgumentException if endpoint or clientConfig is null
*/
- public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig,
+ public DatabaseConfig(Endpoint endpoint, JedisClientConfig clientConfig,
GenericObjectPoolConfig connectionPoolConfig) {
this.endpoint = endpoint;
this.jedisClientConfig = clientConfig;
@@ -744,7 +743,7 @@ public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig,
* Private constructor used by the Builder to create configured instances.
* @param builder the builder containing configuration values
*/
- private ClusterConfig(Builder builder) {
+ private DatabaseConfig(Builder builder) {
this.endpoint = builder.endpoint;
this.jedisClientConfig = builder.jedisClientConfig;
this.connectionPoolConfig = builder.connectionPoolConfig;
@@ -753,7 +752,7 @@ private ClusterConfig(Builder builder) {
}
/**
- * Returns the Redis endpoint (host and port) for this cluster.
+ * Returns the Redis endpoint (host and port) for this database.
* @return the host and port information
*/
public Endpoint getEndpoint() {
@@ -761,7 +760,7 @@ public Endpoint getEndpoint() {
}
/**
- * Creates a new Builder instance for configuring a ClusterConfig.
+ * Creates a new Builder instance for configuring a DatabaseConfig.
* @param endpoint the Redis endpoint (host and port)
* @param clientConfig the Jedis client configuration
* @return new Builder instance
@@ -773,7 +772,7 @@ public static Builder builder(Endpoint endpoint, JedisClientConfig clientConfig)
}
/**
- * Returns the Jedis client configuration for this cluster.
+ * Returns the Jedis client configuration for this database.
* @return the client configuration containing connection settings and authentication
*/
public JedisClientConfig getJedisClientConfig() {
@@ -781,7 +780,7 @@ public JedisClientConfig getJedisClientConfig() {
}
/**
- * Returns the connection pool configuration for this cluster.
+ * Returns the connection pool configuration for this database.
* @return the connection pool configuration, may be null if not specified
*/
public GenericObjectPoolConfig getConnectionPoolConfig() {
@@ -789,9 +788,9 @@ public GenericObjectPoolConfig getConnectionPoolConfig() {
}
/**
- * Returns the weight value used for cluster selection priority.
+ * Returns the weight value used for database selection priority.
*
- * Higher weight values indicate higher priority. During failover, clusters are selected in
+ * Higher weight values indicate higher priority. During failover, databases are selected in
* descending order of weight (highest weight first).
*
* @return the weight value, default is 1.0f
@@ -801,9 +800,9 @@ public float getWeight() {
}
/**
- * Returns the health check strategy supplier for this cluster.
+ * Returns the health check strategy supplier for this database.
*
- * The strategy supplier is used to create health check instances that monitor this cluster's
+ * The strategy supplier is used to create health check instances that monitor this database's
* availability. Returns null if health checks are disabled.
*
* @return the health check strategy supplier, or null if health checks are disabled
@@ -815,9 +814,9 @@ public StrategySupplier getHealthCheckStrategySupplier() {
}
/**
- * Builder class for creating ClusterConfig instances with fluent configuration API.
+ * Builder class for creating DatabaseConfig instances with fluent configuration API.
*
- * The Builder provides a convenient way to configure cluster settings including connection
+ * The Builder provides a convenient way to configure database settings including connection
* pooling, weight-based priority, and health check strategies. All configuration methods return
* the builder instance for method chaining.
*
@@ -831,7 +830,7 @@ public StrategySupplier getHealthCheckStrategySupplier() {
*
*/
public static class Builder {
- /** The Redis endpoint for this cluster configuration. */
+ /** The Redis endpoint for this database configuration. */
private Endpoint endpoint;
/** The Jedis client configuration. */
@@ -840,7 +839,7 @@ public static class Builder {
/** Optional connection pool configuration. */
private GenericObjectPoolConfig connectionPoolConfig;
- /** Weight for cluster selection priority. Default: 1.0f */
+ /** Weight for database selection priority. Default: 1.0f */
private float weight = 1.0f;
/** Health check strategy supplier. Default: EchoStrategy.DEFAULT */
@@ -858,7 +857,7 @@ public Builder(Endpoint endpoint, JedisClientConfig clientConfig) {
}
/**
- * Sets the connection pool configuration for this cluster.
+ * Sets the connection pool configuration for this database.
*
* Connection pooling helps manage connections efficiently and provides better performance
* under load. If not specified, default pooling behavior will be used.
@@ -873,19 +872,19 @@ public Builder connectionPoolConfig(
}
/**
- * Sets the weight value for cluster selection priority.
+ * Sets the weight value for database selection priority.
*
- * Weight determines the priority order for cluster selection during failover. Clusters with
+ * Weight determines the priority order for database selection during failover. Databases with
* higher weights are preferred over those with lower weights. The system will attempt to use
- * the highest-weight healthy cluster available.
+ * the highest-weight healthy database available.
*
*
* Examples:
*
*
* - 1.0f: Standard priority (default)
- * - 0.8f: Lower priority (secondary cluster)
- * - 0.1f: Lowest priority (backup cluster)
+ * - 0.8f: Lower priority (secondary database)
+ * - 0.1f: Lowest priority (backup database)
*
* @param weight the weight value for priority-based selection
* @return this builder instance for method chaining
@@ -896,10 +895,10 @@ public Builder weight(float weight) {
}
/**
- * Sets a custom health check strategy supplier for this cluster.
+ * Sets a custom health check strategy supplier for this database.
*
- * The strategy supplier creates health check instances that monitor this cluster's
- * availability. Different clusters can use different health check strategies based on their
+ * The strategy supplier creates health check instances that monitor this database's
+ * availability. Different databases can use different health check strategies based on their
* specific requirements.
*
* @param healthCheckStrategySupplier the health check strategy supplier
@@ -917,14 +916,14 @@ public Builder healthCheckStrategySupplier(StrategySupplier healthCheckStrategyS
}
/**
- * Sets a specific health check strategy instance for this cluster.
+ * Sets a specific health check strategy instance for this database.
*
* This is a convenience method that wraps the provided strategy in a supplier that always
* returns the same instance. Use this when you have a pre-configured strategy instance.
*
*
* Note: The same strategy instance will be reused, so ensure it's
- * thread-safe if multiple clusters might use it.
+ * thread-safe if multiple databases might use it.
*
* @param healthCheckStrategy the health check strategy instance
* @return this builder instance for method chaining
@@ -940,15 +939,15 @@ public Builder healthCheckStrategy(HealthCheckStrategy healthCheckStrategy) {
}
/**
- * Enables or disables health checks for this cluster.
+ * Enables or disables health checks for this database.
*
- * When health checks are disabled (false), the cluster will not be proactively monitored for
+ * When health checks are disabled (false), the database will not be proactively monitored for
* availability. This means:
*
*
* - No background health check threads will be created
- * - Failback to this cluster must be triggered manually
- * - The cluster is assumed to be healthy unless circuit breaker opens
+ * - Failback to this database must be triggered manually
+ * - The database is assumed to be healthy unless circuit breaker opens
*
*
* When health checks are enabled (true) and no strategy supplier was previously set, the
@@ -967,17 +966,17 @@ public Builder healthCheckEnabled(boolean healthCheckEnabled) {
}
/**
- * Builds and returns a new ClusterConfig instance with the configured settings.
- * @return a new ClusterConfig instance
+ * Builds and returns a new DatabaseConfig instance with the configured settings.
+ * @return a new DatabaseConfig instance
*/
- public ClusterConfig build() {
- return new ClusterConfig(this);
+ public DatabaseConfig build() {
+ return new DatabaseConfig(this);
}
}
}
/**
- * Builder class for creating MultiClusterClientConfig instances with comprehensive configuration
+ * Builder class for creating MultiDatabaseConfig instances with comprehensive configuration
* options.
*
* The Builder provides a fluent API for configuring all aspects of multi-cluster failover
@@ -985,13 +984,13 @@ public ClusterConfig build() {
* sensible defaults based on production best practices while allowing fine-tuning for specific
* requirements.
*
- * @see MultiClusterClientConfig
- * @see ClusterConfig
+ * @see MultiDatabaseConfig
+ * @see DatabaseConfig
*/
public static class Builder {
- /** Array of cluster configurations defining available Redis endpoints. */
- private final List clusterConfigs = new ArrayList<>();
+ /** List of database configurations defining available Redis endpoints. */
+ private final List databaseConfigs = new ArrayList<>();
// ============ Retry Configuration Fields ============
/** Maximum number of retry attempts including the initial call. */
@@ -1058,35 +1057,35 @@ public Builder() {
/**
* Constructs a new Builder with the specified cluster configurations.
- * @param clusterConfigs array of cluster configurations defining available Redis endpoints
- * @throws JedisValidationException if clusterConfigs is null or empty
+ * @param databaseConfigs array of database configurations defining available Redis endpoints
+ * @throws JedisValidationException if databaseConfigs is null or empty
*/
- public Builder(ClusterConfig[] clusterConfigs) {
+ public Builder(DatabaseConfig[] databaseConfigs) {
- this(Arrays.asList(clusterConfigs));
+ this(Arrays.asList(databaseConfigs));
}
/**
- * Constructs a new Builder with the specified cluster configurations.
- * @param clusterConfigs list of cluster configurations defining available Redis endpoints
- * @throws JedisValidationException if clusterConfigs is null or empty
+ * Constructs a new Builder with the specified database configurations.
+ * @param databaseConfigs list of database configurations defining available Redis endpoints
+ * @throws JedisValidationException if databaseConfigs is null or empty
*/
- public Builder(List clusterConfigs) {
- this.clusterConfigs.addAll(clusterConfigs);
+ public Builder(List databaseConfigs) {
+ this.databaseConfigs.addAll(databaseConfigs);
}
/**
* Adds a pre-configured endpoint configuration.
*
- * This method allows adding a fully configured ClusterConfig instance, providing maximum
+ * This method allows adding a fully configured DatabaseConfig instance, providing maximum
* flexibility for advanced configurations including custom health check strategies, connection
* pool settings, etc.
*
- * @param clusterConfig the pre-configured cluster configuration
+ * @param databaseConfig the pre-configured database configuration
* @return this builder
*/
- public Builder endpoint(ClusterConfig clusterConfig) {
- this.clusterConfigs.add(clusterConfig);
+ public Builder endpoint(DatabaseConfig databaseConfig) {
+ this.databaseConfigs.add(databaseConfig);
return this;
}
@@ -1104,10 +1103,10 @@ public Builder endpoint(ClusterConfig clusterConfig) {
*/
public Builder endpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) {
- ClusterConfig clusterConfig = ClusterConfig.builder(endpoint, clientConfig).weight(weight)
+ DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight)
.build();
- this.clusterConfigs.add(clusterConfig);
+ this.databaseConfigs.add(databaseConfig);
return this;
}
@@ -1500,18 +1499,18 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts
}
/**
- * Builds and returns a new MultiClusterClientConfig instance with all configured settings.
+ * Builds and returns a new MultiDatabaseConfig instance with all configured settings.
*
* This method creates the final configuration object by copying all builder settings to the
* configuration instance. The builder can be reused after calling build() to create additional
* configurations with different settings.
*
- * @return a new MultiClusterClientConfig instance with the configured settings
+ * @return a new MultiDatabaseConfig instance with the configured settings
*/
- public MultiClusterClientConfig build() {
+ public MultiDatabaseConfig build() {
- MultiClusterClientConfig config = new MultiClusterClientConfig(
- this.clusterConfigs.toArray(new ClusterConfig[0]));
+ MultiDatabaseConfig config = new MultiDatabaseConfig(
+ this.databaseConfigs.toArray(new DatabaseConfig[0]));
// Copy retry configuration
config.retryMaxAttempts = this.retryMaxAttempts;
diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java
index 9df888651c..9224307a56 100644
--- a/src/main/java/redis/clients/jedis/MultiDbClient.java
+++ b/src/main/java/redis/clients/jedis/MultiDbClient.java
@@ -1,6 +1,6 @@
package redis.clients.jedis;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.builders.MultiDbClientBuilder;
import redis.clients.jedis.csc.Cache;
@@ -9,7 +9,7 @@
import redis.clients.jedis.mcf.MultiClusterPipeline;
import redis.clients.jedis.mcf.MultiClusterTransaction;
import redis.clients.jedis.providers.ConnectionProvider;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
import java.util.Set;
@@ -43,14 +43,14 @@
*
* MultiDbClient client = MultiDbClient.builder()
* .multiDbConfig(
- * MultiClusterClientConfig.builder()
+ * MultiDatabaseConfig.builder()
* .endpoint(
- * ClusterConfig.builder(
+ * DatabaseConfig.builder(
* primary,
* DefaultJedisClientConfig.builder().build())
* .weight(100.0f)
* .build())
- * .endpoint(ClusterConfig.builder(
+ * .endpoint(DatabaseConfig.builder(
* secondary,
* DefaultJedisClientConfig.builder().build())
* .weight(50.0f).build())
@@ -76,9 +76,9 @@
*
* @author Ivo Gaydazhiev
* @since 5.2.0
- * @see MultiClusterPooledConnectionProvider
+ * @see MultiDatabaseConnectionProvider
* @see CircuitBreakerCommandExecutor
- * @see MultiClusterClientConfig
+ * @see MultiDatabaseConfig
*/
@Experimental
public class MultiDbClient extends UnifiedJedis {
@@ -91,8 +91,7 @@ public class MultiDbClient extends UnifiedJedis {
* {@link #builder()} to create instances.
*
* @param commandExecutor the command executor (typically CircuitBreakerCommandExecutor)
- * @param connectionProvider the connection provider (typically
- * MultiClusterPooledConnectionProvider)
+ * @param connectionProvider the connection provider (typically MultiDatabaseConnectionProvider)
* @param commandObjects the command objects
* @param redisProtocol the Redis protocol version
* @param cache the client-side cache (may be null)
@@ -103,16 +102,16 @@ public class MultiDbClient extends UnifiedJedis {
}
/**
- * Returns the underlying MultiClusterPooledConnectionProvider.
+ * Returns the underlying MultiDatabaseConnectionProvider.
*
* This provides access to multi-cluster specific operations like manual failover, health status
* monitoring, and cluster switch event handling.
*
* @return the multi-cluster connection provider
- * @throws ClassCastException if the provider is not a MultiClusterPooledConnectionProvider
+ * @throws ClassCastException if the provider is not a MultiDatabaseConnectionProvider
*/
- private MultiClusterPooledConnectionProvider getMultiClusterProvider() {
- return (MultiClusterPooledConnectionProvider) this.provider;
+ private MultiDatabaseConnectionProvider getMultiDatabaseConnectionProvider() {
+ return (MultiDatabaseConnectionProvider) this.provider;
}
/**
@@ -124,20 +123,20 @@ private MultiClusterPooledConnectionProvider getMultiClusterProvider() {
* @param endpoint the endpoint to switch to
*/
public void setActiveDatabase(Endpoint endpoint) {
- getMultiClusterProvider().setActiveCluster(endpoint);
+ getMultiDatabaseConnectionProvider().setActiveDatabase(endpoint);
}
/**
* Adds a pre-configured cluster configuration.
*
- * This method allows adding a fully configured ClusterConfig instance, providing maximum
+ * This method allows adding a fully configured DatabaseConfig instance, providing maximum
* flexibility for advanced configurations including custom health check strategies, connection
* pool settings, etc.
*
- * @param clusterConfig the pre-configured cluster configuration
+ * @param databaseConfig the pre-configured database configuration
*/
- public void addEndpoint(ClusterConfig clusterConfig) {
- getMultiClusterProvider().add(clusterConfig);
+ public void addEndpoint(DatabaseConfig databaseConfig) {
+ getMultiDatabaseConnectionProvider().add(databaseConfig);
}
/**
@@ -153,10 +152,10 @@ public void addEndpoint(ClusterConfig clusterConfig) {
* @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint already exists
*/
public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) {
- ClusterConfig clusterConfig = ClusterConfig.builder(endpoint, clientConfig).weight(weight)
+ DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight)
.build();
- getMultiClusterProvider().add(clusterConfig);
+ getMultiDatabaseConnectionProvider().add(databaseConfig);
}
/**
@@ -167,7 +166,7 @@ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clien
* @return the set of all configured endpoints
*/
public Set getEndpoints() {
- return getMultiClusterProvider().getEndpoints();
+ return getMultiDatabaseConnectionProvider().getEndpoints();
}
/**
@@ -179,7 +178,7 @@ public Set getEndpoints() {
* @return the health status of the endpoint
*/
public boolean isHealthy(Endpoint endpoint) {
- return getMultiClusterProvider().isHealthy(endpoint);
+ return getMultiDatabaseConnectionProvider().isHealthy(endpoint);
}
/**
@@ -195,7 +194,7 @@ public boolean isHealthy(Endpoint endpoint) {
* healthy clusters available
*/
public void removeEndpoint(Endpoint endpoint) {
- getMultiClusterProvider().remove(endpoint);
+ getMultiDatabaseConnectionProvider().remove(endpoint);
}
/**
@@ -211,7 +210,7 @@ public void removeEndpoint(Endpoint endpoint) {
* or doesn't exist
*/
public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) {
- getMultiClusterProvider().forceActiveCluster(endpoint, forcedActiveDurationMs);
+ getMultiDatabaseConnectionProvider().forceActiveDatabase(endpoint, forcedActiveDurationMs);
}
/**
@@ -224,7 +223,7 @@ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs)
*/
@Override
public MultiClusterPipeline pipelined() {
- return new MultiClusterPipeline(getMultiClusterProvider(), commandObjects);
+ return new MultiClusterPipeline(getMultiDatabaseConnectionProvider(), commandObjects);
}
/**
@@ -237,7 +236,7 @@ public MultiClusterPipeline pipelined() {
*/
@Override
public MultiClusterTransaction multi() {
- return new MultiClusterTransaction((MultiClusterPooledConnectionProvider) provider, true,
+ return new MultiClusterTransaction((MultiDatabaseConnectionProvider) provider, true,
commandObjects);
}
@@ -252,11 +251,12 @@ public MultiClusterTransaction transaction(boolean doMulti) {
"It is not allowed to create Transaction from this " + getClass());
}
- return new MultiClusterTransaction(getMultiClusterProvider(), doMulti, commandObjects);
+ return new MultiClusterTransaction(getMultiDatabaseConnectionProvider(), doMulti,
+ commandObjects);
}
public Endpoint getActiveEndpoint() {
- return getMultiClusterProvider().getCluster().getEndpoint();
+ return getMultiDatabaseConnectionProvider().getDatabase().getEndpoint();
}
/**
diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java
index b175dbd319..ebe5a55f54 100644
--- a/src/main/java/redis/clients/jedis/UnifiedJedis.java
+++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java
@@ -34,7 +34,7 @@
import redis.clients.jedis.json.JsonObjectMapper;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
import redis.clients.jedis.mcf.MultiClusterPipeline;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
import redis.clients.jedis.mcf.MultiClusterTransaction;
import redis.clients.jedis.params.*;
import redis.clients.jedis.providers.*;
@@ -240,7 +240,7 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo
*
*/
@Experimental
- public UnifiedJedis(MultiClusterPooledConnectionProvider provider) {
+ public UnifiedJedis(MultiDatabaseConnectionProvider provider) {
this(new CircuitBreakerCommandExecutor(provider), provider);
}
@@ -5099,8 +5099,8 @@ public List tdigestByRevRank(String key, long... ranks) {
public PipelineBase pipelined() {
if (provider == null) {
throw new IllegalStateException("It is not allowed to create Pipeline from this " + getClass());
- } else if (provider instanceof MultiClusterPooledConnectionProvider) {
- return new MultiClusterPipeline((MultiClusterPooledConnectionProvider) provider, commandObjects);
+ } else if (provider instanceof MultiDatabaseConnectionProvider) {
+ return new MultiClusterPipeline((MultiDatabaseConnectionProvider) provider, commandObjects);
} else {
return new Pipeline(provider.getConnection(), true, commandObjects);
}
@@ -5120,8 +5120,8 @@ public AbstractTransaction multi() {
public AbstractTransaction transaction(boolean doMulti) {
if (provider == null) {
throw new IllegalStateException("It is not allowed to create Transaction from this " + getClass());
- } else if (provider instanceof MultiClusterPooledConnectionProvider) {
- return new MultiClusterTransaction((MultiClusterPooledConnectionProvider) provider, doMulti, commandObjects);
+ } else if (provider instanceof MultiDatabaseConnectionProvider) {
+ return new MultiClusterTransaction((MultiDatabaseConnectionProvider) provider, doMulti, commandObjects);
} else {
return new Transaction(provider.getConnection(), doMulti, true, commandObjects);
}
diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
index df3c1f86d6..c4592ec905 100644
--- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
+++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
@@ -2,12 +2,12 @@
import java.util.function.Consumer;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.executors.CommandExecutor;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
import redis.clients.jedis.providers.ConnectionProvider;
/**
@@ -38,14 +38,14 @@
*
* MultiDbClient client = MultiDbClient.builder()
* .multiDbConfig(
- * MultiClusterClientConfig.builder()
+ * MultiDatabaseConfig.builder()
* .endpoint(
- * ClusterConfig.builder(
+ * DatabaseConfig.builder(
* east,
* DefaultJedisClientConfig.builder().credentials(credentialsEast).build())
* .weight(100.0f)
* .build())
- * .endpoint(ClusterConfig.builder(
+ * .endpoint(DatabaseConfig.builder(
* west,
* DefaultJedisClientConfig.builder().credentials(credentialsWest).build())
* .weight(50.0f).build())
@@ -67,7 +67,7 @@ public abstract class MultiDbClientBuilder
extends AbstractClientBuilder, C> {
// Multi-db specific configuration fields
- private MultiClusterClientConfig multiDbConfig = null;
+ private MultiDatabaseConfig multiDbConfig = null;
private Consumer databaseSwitchListener = null;
/**
@@ -79,7 +79,7 @@ public abstract class MultiDbClientBuilder
* @param config the multi-database configuration
* @return this builder
*/
- public MultiDbClientBuilder multiDbConfig(MultiClusterClientConfig config) {
+ public MultiDbClientBuilder multiDbConfig(MultiDatabaseConfig config) {
this.multiDbConfig = config;
return this;
}
@@ -107,18 +107,17 @@ protected MultiDbClientBuilder self() {
@Override
protected ConnectionProvider createDefaultConnectionProvider() {
- if (this.multiDbConfig == null || this.multiDbConfig.getClusterConfigs() == null
- || this.multiDbConfig.getClusterConfigs().length < 1) {
+ if (this.multiDbConfig == null || this.multiDbConfig.getDatabaseConfigs() == null
+ || this.multiDbConfig.getDatabaseConfigs().length < 1) {
throw new IllegalArgumentException("At least one endpoint must be specified");
}
// Create the multi-cluster connection provider
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- multiDbConfig);
+ MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(multiDbConfig);
// Set database switch listener if provided
if (this.databaseSwitchListener != null) {
- provider.setClusterSwitchListener(this.databaseSwitchListener);
+ provider.setDatabaseSwitchListener(this.databaseSwitchListener);
}
return provider;
@@ -128,7 +127,7 @@ protected ConnectionProvider createDefaultConnectionProvider() {
protected CommandExecutor createDefaultCommandExecutor() {
// For multi-db clients, we always use CircuitBreakerCommandExecutor
return new CircuitBreakerCommandExecutor(
- (MultiClusterPooledConnectionProvider) this.connectionProvider);
+ (MultiDatabaseConnectionProvider) this.connectionProvider);
}
@Override
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
index 90f269bd70..5a5f24e063 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.executors.CommandExecutor;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
/**
* @author Allen Terleto (aterleto)
@@ -24,26 +24,26 @@
public class CircuitBreakerCommandExecutor extends CircuitBreakerFailoverBase
implements CommandExecutor {
- public CircuitBreakerCommandExecutor(MultiClusterPooledConnectionProvider provider) {
+ public CircuitBreakerCommandExecutor(MultiDatabaseConnectionProvider provider) {
super(provider);
}
@Override
public T executeCommand(CommandObject commandObject) {
- Cluster cluster = provider.getCluster(); // Pass this by reference for thread safety
+ Database database = provider.getDatabase(); // Pass this by reference for thread safety
DecorateSupplier supplier = Decorators
- .ofSupplier(() -> this.handleExecuteCommand(commandObject, cluster));
+ .ofSupplier(() -> this.handleExecuteCommand(commandObject, database));
- supplier.withCircuitBreaker(cluster.getCircuitBreaker());
- supplier.withRetry(cluster.getRetry());
+ supplier.withCircuitBreaker(database.getCircuitBreaker());
+ supplier.withRetry(database.getRetry());
supplier.withFallback(provider.getFallbackExceptionList(),
- e -> this.handleClusterFailover(commandObject, cluster));
+ e -> this.handleClusterFailover(commandObject, database));
try {
return supplier.decorate().get();
} catch (Exception e) {
- if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveCluster(cluster)) {
- clusterFailover(cluster);
+ if (database.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(database)) {
+ clusterFailover(database);
}
throw e;
}
@@ -52,7 +52,7 @@ public T executeCommand(CommandObject commandObject) {
/**
* Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios
*/
- private T handleExecuteCommand(CommandObject commandObject, Cluster cluster) {
+ private T handleExecuteCommand(CommandObject commandObject, Database cluster) {
Connection connection;
try {
connection = cluster.getConnection();
@@ -63,7 +63,7 @@ private T handleExecuteCommand(CommandObject commandObject, Cluster clust
try {
return connection.executeCommand(commandObject);
} catch (Exception e) {
- if (cluster.retryOnFailover() && !isActiveCluster(cluster)
+ if (cluster.retryOnFailover() && !isActiveDatabase(cluster)
&& isCircuitBreakerTrackedException(e, cluster)) {
throw new ConnectionFailoverException(
"Command failed during failover: " + cluster.getCircuitBreaker().getName(), e);
@@ -78,7 +78,7 @@ && isCircuitBreakerTrackedException(e, cluster)) {
* Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
* failure scenarios
*/
- private T handleClusterFailover(CommandObject commandObject, Cluster cluster) {
+ private T handleClusterFailover(CommandObject commandObject, Database cluster) {
clusterFailover(cluster);
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
index 40141fb009..cbe97f27a8 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
@@ -6,7 +6,7 @@
import java.util.concurrent.locks.ReentrantLock;
import redis.clients.jedis.annots.Experimental;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
import redis.clients.jedis.util.IOUtils;
/**
@@ -23,9 +23,9 @@
public class CircuitBreakerFailoverBase implements AutoCloseable {
private final Lock lock = new ReentrantLock(true);
- protected final MultiClusterPooledConnectionProvider provider;
+ protected final MultiDatabaseConnectionProvider provider;
- public CircuitBreakerFailoverBase(MultiClusterPooledConnectionProvider provider) {
+ public CircuitBreakerFailoverBase(MultiDatabaseConnectionProvider provider) {
this.provider = provider;
}
@@ -38,10 +38,10 @@ public void close() {
* Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
* failure scenarios
*/
- protected void clusterFailover(Cluster cluster) {
+ protected void clusterFailover(Database database) {
lock.lock();
- CircuitBreaker circuitBreaker = cluster.getCircuitBreaker();
+ CircuitBreaker circuitBreaker = database.getCircuitBreaker();
try {
// Check state to handle race conditions since iterateActiveCluster() is
// non-idempotent
@@ -51,29 +51,29 @@ protected void clusterFailover(Cluster cluster) {
// event publishing.
// To recover/transition from this forced state the user will need to manually failback
- Cluster activeCluster = provider.getCluster();
- // This should be possible only if active cluster is switched from by other reasons than
+ Database activeDatabase = provider.getDatabase();
+ // This should be possible only if active database is switched from by other reasons than
// circuit breaker, just before circuit breaker triggers
- if (activeCluster != cluster) {
+ if (activeDatabase != database) {
return;
}
- cluster.setGracePeriod();
+ database.setGracePeriod();
circuitBreaker.transitionToForcedOpenState();
- // Iterating the active cluster will allow subsequent calls to the executeCommand() to use
+ // Iterating the active database will allow subsequent calls to the executeCommand() to use
// the next
- // cluster's connection pool - according to the configuration's prioritization/order/weight
- provider.switchToHealthyCluster(SwitchReason.CIRCUIT_BREAKER, cluster);
+ // database's connection pool - according to the configuration's prioritization/order/weight
+ provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database);
}
// this check relies on the fact that many failover attempts can hit with the same CB,
// only the first one will trigger a failover, and make the CB FORCED_OPEN.
- // when the rest reaches here, the active cluster is already the next one, and should be
+ // when the rest reaches here, the active database is already the next one, and should be
// different than
// active CB. If its the same one and there are no more clusters to failover to, then throw an
// exception
- else if (cluster == provider.getCluster()) {
- provider.switchToHealthyCluster(SwitchReason.CIRCUIT_BREAKER, cluster);
+ else if (database == provider.getDatabase()) {
+ provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database);
}
// Ignore exceptions since we are already in a failure state
} finally {
@@ -81,13 +81,13 @@ else if (cluster == provider.getCluster()) {
}
}
- boolean isActiveCluster(Cluster cluster) {
- Cluster activeCluster = provider.getCluster();
- return activeCluster != null && activeCluster.equals(cluster);
+ boolean isActiveDatabase(Database database) {
+ Database activeDatabase = provider.getDatabase();
+ return activeDatabase != null && activeDatabase.equals(database);
}
- static boolean isCircuitBreakerTrackedException(Exception e, Cluster cluster) {
- return cluster.getCircuitBreaker().getCircuitBreakerConfig().getRecordExceptionPredicate()
+ static boolean isCircuitBreakerTrackedException(Exception e, Database database) {
+ return database.getCircuitBreaker().getCircuitBreakerConfig().getRecordExceptionPredicate()
.test(e);
}
}
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
index 51a5d35788..b45cd04c61 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
@@ -6,23 +6,22 @@
import redis.clients.jedis.Connection;
import redis.clients.jedis.annots.Experimental;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
/**
* ConnectionProvider with built-in retry, circuit-breaker, and failover to another cluster/database
* endpoint. With this executor users can seamlessly failover to Disaster Recovery (DR), Backup, and
- * Active-Active cluster(s) by using simple configuration which is passed through from Resilience4j
- * - https://resilience4j.readme.io/docs
+ * Active-Active cluster(s) by using simple configuration
*/
@Experimental
public class CircuitBreakerFailoverConnectionProvider extends CircuitBreakerFailoverBase {
- public CircuitBreakerFailoverConnectionProvider(MultiClusterPooledConnectionProvider provider) {
+ public CircuitBreakerFailoverConnectionProvider(MultiDatabaseConnectionProvider provider) {
super(provider);
}
public Connection getConnection() {
- Cluster cluster = provider.getCluster(); // Pass this by reference for thread safety
+ Database cluster = provider.getDatabase(); // Pass this by reference for thread safety
DecorateSupplier supplier = Decorators
.ofSupplier(() -> this.handleGetConnection(cluster));
@@ -35,7 +34,7 @@ public Connection getConnection() {
try {
return supplier.decorate().get();
} catch (Exception e) {
- if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveCluster(cluster)) {
+ if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(cluster)) {
clusterFailover(cluster);
}
throw e;
@@ -45,7 +44,7 @@ public Connection getConnection() {
/**
* Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios
*/
- private Connection handleGetConnection(Cluster cluster) {
+ private Connection handleGetConnection(Database cluster) {
Connection connection = cluster.getConnection();
connection.ping();
return connection;
@@ -55,7 +54,7 @@ private Connection handleGetConnection(Cluster cluster) {
* Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
* failure scenarios
*/
- private Connection handleClusterFailover(Cluster cluster) {
+ private Connection handleClusterFailover(Database cluster) {
clusterFailover(cluster);
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
index a103613ba0..b23a07289c 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
@@ -1,7 +1,7 @@
package redis.clients.jedis.mcf;
import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
/**
* Adapter that disables Resilience4j's built-in circuit breaker evaluation and help delegate
@@ -9,10 +9,10 @@
*
* This adapter sets maximum values for failure rate (100%) and minimum calls (Integer.MAX_VALUE) to
* effectively disable Resilience4j's automatic circuit breaker transitions, allowing
- * {@link MultiClusterPooledConnectionProvider.Cluster#evaluateThresholds(boolean)} to control when
- * the circuit breaker opens based on both minimum failure count AND failure rate.
+ * {@link MultiDatabaseConnectionProvider.Database#evaluateThresholds(boolean)} to control when the
+ * circuit breaker opens based on both minimum failure count AND failure rate.
*
- * @see MultiClusterPooledConnectionProvider.Cluster#evaluateThresholds(boolean)
+ * @see MultiDatabaseConnectionProvider.Database#evaluateThresholds(boolean)
*/
class CircuitBreakerThresholdsAdapter {
/** Maximum failure rate threshold (100%) to disable Resilience4j evaluation */
@@ -67,9 +67,9 @@ int getSlidingWindowSize() {
* method controls circuit breaker state based on the original configuration's dual-threshold
* logic.
*
- * @param multiClusterClientConfig configuration containing sliding window size
+ * @param multiDatabaseConfig configuration containing sliding window size
*/
- CircuitBreakerThresholdsAdapter(MultiClusterClientConfig multiClusterClientConfig) {
+ CircuitBreakerThresholdsAdapter(MultiDatabaseConfig multiDatabaseConfig) {
// IMPORTANT: failureRateThreshold is set to max theoretically disable Resilience4j's evaluation
// and rely on our custom evaluateThresholds() logic.
@@ -79,6 +79,6 @@ int getSlidingWindowSize() {
// and rely on our custom evaluateThresholds() logic.
minimumNumberOfCalls = Integer.MAX_VALUE;
- slidingWindowSize = multiClusterClientConfig.getCircuitBreakerSlidingWindowSize();
+ slidingWindowSize = multiDatabaseConfig.getCircuitBreakerSlidingWindowSize();
}
}
diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
index 1fe6cebe4d..2c3e283445 100644
--- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
+++ b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
@@ -1,7 +1,7 @@
package redis.clients.jedis.mcf;
import redis.clients.jedis.Endpoint;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
public class ClusterSwitchEventArgs {
@@ -9,10 +9,10 @@ public class ClusterSwitchEventArgs {
private final String ClusterName;
private final Endpoint Endpoint;
- public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Cluster cluster) {
+ public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Database database) {
this.reason = reason;
// TODO: @ggivo do we need cluster name?
- this.ClusterName = cluster.getCircuitBreaker().getName();
+ this.ClusterName = database.getCircuitBreaker().getName();
this.Endpoint = endpoint;
}
diff --git a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
index 3c73e17d6f..51173ace31 100644
--- a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
@@ -8,7 +8,7 @@
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.JedisPooled;
import redis.clients.jedis.UnifiedJedis;
-import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
+import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier;
public class EchoStrategy implements HealthCheckStrategy {
private static final int MAX_HEALTH_CHECK_POOL_SIZE = 2;
diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
index 3543517703..fec047824f 100644
--- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
+++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
@@ -11,7 +11,7 @@
* @see JedisFailoverException.JedisTemporarilyNotAvailableException
*/
public class JedisFailoverException extends JedisConnectionException {
- private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiClusterClientConfig was not "
+ private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiDatabaseConfig was not "
+ "provided with an additional cluster/database endpoint according to its prioritized sequence. "
+ "If applicable, consider falling back OR restarting with an available cluster/database endpoint";
@@ -28,8 +28,8 @@ public JedisFailoverException() {
* the max number of failover attempts has been exceeded. And there is still no healthy cluster.
*
* See the configuration properties
- * {@link redis.clients.jedis.MultiClusterClientConfig#maxNumFailoverAttempts} and
- * {@link redis.clients.jedis.MultiClusterClientConfig#delayInBetweenFailoverAttempts} for more
+ * {@link redis.clients.jedis.MultiDatabaseConfig#maxNumFailoverAttempts} and
+ * {@link redis.clients.jedis.MultiDatabaseConfig#delayInBetweenFailoverAttempts} for more
* details.
*/
public static class JedisPermanentlyNotAvailableException extends JedisFailoverException {
@@ -49,8 +49,8 @@ public JedisPermanentlyNotAvailableException() {
* temporary condition and it is possible that there will be a healthy cluster available.
*
* See the configuration properties
- * {@link redis.clients.jedis.MultiClusterClientConfig#maxNumFailoverAttempts} and
- * {@link redis.clients.jedis.MultiClusterClientConfig#delayInBetweenFailoverAttempts} for more
+ * {@link redis.clients.jedis.MultiDatabaseConfig#maxNumFailoverAttempts} and
+ * {@link redis.clients.jedis.MultiDatabaseConfig#delayInBetweenFailoverAttempts} for more
* details.
*/
public static class JedisTemporarilyNotAvailableException extends JedisFailoverException {
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
index c227b27e99..d23f56411f 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
@@ -21,7 +21,7 @@ public class MultiClusterPipeline extends PipelineBase implements Closeable {
private final Queue>> commands = new LinkedList<>();
@Deprecated
- public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider) {
+ public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider) {
super(new CommandObjects());
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
@@ -32,7 +32,7 @@ public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider)
}
}
- public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider,
+ public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider,
CommandObjects commandObjects) {
super(commandObjects);
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
index 2de927826c..6f634549e2 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
@@ -39,7 +39,7 @@ public class MultiClusterTransaction extends TransactionBase {
* @param provider
*/
@Deprecated
- public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider) {
+ public MultiClusterTransaction(MultiDatabaseConnectionProvider provider) {
this(provider, true);
}
@@ -50,7 +50,7 @@ public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider) {
* @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
*/
@Deprecated
- public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, boolean doMulti) {
+ public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean doMulti) {
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
try (Connection connection = failoverProvider.getConnection()) {
@@ -68,7 +68,7 @@ public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, bo
* @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
* @param commandObjects command objects
*/
- public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, boolean doMulti,
+ public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean doMulti,
CommandObjects commandObjects) {
super(commandObjects);
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java
similarity index 65%
rename from src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java
index a389a0d7b4..9b7d12cb96 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java
@@ -33,7 +33,7 @@
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.annots.VisibleForTesting;
import redis.clients.jedis.exceptions.JedisConnectionException;
@@ -41,7 +41,7 @@
import redis.clients.jedis.exceptions.JedisValidationException;
import redis.clients.jedis.mcf.JedisFailoverException.*;
import redis.clients.jedis.providers.ConnectionProvider;
-import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
+import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier;
import redis.clients.jedis.util.Pool;
/**
@@ -53,35 +53,34 @@
* configuration which is passed through from Resilience4j -
* docs
*
- * Support for manual failback is provided by way of {@link #setActiveCluster(Endpoint)}
+ * Support for manual failback is provided by way of {@link #setActiveDatabase(Endpoint)}
*
*/
@Experimental
-public class MultiClusterPooledConnectionProvider implements ConnectionProvider {
+public class MultiDatabaseConnectionProvider implements ConnectionProvider {
private final Logger log = LoggerFactory.getLogger(getClass());
/**
- * Ordered map of cluster/database endpoints which were provided at startup via the
- * MultiClusterClientConfig. Users can move down (failover) or (up) failback the map depending on
+ * Ordered map of databases. Users can move down (failover) or (up) failback the map depending on
* their availability and order.
*/
- private final Map multiClusterMap = new ConcurrentHashMap<>();
+ private final Map databaseMap = new ConcurrentHashMap<>();
/**
- * Indicates the actively used cluster/database endpoint (connection pool) amongst the
- * pre-configured list which were provided at startup via the MultiClusterClientConfig. All
- * traffic will be routed with this cluster/database
+ * Indicates the actively used database endpoint (connection pool) amongst the pre-configured list
+ * which were provided at startup via the MultiDatabaseConfig. All traffic will be routed with
+ * this database
*/
- private volatile Cluster activeCluster;
+ private volatile Database activeDatabase;
- private final Lock activeClusterChangeLock = new ReentrantLock(true);
+ private final Lock activeDatabaseChangeLock = new ReentrantLock(true);
/**
* Functional interface for listening to cluster switch events. The event args contain the reason
* for the switch, the endpoint, and the cluster.
*/
- private Consumer clusterSwitchListener;
+ private Consumer databaseSwitchListener;
private List> fallbackExceptionList;
@@ -99,33 +98,33 @@ public class MultiClusterPooledConnectionProvider implements ConnectionProvider
return t;
});
- // Store retry and circuit breaker configs for dynamic cluster addition/removal
+ // Store retry and circuit breaker configs for dynamic database addition/removal
private RetryConfig retryConfig;
private CircuitBreakerConfig circuitBreakerConfig;
- private MultiClusterClientConfig multiClusterClientConfig;
+ private MultiDatabaseConfig multiDatabaseConfig;
private AtomicLong failoverFreezeUntil = new AtomicLong(0);
private AtomicInteger failoverAttemptCount = new AtomicInteger(0);
- public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiClusterClientConfig) {
+ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig) {
- if (multiClusterClientConfig == null) throw new JedisValidationException(
- "MultiClusterClientConfig must not be NULL for MultiClusterPooledConnectionProvider");
+ if (multiDatabaseConfig == null) throw new JedisValidationException(
+ "MultiDatabaseConfig must not be NULL for MultiDatabaseConnectionProvider");
- this.multiClusterClientConfig = multiClusterClientConfig;
+ this.multiDatabaseConfig = multiDatabaseConfig;
////////////// Configure Retry ////////////////////
RetryConfig.Builder retryConfigBuilder = RetryConfig.custom();
- retryConfigBuilder.maxAttempts(multiClusterClientConfig.getRetryMaxAttempts());
+ retryConfigBuilder.maxAttempts(multiDatabaseConfig.getRetryMaxAttempts());
retryConfigBuilder.intervalFunction(
- IntervalFunction.ofExponentialBackoff(multiClusterClientConfig.getRetryWaitDuration(),
- multiClusterClientConfig.getRetryWaitDurationExponentialBackoffMultiplier()));
+ IntervalFunction.ofExponentialBackoff(multiDatabaseConfig.getRetryWaitDuration(),
+ multiDatabaseConfig.getRetryWaitDurationExponentialBackoffMultiplier()));
retryConfigBuilder.failAfterMaxAttempts(false); // JedisConnectionException will be thrown
retryConfigBuilder.retryExceptions(
- multiClusterClientConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new));
+ multiDatabaseConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new));
- List retryIgnoreExceptionList = multiClusterClientConfig.getRetryIgnoreExceptionList();
+ List retryIgnoreExceptionList = multiDatabaseConfig.getRetryIgnoreExceptionList();
if (retryIgnoreExceptionList != null)
retryConfigBuilder.ignoreExceptions(retryIgnoreExceptionList.stream().toArray(Class[]::new));
@@ -136,14 +135,14 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste
CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom();
CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(
- multiClusterClientConfig);
+ multiDatabaseConfig);
circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls());
circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold());
circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize());
circuitBreakerConfigBuilder.slidingWindowType(adapter.getSlidingWindowType());
- circuitBreakerConfigBuilder.recordExceptions(multiClusterClientConfig
- .getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
+ circuitBreakerConfigBuilder.recordExceptions(
+ multiDatabaseConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
circuitBreakerConfigBuilder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State
// transitions
// are
@@ -152,45 +151,45 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste
// states
// are used
- List circuitBreakerIgnoreExceptionList = multiClusterClientConfig
+ List circuitBreakerIgnoreExceptionList = multiDatabaseConfig
.getCircuitBreakerIgnoreExceptionList();
if (circuitBreakerIgnoreExceptionList != null) circuitBreakerConfigBuilder
.ignoreExceptions(circuitBreakerIgnoreExceptionList.stream().toArray(Class[]::new));
this.circuitBreakerConfig = circuitBreakerConfigBuilder.build();
- ////////////// Configure Cluster Map ////////////////////
+ ////////////// Configure Database Map ////////////////////
- ClusterConfig[] clusterConfigs = multiClusterClientConfig.getClusterConfigs();
+ DatabaseConfig[] databaseConfigs = multiDatabaseConfig.getDatabaseConfigs();
- // Now add clusters - health checks will start but events will be queued
- for (ClusterConfig config : clusterConfigs) {
- addClusterInternal(multiClusterClientConfig, config);
+ // Now add databases - health checks will start but events will be queued
+ for (DatabaseConfig config : databaseConfigs) {
+ addClusterInternal(multiDatabaseConfig, config);
}
// Initialize StatusTracker for waiting on health check results
StatusTracker statusTracker = new StatusTracker(healthStatusManager);
// Wait for initial health check results and select active cluster based on weights
- activeCluster = waitForInitialHealthyCluster(statusTracker);
+ activeDatabase = waitForInitialHealthyCluster(statusTracker);
// Mark initialization as complete - handleHealthStatusChange can now process events
initializationComplete = true;
- Cluster temp = activeCluster;
+ Database temp = activeDatabase;
if (!temp.isHealthy()) {
- // Race condition: Direct assignment to 'activeCluster' is not thread safe because
+ // Race condition: Direct assignment to 'activeDatabase' is not thread safe because
// 'onHealthStatusChange' may execute concurrently once 'initializationComplete'
// is set to true.
- // Simple rule is to never assign value of 'activeCluster' outside of
- // 'activeClusterChangeLock' once the 'initializationComplete' is done.
+ // Simple rule is to never assign value of 'activeDatabase' outside of
+ // 'activeDatabaseChangeLock' once the 'initializationComplete' is done.
waitForInitialHealthyCluster(statusTracker);
- switchToHealthyCluster(SwitchReason.HEALTH_CHECK, temp);
+ switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, temp);
}
- this.fallbackExceptionList = multiClusterClientConfig.getFallbackExceptionList();
+ this.fallbackExceptionList = multiDatabaseConfig.getFallbackExceptionList();
// Start periodic failback checker
- if (multiClusterClientConfig.isFailbackSupported()) {
- long failbackInterval = multiClusterClientConfig.getFailbackCheckInterval();
+ if (multiDatabaseConfig.isFailbackSupported()) {
+ long failbackInterval = multiDatabaseConfig.getFailbackCheckInterval();
failbackScheduler.scheduleAtFixedRate(this::periodicFailbackCheck, failbackInterval,
failbackInterval, TimeUnit.MILLISECONDS);
}
@@ -198,25 +197,25 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste
/**
* Adds a new cluster endpoint to the provider.
- * @param clusterConfig the configuration for the new cluster
+ * @param databaseConfig the configuration for the new database
* @throws JedisValidationException if the endpoint already exists
*/
- public void add(ClusterConfig clusterConfig) {
- if (clusterConfig == null) {
- throw new JedisValidationException("ClusterConfig must not be null");
+ public void add(DatabaseConfig databaseConfig) {
+ if (databaseConfig == null) {
+ throw new JedisValidationException("DatabaseConfig must not be null");
}
- Endpoint endpoint = clusterConfig.getEndpoint();
- if (multiClusterMap.containsKey(endpoint)) {
+ Endpoint endpoint = databaseConfig.getEndpoint();
+ if (databaseMap.containsKey(endpoint)) {
throw new JedisValidationException(
"Endpoint " + endpoint + " already exists in the provider");
}
- activeClusterChangeLock.lock();
+ activeDatabaseChangeLock.lock();
try {
- addClusterInternal(multiClusterClientConfig, clusterConfig);
+ addClusterInternal(multiDatabaseConfig, databaseConfig);
} finally {
- activeClusterChangeLock.unlock();
+ activeDatabaseChangeLock.unlock();
}
}
@@ -231,35 +230,35 @@ public void remove(Endpoint endpoint) {
throw new JedisValidationException("Endpoint must not be null");
}
- if (!multiClusterMap.containsKey(endpoint)) {
+ if (!databaseMap.containsKey(endpoint)) {
throw new JedisValidationException(
"Endpoint " + endpoint + " does not exist in the provider");
}
- if (multiClusterMap.size() < 2) {
+ if (databaseMap.size() < 2) {
throw new JedisValidationException("Cannot remove the last remaining endpoint");
}
log.debug("Removing endpoint {}", endpoint);
- Map.Entry notificationData = null;
- activeClusterChangeLock.lock();
+ Map.Entry notificationData = null;
+ activeDatabaseChangeLock.lock();
try {
- Cluster clusterToRemove = multiClusterMap.get(endpoint);
- boolean isActiveCluster = (activeCluster == clusterToRemove);
+ Database databaseToRemove = databaseMap.get(endpoint);
+ boolean isActiveDatabase = (activeDatabase == databaseToRemove);
- if (isActiveCluster) {
+ if (isActiveDatabase) {
log.info("Active cluster is being removed. Finding a new active cluster...");
- Map.Entry candidate = findWeightedHealthyClusterToIterate(
- clusterToRemove);
+ Map.Entry candidate = findWeightedHealthyClusterToIterate(
+ databaseToRemove);
if (candidate != null) {
- Cluster selectedCluster = candidate.getValue();
- if (setActiveCluster(selectedCluster, true)) {
+ Database selectedCluster = candidate.getValue();
+ if (setActiveDatabase(selectedCluster, true)) {
log.info("New active cluster set to {}", candidate.getKey());
notificationData = candidate;
}
} else {
throw new JedisException(
- "Cluster can not be removed due to no healthy cluster available to switch!");
+ "Database can not be removed due to no healthy cluster available to switch!");
}
}
@@ -268,15 +267,15 @@ public void remove(Endpoint endpoint) {
healthStatusManager.remove(endpoint);
// Remove from cluster map
- multiClusterMap.remove(endpoint);
+ databaseMap.remove(endpoint);
// Close the cluster resources
- if (clusterToRemove != null) {
- clusterToRemove.setDisabled(true);
- clusterToRemove.close();
+ if (databaseToRemove != null) {
+ databaseToRemove.setDisabled(true);
+ databaseToRemove.close();
}
} finally {
- activeClusterChangeLock.unlock();
+ activeDatabaseChangeLock.unlock();
}
if (notificationData != null) {
onClusterSwitch(SwitchReason.FORCED, notificationData.getKey(), notificationData.getValue());
@@ -284,17 +283,16 @@ public void remove(Endpoint endpoint) {
}
/**
- * Internal method to add a cluster configuration. This method is not thread-safe and should be
+ * Internal method to add a database configuration. This method is not thread-safe and should be
* called within appropriate locks.
*/
- private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfig,
- ClusterConfig config) {
- if (multiClusterMap.containsKey(config.getEndpoint())) {
+ private void addClusterInternal(MultiDatabaseConfig multiDatabaseConfig, DatabaseConfig config) {
+ if (databaseMap.containsKey(config.getEndpoint())) {
throw new JedisValidationException(
"Endpoint " + config.getEndpoint() + " already exists in the provider");
}
- String clusterId = "cluster:" + config.getEndpoint();
+ String clusterId = "database:" + config.getEndpoint();
Retry retry = RetryRegistry.of(retryConfig).retry(clusterId);
@@ -315,7 +313,7 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi
.hostAndPort(hostPort(config.getEndpoint())).clientConfig(config.getJedisClientConfig())
.poolConfig(config.getConnectionPoolConfig()).build();
- Cluster cluster;
+ Database database;
StrategySupplier strategySupplier = config.getHealthCheckStrategySupplier();
if (strategySupplier != null) {
HealthCheckStrategy hcs = strategySupplier.get(hostPort(config.getEndpoint()),
@@ -323,19 +321,19 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi
// Register listeners BEFORE adding clusters to avoid missing events
healthStatusManager.registerListener(config.getEndpoint(), this::onHealthStatusChange);
HealthCheck hc = healthStatusManager.add(config.getEndpoint(), hcs);
- cluster = new Cluster(config.getEndpoint(), pool, retry, hc, circuitBreaker,
- config.getWeight(), multiClusterClientConfig);
+ database = new Database(config.getEndpoint(), pool, retry, hc, circuitBreaker,
+ config.getWeight(), multiDatabaseConfig);
} else {
- cluster = new Cluster(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(),
- multiClusterClientConfig);
+ database = new Database(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(),
+ multiDatabaseConfig);
}
- multiClusterMap.put(config.getEndpoint(), cluster);
+ databaseMap.put(config.getEndpoint(), database);
// this is the place where we listen tracked errors and check if
- // thresholds are exceeded for the cluster
+ // thresholds are exceeded for the database
circuitBreakerEventPublisher.onError(event -> {
- cluster.evaluateThresholds(false);
+ database.evaluateThresholds(false);
});
}
@@ -353,14 +351,14 @@ void onHealthStatusChange(HealthStatusChangeEvent eventArgs) {
HealthStatus newStatus = eventArgs.getNewStatus();
log.debug("Health status changed for {} from {} to {}", endpoint, eventArgs.getOldStatus(),
newStatus);
- Cluster clusterWithHealthChange = multiClusterMap.get(endpoint);
+ Database clusterWithHealthChange = databaseMap.get(endpoint);
if (clusterWithHealthChange == null) return;
if (initializationComplete) {
- if (!newStatus.isHealthy() && clusterWithHealthChange == activeCluster) {
+ if (!newStatus.isHealthy() && clusterWithHealthChange == activeDatabase) {
clusterWithHealthChange.setGracePeriod();
- switchToHealthyCluster(SwitchReason.HEALTH_CHECK, clusterWithHealthChange);
+ switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, clusterWithHealthChange);
}
}
}
@@ -373,46 +371,46 @@ void onHealthStatusChange(HealthStatusChangeEvent eventArgs) {
* @return the first healthy cluster found, ordered by weight (highest first)
* @throws JedisConnectionException if all clusters are unhealthy
*/
- private Cluster waitForInitialHealthyCluster(StatusTracker statusTracker) {
+ private Database waitForInitialHealthyCluster(StatusTracker statusTracker) {
// Sort clusters by weight in descending order
- List> sortedClusters = multiClusterMap.entrySet().stream()
- .sorted(Map.Entry. comparingByValue(
- Comparator.comparing(Cluster::getWeight).reversed()))
+ List> sortedClusters = databaseMap.entrySet().stream()
+ .sorted(Map.Entry. comparingByValue(
+ Comparator.comparing(Database::getWeight).reversed()))
.collect(Collectors.toList());
log.info("Selecting initial cluster from {} configured clusters", sortedClusters.size());
// Select cluster in weight order
- for (Map.Entry entry : sortedClusters) {
+ for (Map.Entry entry : sortedClusters) {
Endpoint endpoint = entry.getKey();
- Cluster cluster = entry.getValue();
+ Database database = entry.getValue();
- log.info("Evaluating cluster {} (weight: {})", endpoint, cluster.getWeight());
+ log.info("Evaluating database {} (weight: {})", endpoint, database.getWeight());
HealthStatus status;
// Check if health checks are enabled for this endpoint
if (healthStatusManager.hasHealthCheck(endpoint)) {
log.info("Health checks enabled for {}, waiting for result", endpoint);
- // Wait for this cluster's health status to be determined
+ // Wait for this database's health status to be determined
status = statusTracker.waitForHealthStatus(endpoint);
} else {
// No health check configured - assume healthy
- log.info("No health check configured for cluster {}, defaulting to HEALTHY", endpoint);
+ log.info("No health check configured for database {}, defaulting to HEALTHY", endpoint);
status = HealthStatus.HEALTHY;
}
if (status.isHealthy()) {
- log.info("Found healthy cluster: {} (weight: {})", endpoint, cluster.getWeight());
- return cluster;
+ log.info("Found healthy database: {} (weight: {})", endpoint, database.getWeight());
+ return database;
} else {
- log.info("Cluster {} is unhealthy, trying next cluster", endpoint);
+ log.info("Database {} is unhealthy, trying next database", endpoint);
}
}
// All clusters are unhealthy
throw new JedisConnectionException(
- "All configured clusters are unhealthy. Cannot initialize MultiClusterPooledConnectionProvider.");
+ "All configured clusters are unhealthy. Cannot initialize MultiDatabaseConnectionProvider.");
}
/**
@@ -422,36 +420,36 @@ private Cluster waitForInitialHealthyCluster(StatusTracker statusTracker) {
void periodicFailbackCheck() {
try {
// Find the best candidate cluster for failback
- Map.Entry bestCandidate = null;
- float bestWeight = activeCluster.getWeight();
+ Map.Entry bestCandidate = null;
+ float bestWeight = activeDatabase.getWeight();
- for (Map.Entry entry : multiClusterMap.entrySet()) {
- Cluster cluster = entry.getValue();
+ for (Map.Entry entry : databaseMap.entrySet()) {
+ Database database = entry.getValue();
- // Skip if this is already the active cluster
- if (cluster == activeCluster) {
+ // Skip if this is already the active database
+ if (database == activeDatabase) {
continue;
}
- // Skip if cluster is not healthy
- if (!cluster.isHealthy()) {
+ // Skip if database is not healthy
+ if (!database.isHealthy()) {
continue;
}
- // This cluster is a valid candidate
- if (cluster.getWeight() > bestWeight) {
+ // This database is a valid candidate
+ if (database.getWeight() > bestWeight) {
bestCandidate = entry;
- bestWeight = cluster.getWeight();
+ bestWeight = database.getWeight();
}
}
// Perform failback if we found a better candidate
if (bestCandidate != null) {
- Cluster selectedCluster = bestCandidate.getValue();
+ Database selectedCluster = bestCandidate.getValue();
log.info("Performing failback from {} to {} (higher weight cluster available)",
- activeCluster.getCircuitBreaker().getName(),
+ activeDatabase.getCircuitBreaker().getName(),
selectedCluster.getCircuitBreaker().getName());
- if (setActiveCluster(selectedCluster, true)) {
+ if (setActiveDatabase(selectedCluster, true)) {
onClusterSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedCluster);
}
}
@@ -460,24 +458,24 @@ void periodicFailbackCheck() {
}
}
- Endpoint switchToHealthyCluster(SwitchReason reason, Cluster iterateFrom) {
- Map.Entry clusterToIterate = findWeightedHealthyClusterToIterate(
+ Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) {
+ Map.Entry databaseToIterate = findWeightedHealthyClusterToIterate(
iterateFrom);
- if (clusterToIterate == null) {
+ if (databaseToIterate == null) {
// throws exception anyway since not able to iterate
handleNoHealthyCluster();
}
- Cluster cluster = clusterToIterate.getValue();
- boolean changed = setActiveCluster(cluster, false);
+ Database database = databaseToIterate.getValue();
+ boolean changed = setActiveDatabase(database, false);
if (!changed) return null;
failoverAttemptCount.set(0);
- onClusterSwitch(reason, clusterToIterate.getKey(), cluster);
- return clusterToIterate.getKey();
+ onClusterSwitch(reason, databaseToIterate.getKey(), database);
+ return databaseToIterate.getKey();
}
private void handleNoHealthyCluster() {
- int max = multiClusterClientConfig.getMaxNumFailoverAttempts();
+ int max = multiDatabaseConfig.getMaxNumFailoverAttempts();
log.error("No healthy cluster available to switch to");
if (failoverAttemptCount.get() > max) {
throw new JedisPermanentlyNotAvailableException();
@@ -496,7 +494,7 @@ private boolean markAsFreeze() {
long until = failoverFreezeUntil.get();
long now = System.currentTimeMillis();
if (until <= now) {
- long nextUntil = now + multiClusterClientConfig.getDelayInBetweenFailoverAttempts();
+ long nextUntil = now + multiDatabaseConfig.getDelayInBetweenFailoverAttempts();
if (failoverFreezeUntil.compareAndSet(until, nextUntil)) {
return true;
}
@@ -515,20 +513,20 @@ private boolean markAsFreeze() {
*/
@VisibleForTesting
public void assertOperability() {
- Cluster current = activeCluster;
+ Database current = activeDatabase;
if (!current.isHealthy() && !this.canIterateFrom(current)) {
handleNoHealthyCluster();
}
}
- private static Comparator> maxByWeight = Map.Entry
- . comparingByValue(Comparator.comparing(Cluster::getWeight));
+ private static Comparator> maxByWeight = Map.Entry
+ . comparingByValue(Comparator.comparing(Database::getWeight));
- private static Predicate> filterByHealth = c -> c.getValue()
+ private static Predicate> filterByHealth = c -> c.getValue()
.isHealthy();
- private Map.Entry findWeightedHealthyClusterToIterate(Cluster iterateFrom) {
- return multiClusterMap.entrySet().stream().filter(filterByHealth)
+ private Map.Entry findWeightedHealthyClusterToIterate(Database iterateFrom) {
+ return databaseMap.entrySet().stream().filter(filterByHealth)
.filter(entry -> entry.getValue() != iterateFrom).max(maxByWeight).orElse(null);
}
@@ -539,12 +537,12 @@ private Map.Entry findWeightedHealthyClusterToIterate(Cluster
* from the target connection.
*/
public void validateTargetConnection(Endpoint endpoint) {
- Cluster cluster = multiClusterMap.get(endpoint);
- validateTargetConnection(cluster);
+ Database database = databaseMap.get(endpoint);
+ validateTargetConnection(database);
}
- private void validateTargetConnection(Cluster cluster) {
- CircuitBreaker circuitBreaker = cluster.getCircuitBreaker();
+ private void validateTargetConnection(Database database) {
+ CircuitBreaker circuitBreaker = database.getCircuitBreaker();
State originalState = circuitBreaker.getState();
try {
@@ -555,7 +553,7 @@ private void validateTargetConnection(Cluster cluster) {
// yet
circuitBreaker.transitionToClosedState();
- try (Connection targetConnection = cluster.getConnection()) {
+ try (Connection targetConnection = database.getConnection()) {
targetConnection.ping();
}
} catch (Exception e) {
@@ -575,77 +573,77 @@ private void validateTargetConnection(Cluster cluster) {
* @return
*/
public Set getEndpoints() {
- return new HashSet<>(multiClusterMap.keySet());
+ return new HashSet<>(databaseMap.keySet());
}
- public void setActiveCluster(Endpoint endpoint) {
+ public void setActiveDatabase(Endpoint endpoint) {
if (endpoint == null) {
throw new JedisValidationException(
"Provided endpoint is null. Please use one from the configuration");
}
- Cluster cluster = multiClusterMap.get(endpoint);
- if (cluster == null) {
+ Database database = databaseMap.get(endpoint);
+ if (database == null) {
throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within "
+ "the configured endpoints. Please use one from the configuration");
}
- if (setActiveCluster(cluster, true)) {
- onClusterSwitch(SwitchReason.FORCED, endpoint, cluster);
+ if (setActiveDatabase(database, true)) {
+ onClusterSwitch(SwitchReason.FORCED, endpoint, database);
}
}
- public void forceActiveCluster(Endpoint endpoint, long forcedActiveDuration) {
- Cluster cluster = multiClusterMap.get(endpoint);
+ public void forceActiveDatabase(Endpoint endpoint, long forcedActiveDuration) {
+ Database database = databaseMap.get(endpoint);
- if (cluster == null) {
+ if (database == null) {
throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within "
+ "the configured endpoints. Please use one from the configuration");
}
- cluster.clearGracePeriod();
- if (!cluster.isHealthy()) {
+ database.clearGracePeriod();
+ if (!database.isHealthy()) {
throw new JedisValidationException("Provided endpoint: " + endpoint
+ " is not healthy. Please consider a healthy endpoint from the configuration");
}
- multiClusterMap.entrySet().stream().forEach(entry -> {
+ databaseMap.entrySet().stream().forEach(entry -> {
if (entry.getKey() != endpoint) {
entry.getValue().setGracePeriod(forcedActiveDuration);
}
});
- setActiveCluster(endpoint);
+ setActiveDatabase(endpoint);
}
- private boolean setActiveCluster(Cluster cluster, boolean validateConnection) {
- // Cluster cluster = clusterEntry.getValue();
+ private boolean setActiveDatabase(Database database, boolean validateConnection) {
+ // Database database = clusterEntry.getValue();
// Field-level synchronization is used to avoid the edge case in which
// incrementActiveMultiClusterIndex() is called at the same time
- activeClusterChangeLock.lock();
- Cluster oldCluster;
+ activeDatabaseChangeLock.lock();
+ Database oldCluster;
try {
- // Allows an attempt to reset the current cluster from a FORCED_OPEN to CLOSED state in the
+ // Allows an attempt to reset the current database from a FORCED_OPEN to CLOSED state in the
// event that no failover is possible
- if (activeCluster == cluster && !cluster.isCBForcedOpen()) return false;
+ if (activeDatabase == database && !database.isCBForcedOpen()) return false;
- if (validateConnection) validateTargetConnection(cluster);
+ if (validateConnection) validateTargetConnection(database);
- String originalClusterName = getClusterCircuitBreaker().getName();
+ String originalClusterName = getDatabaseCircuitBreaker().getName();
- if (activeCluster == cluster)
- log.warn("Cluster/database endpoint '{}' successfully closed its circuit breaker",
+ if (activeDatabase == database)
+ log.warn("Database/database endpoint '{}' successfully closed its circuit breaker",
originalClusterName);
- else log.warn("Cluster/database endpoint successfully updated from '{}' to '{}'",
- originalClusterName, cluster.circuitBreaker.getName());
- oldCluster = activeCluster;
- activeCluster = cluster;
+ else log.warn("Database/database endpoint successfully updated from '{}' to '{}'",
+ originalClusterName, database.circuitBreaker.getName());
+ oldCluster = activeDatabase;
+ activeDatabase = database;
} finally {
- activeClusterChangeLock.unlock();
+ activeDatabaseChangeLock.unlock();
}
- boolean switched = oldCluster != cluster;
- if (switched && this.multiClusterClientConfig.isFastFailover()) {
- log.info("Forcing disconnect of all active connections in old cluster: {}",
+ boolean switched = oldCluster != database;
+ if (switched && this.multiDatabaseConfig.isFastFailover()) {
+ log.info("Forcing disconnect of all active connections in old database: {}",
oldCluster.circuitBreaker.getName());
oldCluster.forceDisconnect();
- log.info("Disconnected all active connections in old cluster: {}",
+ log.info("Disconnected all active connections in old database: {}",
oldCluster.circuitBreaker.getName());
}
@@ -671,38 +669,38 @@ public void close() {
}
// Close all cluster connection pools
- for (Cluster cluster : multiClusterMap.values()) {
- cluster.close();
+ for (Database database : databaseMap.values()) {
+ database.close();
}
}
@Override
public Connection getConnection() {
- return activeCluster.getConnection();
+ return activeDatabase.getConnection();
}
public Connection getConnection(Endpoint endpoint) {
- return multiClusterMap.get(endpoint).getConnection();
+ return databaseMap.get(endpoint).getConnection();
}
@Override
public Connection getConnection(CommandArguments args) {
- return activeCluster.getConnection();
+ return activeDatabase.getConnection();
}
@Override
public Map, Pool> getConnectionMap() {
- ConnectionPool connectionPool = activeCluster.connectionPool;
+ ConnectionPool connectionPool = activeDatabase.connectionPool;
return Collections.singletonMap(connectionPool.getFactory(), connectionPool);
}
- public Cluster getCluster() {
- return activeCluster;
+ public Database getDatabase() {
+ return activeDatabase;
}
@VisibleForTesting
- public Cluster getCluster(Endpoint endpoint) {
- return multiClusterMap.get(endpoint);
+ public Database getDatabase(Endpoint endpoint) {
+ return databaseMap.get(endpoint);
}
/**
@@ -713,7 +711,7 @@ public Cluster getCluster(Endpoint endpoint) {
* @return the active cluster endpoint
*/
public Endpoint getActiveEndpoint() {
- return activeCluster.getEndpoint();
+ return activeDatabase.getEndpoint();
}
/**
@@ -722,51 +720,51 @@ public Endpoint getActiveEndpoint() {
* @return the health status of the endpoint
*/
public boolean isHealthy(Endpoint endpoint) {
- Cluster cluster = getCluster(endpoint);
- if (cluster == null) {
+ Database database = getDatabase(endpoint);
+ if (database == null) {
throw new JedisValidationException(
"Endpoint " + endpoint + " does not exist in the provider");
}
- return cluster.isHealthy();
+ return database.isHealthy();
}
- public CircuitBreaker getClusterCircuitBreaker() {
- return activeCluster.getCircuitBreaker();
+ public CircuitBreaker getDatabaseCircuitBreaker() {
+ return activeDatabase.getCircuitBreaker();
}
/**
* Indicates the final cluster/database endpoint (connection pool), according to the
- * pre-configured list provided at startup via the MultiClusterClientConfig, is unavailable and
+ * pre-configured list provided at startup via the MultiDatabaseConfig, is unavailable and
* therefore no further failover is possible. Users can manually failback to an available cluster
*/
- public boolean canIterateFrom(Cluster iterateFrom) {
- Map.Entry e = findWeightedHealthyClusterToIterate(iterateFrom);
+ public boolean canIterateFrom(Database iterateFrom) {
+ Map.Entry e = findWeightedHealthyClusterToIterate(iterateFrom);
return e != null;
}
- public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Cluster cluster) {
- if (clusterSwitchListener != null) {
- ClusterSwitchEventArgs eventArgs = new ClusterSwitchEventArgs(reason, endpoint, cluster);
- clusterSwitchListener.accept(eventArgs);
+ public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Database database) {
+ if (databaseSwitchListener != null) {
+ ClusterSwitchEventArgs eventArgs = new ClusterSwitchEventArgs(reason, endpoint, database);
+ databaseSwitchListener.accept(eventArgs);
}
}
- public void setClusterSwitchListener(Consumer clusterSwitchListener) {
- this.clusterSwitchListener = clusterSwitchListener;
+ public void setDatabaseSwitchListener(Consumer databaseSwitchListener) {
+ this.databaseSwitchListener = databaseSwitchListener;
}
public List> getFallbackExceptionList() {
return fallbackExceptionList;
}
- public static class Cluster {
+ public static class Database {
private TrackingConnectionPool connectionPool;
private final Retry retry;
private final CircuitBreaker circuitBreaker;
private final float weight;
private final HealthCheck healthCheck;
- private final MultiClusterClientConfig multiClusterClientConfig;
+ private final MultiDatabaseConfig multiDbConfig;
private boolean disabled = false;
private final Endpoint endpoint;
@@ -774,29 +772,28 @@ public static class Cluster {
private volatile long gracePeriodEndsAt = 0;
private final Logger log = LoggerFactory.getLogger(getClass());
- private Cluster(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
- CircuitBreaker circuitBreaker, float weight,
- MultiClusterClientConfig multiClusterClientConfig) {
+ private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
+ CircuitBreaker circuitBreaker, float weight, MultiDatabaseConfig multiDatabaseConfig) {
this.endpoint = endpoint;
this.connectionPool = connectionPool;
this.retry = retry;
this.circuitBreaker = circuitBreaker;
this.weight = weight;
- this.multiClusterClientConfig = multiClusterClientConfig;
+ this.multiDbConfig = multiDatabaseConfig;
this.healthCheck = null;
}
- private Cluster(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
+ private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
HealthCheck hc, CircuitBreaker circuitBreaker, float weight,
- MultiClusterClientConfig multiClusterClientConfig) {
+ MultiDatabaseConfig multiDbConfig) {
this.endpoint = endpoint;
this.connectionPool = connectionPool;
this.retry = retry;
this.circuitBreaker = circuitBreaker;
this.weight = weight;
- this.multiClusterClientConfig = multiClusterClientConfig;
+ this.multiDbConfig = multiDbConfig;
this.healthCheck = hc;
}
@@ -805,7 +802,7 @@ public Endpoint getEndpoint() {
}
public Connection getConnection() {
- if (!isHealthy()) throw new JedisConnectionException("Cluster is not healthy");
+ if (!isHealthy()) throw new JedisConnectionException("Database is not healthy");
if (connectionPool.isClosed()) {
connectionPool = TrackingConnectionPool.from(connectionPool);
}
@@ -850,15 +847,15 @@ public boolean isHealthy() {
}
public boolean retryOnFailover() {
- return multiClusterClientConfig.isRetryOnFailover();
+ return multiDbConfig.isRetryOnFailover();
}
public int getCircuitBreakerMinNumOfFailures() {
- return multiClusterClientConfig.getCircuitBreakerMinNumOfFailures();
+ return multiDbConfig.getCircuitBreakerMinNumOfFailures();
}
public float getCircuitBreakerFailureRateThreshold() {
- return multiClusterClientConfig.getCircuitBreakerFailureRateThreshold();
+ return multiDbConfig.getCircuitBreakerFailureRateThreshold();
}
public boolean isDisabled() {
@@ -880,7 +877,7 @@ public boolean isInGracePeriod() {
* Sets the grace period for this cluster
*/
public void setGracePeriod() {
- setGracePeriod(multiClusterClientConfig.getGracePeriod());
+ setGracePeriod(multiDbConfig.getGracePeriod());
}
public void setGracePeriod(long gracePeriod) {
@@ -897,7 +894,7 @@ public void clearGracePeriod() {
* Whether failback is supported by client
*/
public boolean isFailbackSupported() {
- return multiClusterClientConfig.isFailbackSupported();
+ return multiDbConfig.isFailbackSupported();
}
public void forceDisconnect() {
@@ -915,15 +912,15 @@ && isThresholdsExceeded(this, lastFailRecorded)) {
}
}
- private static boolean isThresholdsExceeded(Cluster cluster, boolean lastFailRecorded) {
- Metrics metrics = cluster.getCircuitBreaker().getMetrics();
+ private static boolean isThresholdsExceeded(Database database, boolean lastFailRecorded) {
+ Metrics metrics = database.getCircuitBreaker().getMetrics();
// ATTENTION: this is to increment fails in regard to the current call that is failing,
// DO NOT remove the increment, it will change the behaviour in case of initial requests to
- // cluster fail
+ // database fail
int fails = metrics.getNumberOfFailedCalls() + (lastFailRecorded ? 0 : 1);
int succ = metrics.getNumberOfSuccessfulCalls();
- if (fails >= cluster.getCircuitBreakerMinNumOfFailures()) {
- float ratePercentThreshold = cluster.getCircuitBreakerFailureRateThreshold();// 0..100
+ if (fails >= database.getCircuitBreakerMinNumOfFailures()) {
+ float ratePercentThreshold = database.getCircuitBreakerFailureRateThreshold();// 0..100
int total = fails + succ;
if (total == 0) return false;
float failureRatePercent = (fails * 100.0f) / total;
@@ -936,7 +933,7 @@ private static boolean isThresholdsExceeded(Cluster cluster, boolean lastFailRec
public String toString() {
return circuitBreaker.getName() + "{" + "connectionPool=" + connectionPool + ", retry="
+ retry + ", circuitBreaker=" + circuitBreaker + ", weight=" + weight + ", healthStatus="
- + getHealthStatus() + ", multiClusterClientConfig=" + multiClusterClientConfig + '}';
+ + getHealthStatus() + ", multiDatabaseConfig=" + multiDbConfig + '}';
}
}
diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
index 9c0126a5af..05e923f2d4 100644
--- a/src/test/java/redis/clients/jedis/MultiDbClientTest.java
+++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
@@ -15,7 +15,7 @@
import static org.hamcrest.Matchers.not;
import static org.junit.jupiter.api.Assertions.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
import redis.clients.jedis.mcf.SwitchReason;
@@ -56,7 +56,7 @@ public static void setupAdminClients() throws IOException {
@BeforeEach
void setUp() {
// Create a simple resilient client with mock endpoints for testing
- MultiClusterClientConfig clientConfig = MultiClusterClientConfig.builder()
+ MultiDatabaseConfig clientConfig = MultiDatabaseConfig.builder()
.endpoint(endpoint1.getHostAndPort(), 100.0f, endpoint1.getClientConfigBuilder().build())
.endpoint(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build())
.build();
@@ -86,11 +86,11 @@ void testAddRemoveEndpointWithEndpointInterface() {
}
@Test
- void testAddRemoveEndpointWithClusterConfig() {
+ void testAddRemoveEndpointWithDatabaseConfig() {
// todo : (@ggivo) Replace HostAndPort with Endpoint
HostAndPort newEndpoint = new HostAndPort("unavailable", 6381);
- ClusterConfig newConfig = ClusterConfig
+ DatabaseConfig newConfig = DatabaseConfig
.builder(newEndpoint, DefaultJedisClientConfig.builder().build()).weight(25.0f).build();
assertDoesNotThrow(() -> client.addEndpoint(newConfig));
@@ -121,9 +121,9 @@ void testSetActiveDatabase() {
@Test
void testBuilderWithMultipleEndpointTypes() {
- MultiClusterClientConfig clientConfig = MultiClusterClientConfig.builder()
+ MultiDatabaseConfig clientConfig = MultiDatabaseConfig.builder()
.endpoint(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build())
- .endpoint(ClusterConfig
+ .endpoint(DatabaseConfig
.builder(endpoint2.getHostAndPort(), DefaultJedisClientConfig.builder().build())
.weight(50.0f).build())
.build();
@@ -172,11 +172,11 @@ public void testForceActiveEndpointWithNonExistingEndpoint() {
@Test
public void testWithDatabaseSwitchListener() {
- MultiClusterClientConfig endpointsConfig = MultiClusterClientConfig.builder()
- .endpoint(ClusterConfig
+ MultiDatabaseConfig endpointsConfig = MultiDatabaseConfig.builder()
+ .endpoint(DatabaseConfig
.builder(endpoint1.getHostAndPort(), endpoint1.getClientConfigBuilder().build())
.weight(100.0f).build())
- .endpoint(ClusterConfig
+ .endpoint(DatabaseConfig
.builder(endpoint2.getHostAndPort(), endpoint2.getClientConfigBuilder().build())
.weight(50.0f).build())
.build();
diff --git a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
index f2b2f56e14..39c76b6338 100644
--- a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
+++ b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
@@ -184,8 +184,7 @@ private static boolean clusterConstructorThatShouldBeDeprecatedAndRemoved(Constr
private static boolean multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient(
Constructor<?> ctor) {
Class<?>[] types = ctor.getParameterTypes();
- return types.length == 1
- && types[0].getSimpleName().equals("MultiClusterPooledConnectionProvider");
+ return types.length == 1 && types[0].getSimpleName().equals("MultiDatabaseConnectionProvider");
}
private static String prettySignature(Constructor> ctor) {
diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
index 6eb047f6c7..3416f68b74 100644
--- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
@@ -16,10 +16,10 @@
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
import redis.clients.jedis.scenario.RecommendedSettings;
import java.io.IOException;
@@ -57,7 +57,7 @@ public class FailoverIntegrationTest {
private static UnifiedJedis jedis2;
private static String JEDIS1_ID = "";
private static String JEDIS2_ID = "";
- private MultiClusterPooledConnectionProvider provider;
+ private MultiDatabaseConnectionProvider provider;
private UnifiedJedis failoverClient;
@BeforeAll
@@ -138,7 +138,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID));
await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy());
+ .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy());
// Disable redisProxy1
redisProxy1.disable();
@@ -149,7 +149,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception
// 3. Subsequent calls should be routed to Endpoint 2
assertThrows(JedisConnectionException.class, () -> failoverClient.info("server"));
- assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.FORCED_OPEN));
// Check that the failoverClient is now using Endpoint 2
@@ -160,7 +160,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception
// Endpoint1 and Endpoint2 are NOT available,
assertThrows(JedisConnectionException.class, () -> failoverClient.info("server"));
- assertThat(provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker().getState(),
+ assertThat(provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.FORCED_OPEN));
// and since no other nodes are available, it should propagate the errors to the caller
@@ -173,20 +173,20 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID));
await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy());
+ .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy());
- provider.setActiveCluster(endpoint2.getHostAndPort());
+ provider.setActiveDatabase(endpoint2.getHostAndPort());
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID));
}
- private List<MultiClusterClientConfig.ClusterConfig> getClusterConfigs(
+ private List<MultiDatabaseConfig.DatabaseConfig> getDatabaseConfigs(
JedisClientConfig clientConfig, EndpointConfig... endpoints) {
int weight = endpoints.length;
AtomicInteger weightCounter = new AtomicInteger(weight);
return Arrays.stream(endpoints)
- .map(e -> MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig)
+ .map(e -> MultiDatabaseConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)
.weight(1.0f / weightCounter.getAndIncrement()).healthCheckEnabled(false).build())
.collect(Collectors.toList());
}
@@ -197,21 +197,21 @@ public void testManualFailoverInflightCommandsCompleteGracefully()
throws ExecutionException, InterruptedException {
await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy());
+ .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy());
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID));
// We will trigger failover while this command is in-flight
Future> blpop = executor.submit(() -> failoverClient.blpop(1000, "test-list"));
- provider.setActiveCluster(endpoint2.getHostAndPort());
+ provider.setActiveDatabase(endpoint2.getHostAndPort());
// After the manual failover, commands should be executed against Endpoint 2
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID));
// Failover was manually triggered, and there were no errors
// previous endpoint CB should still be in CLOSED state
- assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.CLOSED));
jedis1.rpush("test-list", "somevalue");
@@ -228,12 +228,12 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID));
await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy());
+ .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy());
Future> blpop = executor.submit(() -> failoverClient.blpop(10000, "test-list-1"));
// trigger failover manually
- provider.setActiveCluster(endpoint2.getHostAndPort());
+ provider.setActiveDatabase(endpoint2.getHostAndPort());
Future<String> infoCmd = executor.submit(() -> failoverClient.info("server"));
// After the manual failover, commands should be executed against Endpoint 2
@@ -247,7 +247,7 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws
assertThat(exception.getCause(), instanceOf(JedisConnectionException.class));
// Check that the circuit breaker for Endpoint 1 is open after the error
- assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.OPEN));
// Ensure that the active cluster is still Endpoint 2
@@ -261,18 +261,15 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws
*/
@Test
public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOException {
- MultiClusterClientConfig failoverConfig = new MultiClusterClientConfig.Builder(
- getClusterConfigs(
- DefaultJedisClientConfig.builder()
- .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
- .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(),
- endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1)
- .circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2)
- .circuitBreakerFailureRateThreshold(50f) // %50 failure rate
- .build();
-
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- failoverConfig);
+ MultiDatabaseConfig failoverConfig = new MultiDatabaseConfig.Builder(getDatabaseConfigs(
+ DefaultJedisClientConfig.builder().socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
+ .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(),
+ endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1)
+ .circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2)
+ .circuitBreakerFailureRateThreshold(50f) // %50 failure rate
+ .build();
+
+ MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(failoverConfig);
try (UnifiedJedis client = new UnifiedJedis(provider)) {
// Verify initial connection to first endpoint
assertThat(getNodeId(client.info("server")), equalTo(JEDIS1_ID));
@@ -298,7 +295,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc
assertThrows(JedisConnectionException.class, () -> client.info("server"));
// Circuit breaker should be open after just one command with retries
- assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.FORCED_OPEN));
// Next command should be routed to the second endpoint
@@ -318,7 +315,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc
@Test
public void testInflightCommandsAreRetriedAfterFailover() throws Exception {
- MultiClusterPooledConnectionProvider customProvider = createProvider(
+ MultiDatabaseConnectionProvider customProvider = createProvider(
builder -> builder.retryOnFailover(true));
// Create a custom client with retryOnFailover enabled for this specific test
@@ -342,7 +339,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception {
assertThat(getNodeId(customClient.info("server")), equalTo(JEDIS2_ID));
// Check that the circuit breaker for Endpoint 1 is open
assertThat(
- customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ customProvider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.FORCED_OPEN));
// Disable redisProxy1 to enforce connection drop for the in-flight (blpop) command
@@ -360,7 +357,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception {
@Test
public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception {
// Create a custom provider and client with retry disabled for this specific test
- MultiClusterPooledConnectionProvider customProvider = createProvider(
+ MultiDatabaseConnectionProvider customProvider = createProvider(
builder -> builder.retryOnFailover(false));
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
@@ -376,7 +373,7 @@ public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception {
// Check that the circuit breaker for Endpoint 1 is open
assertThat(
- customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ customProvider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.FORCED_OPEN));
// Disable redisProxy1 to enforce the current blpop command failure
@@ -417,34 +414,34 @@ private static String generateTestValue(int byteSize) {
}
/**
- * Creates a MultiClusterPooledConnectionProvider with standard configuration
+ * Creates a MultiDatabaseConnectionProvider with standard configuration
* @return A configured provider
*/
- private MultiClusterPooledConnectionProvider createProvider() {
+ private MultiDatabaseConnectionProvider createProvider() {
JedisClientConfig clientConfig = DefaultJedisClientConfig.builder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
- MultiClusterClientConfig failoverConfig = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
+ MultiDatabaseConfig failoverConfig = new MultiDatabaseConfig.Builder(
+ getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
.retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
.circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build();
- return new MultiClusterPooledConnectionProvider(failoverConfig);
+ return new MultiDatabaseConnectionProvider(failoverConfig);
}
/**
- * Creates a MultiClusterPooledConnectionProvider with standard configuration
+ * Creates a MultiDatabaseConnectionProvider with standard configuration
* @return A configured provider
*/
- private MultiClusterPooledConnectionProvider createProvider(
- Function<MultiClusterClientConfig.Builder, MultiClusterClientConfig.Builder> configCustomizer) {
+ private MultiDatabaseConnectionProvider createProvider(
+ Function<MultiDatabaseConfig.Builder, MultiDatabaseConfig.Builder> configCustomizer) {
JedisClientConfig clientConfig = DefaultJedisClientConfig.builder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
+ MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
.retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
.circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f);
@@ -452,6 +449,6 @@ private MultiClusterPooledConnectionProvider createProvider(
builder = configCustomizer.apply(builder);
}
- return new MultiClusterPooledConnectionProvider(builder.build());
+ return new MultiDatabaseConnectionProvider(builder.build());
}
}
diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
index bc00caf8ed..da295e837e 100644
--- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
@@ -1,7 +1,5 @@
package redis.clients.jedis.mcf;
-import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;
-
import io.github.resilience4j.ratelimiter.RateLimiterConfig;
import org.hamcrest.Matchers;
import org.junit.jupiter.api.AfterAll;
@@ -18,7 +16,7 @@
import eu.rekawek.toxiproxy.ToxiproxyClient;
import eu.rekawek.toxiproxy.model.Toxic;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.scenario.ActiveActiveFailoverTest;
import redis.clients.jedis.scenario.MultiThreadedFakeApp;
import redis.clients.jedis.scenario.RecommendedSettings;
@@ -96,18 +94,18 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio
"TESTING WITH PARAMETERS: fastFailover: {} numberOfThreads: {} minFailoverCompletionDuration: {} maxFailoverCompletionDuration: {] ",
fastFailover, numberOfThreads, minFailoverCompletionDuration, maxFailoverCompletionDuration);
- MultiClusterClientConfig.ClusterConfig[] clusterConfig = new MultiClusterClientConfig.ClusterConfig[2];
+ MultiDatabaseConfig.DatabaseConfig[] clusterConfig = new MultiDatabaseConfig.DatabaseConfig[2];
JedisClientConfig config = endpoint1.getClientConfigBuilder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
- clusterConfig[0] = ClusterConfig.builder(endpoint1.getHostAndPort(), config)
+ clusterConfig[0] = DatabaseConfig.builder(endpoint1.getHostAndPort(), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build();
- clusterConfig[1] = ClusterConfig.builder(endpoint2.getHostAndPort(), config)
+ clusterConfig[1] = DatabaseConfig.builder(endpoint2.getHostAndPort(), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfig);
+ MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(clusterConfig);
builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS
builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit
@@ -164,11 +162,10 @@ public void accept(ClusterSwitchEventArgs e) {
ensureEndpointAvailability(endpoint2.getHostAndPort(), config);
// Create the connection provider
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- builder.build());
+ MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(builder.build());
FailoverReporter reporter = new FailoverReporter();
- provider.setClusterSwitchListener(reporter);
- provider.setActiveCluster(endpoint1.getHostAndPort());
+ provider.setDatabaseSwitchListener(reporter);
+ provider.setActiveDatabase(endpoint1.getHostAndPort());
UnifiedJedis client = new UnifiedJedis(provider);
@@ -180,7 +177,7 @@ public void accept(ClusterSwitchEventArgs e) {
AtomicBoolean unexpectedErrors = new AtomicBoolean(false);
AtomicReference lastException = new AtomicReference();
AtomicLong stopRunningAt = new AtomicLong();
- String cluster2Id = provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker()
+ String cluster2Id = provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker()
.getName();
// Start thread that imitates an application that uses the client
@@ -198,7 +195,7 @@ public void accept(ClusterSwitchEventArgs e) {
while (true) {
try {
if (System.currentTimeMillis() > stopRunningAt.get()) break;
- currentClusterId = provider.getCluster().getCircuitBreaker().getName();
+ currentClusterId = provider.getDatabase().getCircuitBreaker().getName();
Map<String, String> executionInfo = new HashMap<String, String>() {
{
put("threadId", String.valueOf(threadId));
@@ -287,7 +284,7 @@ public boolean isCompleted(Duration checkInterval, Duration delayAfter, Duration
}
log.info("Fake app completed");
- ConnectionPool pool = provider.getCluster(endpoint1.getHostAndPort()).getConnectionPool();
+ ConnectionPool pool = provider.getDatabase(endpoint1.getHostAndPort()).getConnectionPool();
log.info("First connection pool state: active: {}, idle: {}", pool.getNumActive(),
pool.getNumIdle());
diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
index 755325c705..bf7f702004 100644
--- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
@@ -16,11 +16,11 @@
import redis.clients.jedis.Connection;
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
-import redis.clients.jedis.MultiClusterClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
import redis.clients.jedis.util.ReflectionTestUtil;
/**
@@ -30,36 +30,36 @@
*/
public class CircuitBreakerThresholdsTest {
- private MultiClusterPooledConnectionProvider realProvider;
- private MultiClusterPooledConnectionProvider spyProvider;
- private Cluster cluster;
+ private MultiDatabaseConnectionProvider realProvider;
+ private MultiDatabaseConnectionProvider spyProvider;
+ private Database cluster;
private CircuitBreakerCommandExecutor executor;
private CommandObject<String> dummyCommand;
private TrackingConnectionPool poolMock;
private HostAndPort fakeEndpoint = new HostAndPort("fake", 6379);
private HostAndPort fakeEndpoint2 = new HostAndPort("fake2", 6379);
- private ClusterConfig[] fakeClusterConfigs;
+ private DatabaseConfig[] fakeDatabaseConfigs;
@BeforeEach
public void setup() throws Exception {
- ClusterConfig[] clusterConfigs = new ClusterConfig[] {
- ClusterConfig.builder(fakeEndpoint, DefaultJedisClientConfig.builder().build())
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[] {
+ DatabaseConfig.builder(fakeEndpoint, DefaultJedisClientConfig.builder().build())
.healthCheckEnabled(false).weight(1.0f).build(),
- ClusterConfig.builder(fakeEndpoint2, DefaultJedisClientConfig.builder().build())
+ DatabaseConfig.builder(fakeEndpoint2, DefaultJedisClientConfig.builder().build())
.healthCheckEnabled(false).weight(0.5f).build() };
- fakeClusterConfigs = clusterConfigs;
+ fakeDatabaseConfigs = databaseConfigs;
- MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig.builder(clusterConfigs)
+ MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(databaseConfigs)
.circuitBreakerFailureRateThreshold(50.0f).circuitBreakerMinNumOfFailures(3)
.circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false);
- MultiClusterClientConfig mcc = cfgBuilder.build();
+ MultiDatabaseConfig mcc = cfgBuilder.build();
- realProvider = new MultiClusterPooledConnectionProvider(mcc);
+ realProvider = new MultiDatabaseConnectionProvider(mcc);
spyProvider = spy(realProvider);
- cluster = spyProvider.getCluster();
+ cluster = spyProvider.getDatabase();
executor = new CircuitBreakerCommandExecutor(spyProvider);
@@ -88,7 +88,7 @@ public void belowMinFailures_doesNotFailover() {
}
// Below min failures; CB remains CLOSED
- assertEquals(CircuitBreaker.State.CLOSED, spyProvider.getClusterCircuitBreaker().getState());
+ assertEquals(CircuitBreaker.State.CLOSED, spyProvider.getDatabaseCircuitBreaker().getState());
}
/**
@@ -111,10 +111,10 @@ public void minFailuresAndRateExceeded_triggersFailover() {
// Next call should hit open CB (CallNotPermitted) and trigger failover
assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand));
- verify(spyProvider, atLeastOnce()).switchToHealthyCluster(eq(SwitchReason.CIRCUIT_BREAKER),
+ verify(spyProvider, atLeastOnce()).switchToHealthyDatabase(eq(SwitchReason.CIRCUIT_BREAKER),
any());
assertEquals(CircuitBreaker.State.FORCED_OPEN,
- spyProvider.getCluster(fakeEndpoint).getCircuitBreaker().getState());
+ spyProvider.getDatabase(fakeEndpoint).getCircuitBreaker().getState());
}
/**
@@ -123,14 +123,12 @@ public void minFailuresAndRateExceeded_triggersFailover() {
@Test
public void rateBelowThreshold_doesNotFailover() throws Exception {
// Use local provider with higher threshold (80%) and no retries
- MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig
- .builder(fakeClusterConfigs).circuitBreakerFailureRateThreshold(80.0f)
- .circuitBreakerMinNumOfFailures(3).circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1)
- .retryOnFailover(false);
- MultiClusterPooledConnectionProvider rp = new MultiClusterPooledConnectionProvider(
- cfgBuilder.build());
- MultiClusterPooledConnectionProvider sp = spy(rp);
- Cluster c = sp.getCluster();
+ MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs)
+ .circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3)
+ .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false);
+ MultiDatabaseConnectionProvider rp = new MultiDatabaseConnectionProvider(cfgBuilder.build());
+ MultiDatabaseConnectionProvider sp = spy(rp);
+ Database c = sp.getDatabase();
try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(sp)) {
CommandObject<String> cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
BuilderFactory.STRING);
@@ -158,17 +156,16 @@ public void rateBelowThreshold_doesNotFailover() throws Exception {
assertThrows(JedisConnectionException.class, () -> ex.executeCommand(cmd));
}
- assertEquals(CircuitBreaker.State.CLOSED, sp.getClusterCircuitBreaker().getState());
+ assertEquals(CircuitBreaker.State.CLOSED, sp.getDatabaseCircuitBreaker().getState());
}
}
@Test
public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
- MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig
- .builder(fakeClusterConfigs);
+ MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs);
cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3)
.circuitBreakerSlidingWindowSize(10);
- MultiClusterClientConfig mcc = cfgBuilder.build();
+ MultiDatabaseConfig mcc = cfgBuilder.build();
CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc);
@@ -192,16 +189,14 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures,
boolean expectFailoverOnNext) throws Exception {
- MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig
- .builder(fakeClusterConfigs).circuitBreakerFailureRateThreshold(ratePercent)
- .circuitBreakerMinNumOfFailures(minFailures)
+ MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs)
+ .circuitBreakerFailureRateThreshold(ratePercent).circuitBreakerMinNumOfFailures(minFailures)
.circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1)
.retryOnFailover(false);
- MultiClusterPooledConnectionProvider real = new MultiClusterPooledConnectionProvider(
- cfgBuilder.build());
- MultiClusterPooledConnectionProvider spy = spy(real);
- Cluster c = spy.getCluster();
+ MultiDatabaseConnectionProvider real = new MultiDatabaseConnectionProvider(cfgBuilder.build());
+ MultiDatabaseConnectionProvider spy = spy(real);
+ Database c = spy.getDatabase();
try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(spy)) {
CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
@@ -237,7 +232,7 @@ public void thresholdMatrix(int minFailures, float ratePercent, int successes, i
if (expectFailoverOnNext) {
assertThrows(Exception.class, () -> ex.executeCommand(cmd));
- verify(spy, atLeastOnce()).switchToHealthyCluster(eq(SwitchReason.CIRCUIT_BREAKER), any());
+ verify(spy, atLeastOnce()).switchToHealthyDatabase(eq(SwitchReason.CIRCUIT_BREAKER), any());
assertEquals(CircuitBreaker.State.FORCED_OPEN, c.getCircuitBreaker().getState());
} else {
CircuitBreaker.State st = c.getCircuitBreaker().getState();
diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
index c603509f32..251d69140c 100644
--- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
@@ -11,8 +11,8 @@
import org.junit.jupiter.params.provider.CsvSource;
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
-import redis.clients.jedis.MultiClusterClientConfig;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
/**
* Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures
@@ -21,15 +21,15 @@
*/
public class ClusterEvaluateThresholdsTest {
- private MultiClusterPooledConnectionProvider provider;
- private Cluster cluster;
+ private MultiDatabaseConnectionProvider provider;
+ private Database cluster;
private CircuitBreaker circuitBreaker;
private CircuitBreaker.Metrics metrics;
@BeforeEach
public void setup() {
- provider = mock(MultiClusterPooledConnectionProvider.class);
- cluster = mock(Cluster.class);
+ provider = mock(MultiDatabaseConnectionProvider.class);
+ cluster = mock(Database.class);
circuitBreaker = mock(CircuitBreaker.class);
metrics = mock(CircuitBreaker.Metrics.class);
@@ -58,7 +58,7 @@ public void belowMinFailures_doesNotFailover() {
cluster.evaluateThresholds(false);
verify(circuitBreaker, never()).transitionToOpenState();
- verify(provider, never()).switchToHealthyCluster(any(), any());
+ verify(provider, never()).switchToHealthyDatabase(any(), any());
}
/**
@@ -95,18 +95,18 @@ public void rateBelowThreshold_doesNotFailover() {
cluster.evaluateThresholds(false);
verify(circuitBreaker, never()).transitionToOpenState();
- verify(provider, never()).switchToHealthyCluster(any(), any());
+ verify(provider, never()).switchToHealthyDatabase(any(), any());
}
@Test
public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
- MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig
- .builder(java.util.Arrays.asList(MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig
+ .builder(java.util.Arrays.asList(MultiDatabaseConfig.DatabaseConfig
.builder(new HostAndPort("localhost", 6379), DefaultJedisClientConfig.builder().build())
.healthCheckEnabled(false).build()));
cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3)
.circuitBreakerSlidingWindowSize(10);
- MultiClusterClientConfig mcc = cfgBuilder.build();
+ MultiDatabaseConfig mcc = cfgBuilder.build();
CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc);
diff --git a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
index 6939ef7069..51d0aa3ec2 100644
--- a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
public class DefaultValuesTest {
@@ -19,16 +19,16 @@ public class DefaultValuesTest {
@Test
void testDefaultValuesInConfig() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(fakeEndpoint, config).build();
- MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
// check for grace period
assertEquals(60000, multiConfig.getGracePeriod());
// check for cluster config
- assertEquals(clusterConfig, multiConfig.getClusterConfigs()[0]);
+ assertEquals(clusterConfig, multiConfig.getDatabaseConfigs()[0]);
// check healthchecks enabled
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
index afedc66f4d..c216e69317 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
@@ -17,7 +17,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
@ExtendWith(MockitoExtension.class)
class FailbackMechanismIntegrationTest {
@@ -49,40 +49,38 @@ private MockedConstruction mockPool() {
void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with different weights
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f) // Higher weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(false) // Disabled
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
.failbackCheckInterval(100) // Short interval for testing
.build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1 (only healthy option)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster2 healthy again (higher weight - would normally trigger failback)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait longer than failback interval
// Should still be on cluster1 since failback is disabled
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint1) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase());
}
}
}
@@ -91,39 +89,38 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException {
void testFailbackToHigherWeightCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with different weights
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f) // Higher weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f) // Lower weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(100) // Short interval for testing
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(100) // Short interval for testing
.gracePeriod(100).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (lower weight, but only healthy option)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check interval + some buffer
// Should have failed back to cluster1 (higher weight)
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint1) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase());
}
}
}
@@ -132,43 +129,42 @@ void testFailbackToHigherWeightCluster() throws InterruptedException {
void testNoFailbackToLowerWeightCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create three clusters with different weights to properly test no failback to lower weight
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f) // Lowest weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f) // Medium weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 })
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(100).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint3), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
// Make cluster3 unhealthy to force failover to cluster2 (medium weight)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (highest weight among healthy clusters)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 (lowest weight) healthy - this should NOT trigger failback
// since we don't failback to lower weight clusters
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check interval
// Should still be on cluster2 (no failback to lower weight cluster1)
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase());
}
}
}
@@ -176,39 +172,38 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException {
@Test
void testFailbackToHigherWeightClusterImmediately() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(100).gracePeriod(50).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(100).gracePeriod(50).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (only healthy option)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check
// Should have failed back to cluster1 immediately (higher weight, no stability period
// required)
await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint1) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase());
}
}
}
@@ -216,45 +211,44 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException
@Test
void testUnhealthyClusterCancelsFailback() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(200).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(200).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (only healthy option)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again (should trigger failback attempt)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait a bit
Thread.sleep(100);
// Make cluster1 unhealthy again before failback completes
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Wait past the original failback interval
// Should still be on cluster2 (failback was cancelled due to cluster1 becoming unhealthy)
await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase());
}
}
}
@@ -262,42 +256,41 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException {
@Test
void testMultipleClusterFailbackPriority() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lowest
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Medium
// weight
- MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 })
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint3), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
// Make cluster3 unhealthy to force failover to cluster2 (next highest weight)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (highest weight among healthy clusters)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster3 healthy again
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback
// Should fail back to cluster3 (highest weight)
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint3) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint3) == provider.getDatabase());
}
}
}
@@ -305,34 +298,33 @@ void testMultipleClusterFailbackPriority() throws InterruptedException {
@Test
void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(100).gracePeriod(200) // 200ms grace
- // period
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(100).gracePeriod(200) // 200ms grace
+ // period
.build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Now make cluster2 unhealthy - it should be disabled for grace period
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should failover to cluster1
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Cluster2 should be in grace period
- assertTrue(provider.getCluster(endpoint2).isInGracePeriod());
+ assertTrue(provider.getDatabase(endpoint2).isInGracePeriod());
}
}
}
@@ -340,51 +332,50 @@ void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException {
@Test
void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(50) // Short interval for testing
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(50) // Short interval for testing
.gracePeriod(100) // Short grace period for testing
.build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to start grace period and force failover
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should failover to cluster1
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Cluster2 should be in grace period
- assertTrue(provider.getCluster(endpoint2).isInGracePeriod());
+ assertTrue(provider.getDatabase(endpoint2).isInGracePeriod());
// Make cluster2 healthy again while it's still in grace period
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Should still be on cluster1 because cluster2 is in grace period
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Wait for grace period to expire
// Cluster2 should no longer be in grace period
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> !provider.getCluster(endpoint2).isInGracePeriod());
+ .until(() -> !provider.getDatabase(endpoint2).isInGracePeriod());
// Wait for failback check to run
// Should now failback to cluster2 (higher weight) since grace period has expired
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase());
}
}
}
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
index fee216f2be..a200296e18 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
@ExtendWith(MockitoExtension.class)
class FailbackMechanismUnitTest {
@@ -26,17 +26,17 @@ void setUp() {
@Test
void testFailbackCheckIntervalConfiguration() {
// Test default value
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
- MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
assertEquals(120000, defaultConfig.getFailbackCheckInterval());
// Test custom value
- MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(3000)
+ MultiDatabaseConfig customConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000)
.build();
assertEquals(3000, customConfig.getFailbackCheckInterval());
@@ -44,18 +44,18 @@ void testFailbackCheckIntervalConfiguration() {
@Test
void testFailbackSupportedConfiguration() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test default (should be true)
- MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
assertTrue(defaultConfig.isFailbackSupported());
// Test disabled
- MultiClusterClientConfig disabledConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(false)
+ MultiDatabaseConfig disabledConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false)
.build();
assertFalse(disabledConfig.isFailbackSupported());
@@ -63,19 +63,19 @@ void testFailbackSupportedConfiguration() {
@Test
void testFailbackCheckIntervalValidation() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test zero interval (should be allowed)
- MultiClusterClientConfig zeroConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(0)
+ MultiDatabaseConfig zeroConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0)
.build();
assertEquals(0, zeroConfig.getFailbackCheckInterval());
// Test negative interval (should be allowed - implementation decision)
- MultiClusterClientConfig negativeConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(-1000)
+ MultiDatabaseConfig negativeConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000)
.build();
assertEquals(-1000, negativeConfig.getFailbackCheckInterval());
@@ -83,12 +83,12 @@ void testFailbackCheckIntervalValidation() {
@Test
void testBuilderChaining() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test that builder methods can be chained
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(true)
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true)
.failbackCheckInterval(2000).retryOnFailover(true).build();
assertTrue(config.isFailbackSupported());
@@ -99,47 +99,47 @@ void testBuilderChaining() {
@Test
void testGracePeriodConfiguration() {
// Test default value
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
- MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
assertEquals(60000, defaultConfig.getGracePeriod());
// Test custom value
- MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(5000).build();
+ MultiDatabaseConfig customConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build();
assertEquals(5000, customConfig.getGracePeriod());
}
@Test
void testGracePeriodValidation() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test zero grace period (should be allowed)
- MultiClusterClientConfig zeroConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(0).build();
+ MultiDatabaseConfig zeroConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build();
assertEquals(0, zeroConfig.getGracePeriod());
// Test negative grace period (should be allowed - implementation decision)
- MultiClusterClientConfig negativeConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(-1000).build();
+ MultiDatabaseConfig negativeConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(-1000).build();
assertEquals(-1000, negativeConfig.getGracePeriod());
}
@Test
void testGracePeriodBuilderChaining() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test that builder methods can be chained
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(true)
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true)
.failbackCheckInterval(2000).gracePeriod(8000).retryOnFailover(true).build();
assertTrue(config.isFailbackSupported());
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
index d1cb8b90e9..c43baf9933 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
@@ -15,10 +15,10 @@
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
import redis.clients.jedis.UnifiedJedis;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
-import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
import redis.clients.jedis.scenario.RecommendedSettings;
@@ -32,7 +32,7 @@ public class HealthCheckIntegrationTest {
@Test
public void testDisableHealthCheck() {
// No health check strategy supplier means health check is disabled
- MultiClusterPooledConnectionProvider customProvider = getMCCF(null);
+ MultiDatabaseConnectionProvider customProvider = getMCCF(null);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -43,11 +43,10 @@ public void testDisableHealthCheck() {
@Test
public void testDefaultStrategySupplier() {
// Create a default strategy supplier that creates EchoStrategy instances
- MultiClusterClientConfig.StrategySupplier defaultSupplier = (hostAndPort,
- jedisClientConfig) -> {
+ MultiDatabaseConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> {
return new EchoStrategy(hostAndPort, jedisClientConfig);
};
- MultiClusterPooledConnectionProvider customProvider = getMCCF(defaultSupplier);
+ MultiDatabaseConnectionProvider customProvider = getMCCF(defaultSupplier);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -58,8 +57,7 @@ public void testDefaultStrategySupplier() {
@Test
public void testCustomStrategySupplier() {
// Create a StrategySupplier that uses the JedisClientConfig when available
- MultiClusterClientConfig.StrategySupplier strategySupplier = (hostAndPort,
- jedisClientConfig) -> {
+ MultiDatabaseConfig.StrategySupplier strategySupplier = (hostAndPort, jedisClientConfig) -> {
return new TestHealthCheckStrategy(HealthCheckStrategy.Config.builder().interval(500)
.timeout(500).numProbes(1).policy(BuiltIn.ANY_SUCCESS).build(), (endpoint) -> {
// Create connection per health check to avoid resource leak
@@ -72,7 +70,7 @@ public void testCustomStrategySupplier() {
});
};
- MultiClusterPooledConnectionProvider customProvider = getMCCF(strategySupplier);
+ MultiDatabaseConnectionProvider customProvider = getMCCF(strategySupplier);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -80,23 +78,23 @@ public void testCustomStrategySupplier() {
}
}
- private MultiClusterPooledConnectionProvider getMCCF(
- MultiClusterClientConfig.StrategySupplier strategySupplier) {
- Function modifier = builder -> strategySupplier == null
+ private MultiDatabaseConnectionProvider getMCCF(
+ MultiDatabaseConfig.StrategySupplier strategySupplier) {
+ Function modifier = builder -> strategySupplier == null
? builder.healthCheckEnabled(false)
: builder.healthCheckStrategySupplier(strategySupplier);
- List clusterConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 })
+ List databaseConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 })
.map(e -> modifier
- .apply(MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig))
+ .apply(MultiDatabaseConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig))
.build())
.collect(Collectors.toList());
- MultiClusterClientConfig mccf = new MultiClusterClientConfig.Builder(clusterConfigs)
- .retryMaxAttempts(1).retryWaitDuration(1).circuitBreakerSlidingWindowSize(1)
+ MultiDatabaseConfig mccf = new MultiDatabaseConfig.Builder(databaseConfigs).retryMaxAttempts(1)
+ .retryWaitDuration(1).circuitBreakerSlidingWindowSize(1)
.circuitBreakerFailureRateThreshold(100).build();
- return new MultiClusterPooledConnectionProvider(mccf);
+ return new MultiDatabaseConnectionProvider(mccf);
}
// ========== Probe Logic Integration Tests ==========
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
index b7205fd808..a9a592de1f 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.Endpoint;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
@@ -338,7 +338,7 @@ void testEchoStrategyCustomIntervalTimeout() {
@Test
void testEchoStrategyDefaultSupplier() {
- MultiClusterClientConfig.StrategySupplier supplier = EchoStrategy.DEFAULT;
+ MultiDatabaseConfig.StrategySupplier supplier = EchoStrategy.DEFAULT;
HealthCheckStrategy strategy = supplier.get(testEndpoint, testConfig);
assertInstanceOf(EchoStrategy.class, strategy);
@@ -348,12 +348,12 @@ void testEchoStrategyDefaultSupplier() {
@Test
void testNewFieldLocations() {
- // Test new field locations in ClusterConfig and MultiClusterClientConfig
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ // Test new field locations in DatabaseConfig and MultiDatabaseConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(testEndpoint, testConfig).weight(2.5f).build();
- MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).retryOnFailover(true)
+ MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true)
.failbackSupported(false).build();
assertEquals(2.5f, clusterConfig.getWeight());
@@ -363,8 +363,8 @@ void testNewFieldLocations() {
@Test
void testDefaultValues() {
- // Test default values in ClusterConfig
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ // Test default values in DatabaseConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(testEndpoint, testConfig).build();
assertEquals(1.0f, clusterConfig.getWeight()); // Default weight
@@ -374,22 +374,22 @@ void testDefaultValues() {
// health
// check)
- // Test default values in MultiClusterClientConfig
- MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ // Test default values in MultiDatabaseConfig
+ MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
assertFalse(multiConfig.isRetryOnFailover()); // Default is false
assertTrue(multiConfig.isFailbackSupported()); // Default is true
}
@Test
- void testClusterConfigWithHealthCheckStrategy() {
+ void testDatabaseConfigWithHealthCheckStrategy() {
HealthCheckStrategy customStrategy = mock(HealthCheckStrategy.class);
- MultiClusterClientConfig.StrategySupplier supplier = (hostAndPort,
+ MultiDatabaseConfig.StrategySupplier supplier = (hostAndPort,
jedisClientConfig) -> customStrategy;
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(supplier).build();
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -399,35 +399,34 @@ void testClusterConfigWithHealthCheckStrategy() {
}
@Test
- void testClusterConfigWithStrategySupplier() {
- MultiClusterClientConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> {
+ void testDatabaseConfigWithStrategySupplier() {
+ MultiDatabaseConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> {
return mock(HealthCheckStrategy.class);
};
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(customSupplier).build();
assertEquals(customSupplier, clusterConfig.getHealthCheckStrategySupplier());
}
@Test
- void testClusterConfigWithEchoStrategy() {
- MultiClusterClientConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> {
+ void testDatabaseConfigWithEchoStrategy() {
+ MultiDatabaseConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> {
return new EchoStrategy(hostAndPort, jedisClientConfig);
};
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(echoSupplier).build();
- MultiClusterClientConfig.StrategySupplier supplier = clusterConfig
- .getHealthCheckStrategySupplier();
+ MultiDatabaseConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier();
assertNotNull(supplier);
assertInstanceOf(EchoStrategy.class, supplier.get(testEndpoint, testConfig));
}
@Test
- void testClusterConfigWithDefaultHealthCheck() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ void testDatabaseConfigWithDefaultHealthCheck() {
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(testEndpoint, testConfig).build(); // Should use default EchoStrategy
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -435,16 +434,16 @@ void testClusterConfigWithDefaultHealthCheck() {
}
@Test
- void testClusterConfigWithDisabledHealthCheck() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ void testDatabaseConfigWithDisabledHealthCheck() {
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckEnabled(false).build();
assertNull(clusterConfig.getHealthCheckStrategySupplier());
}
@Test
- void testClusterConfigHealthCheckEnabledExplicitly() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ void testDatabaseConfigHealthCheckEnabledExplicitly() {
+ MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckEnabled(true).build();
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -516,7 +515,7 @@ void testHealthCheckIntegration() throws InterruptedException {
@Test
void testStrategySupplierPolymorphism() {
// Test that the polymorphic design works correctly
- MultiClusterClientConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> {
+ MultiDatabaseConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> {
if (jedisClientConfig != null) {
return new EchoStrategy(hostAndPort, jedisClientConfig,
HealthCheckStrategy.Config.builder().interval(500).timeout(250).numProbes(1).build());
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
index 90ff443794..973c854f6c 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
@@ -11,8 +11,8 @@
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
import static org.junit.jupiter.api.Assertions.*;
@@ -23,7 +23,7 @@
public class MultiClusterDynamicEndpointUnitTest {
- private MultiClusterPooledConnectionProvider provider;
+ private MultiDatabaseConnectionProvider provider;
private JedisClientConfig clientConfig;
private final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("standalone0");
private final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("standalone1");
@@ -33,42 +33,42 @@ void setUp() {
clientConfig = DefaultJedisClientConfig.builder().build();
// Create initial provider with endpoint1
- ClusterConfig initialConfig = createClusterConfig(endpoint1.getHostAndPort(), 1.0f);
+ DatabaseConfig initialConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(
- new ClusterConfig[] { initialConfig }).build();
+ MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder(
+ new DatabaseConfig[] { initialConfig }).build();
- provider = new MultiClusterPooledConnectionProvider(multiConfig);
+ provider = new MultiDatabaseConnectionProvider(multiConfig);
}
// Helper method to create cluster configurations
- private ClusterConfig createClusterConfig(HostAndPort hostAndPort, float weight) {
+ private DatabaseConfig createDatabaseConfig(HostAndPort hostAndPort, float weight) {
// Disable health check for unit tests to avoid real connections
- return ClusterConfig.builder(hostAndPort, clientConfig).weight(weight).healthCheckEnabled(false)
- .build();
+ return DatabaseConfig.builder(hostAndPort, clientConfig).weight(weight)
+ .healthCheckEnabled(false).build();
}
@Test
void testAddNewCluster() {
- ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f);
+ DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
// Should not throw exception
assertDoesNotThrow(() -> provider.add(newConfig));
// Verify the cluster was added by checking it can be retrieved
- assertNotNull(provider.getCluster(endpoint2.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint2.getHostAndPort()));
}
@Test
void testAddDuplicateCluster() {
- ClusterConfig duplicateConfig = createClusterConfig(endpoint1.getHostAndPort(), 2.0f);
+ DatabaseConfig duplicateConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 2.0f);
// Should throw validation exception for duplicate endpoint
assertThrows(JedisValidationException.class, () -> provider.add(duplicateConfig));
}
@Test
- void testAddNullClusterConfig() {
+ void testAddNullDatabaseConfig() {
// Should throw validation exception for null config
assertThrows(JedisValidationException.class, () -> provider.add(null));
}
@@ -80,26 +80,26 @@ void testRemoveExistingCluster() {
try (MockedConstruction mockedPool = mockPool(mockConnection)) {
// Create initial provider with endpoint1
- ClusterConfig clusterConfig1 = createClusterConfig(endpoint1.getHostAndPort(), 1.0f);
+ DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiClusterClientConfig multiConfig = MultiClusterClientConfig
- .builder(new ClusterConfig[] { clusterConfig1 }).build();
+ MultiDatabaseConfig multiConfig = MultiDatabaseConfig
+ .builder(new DatabaseConfig[] { clusterConfig1 }).build();
try (
- MultiClusterPooledConnectionProvider providerWithMockedPool = new MultiClusterPooledConnectionProvider(
+ MultiDatabaseConnectionProvider providerWithMockedPool = new MultiDatabaseConnectionProvider(
multiConfig)) {
// Add endpoint2 as second cluster
- ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f);
+ DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
providerWithMockedPool.add(newConfig);
// Now remove endpoint1 (original cluster)
assertDoesNotThrow(() -> providerWithMockedPool.remove(endpoint1.getHostAndPort()));
// Verify endpoint1 was removed
- assertNull(providerWithMockedPool.getCluster(endpoint1.getHostAndPort()));
+ assertNull(providerWithMockedPool.getDatabase(endpoint1.getHostAndPort()));
// Verify endpoint2 still exists
- assertNotNull(providerWithMockedPool.getCluster(endpoint2.getHostAndPort()));
+ assertNotNull(providerWithMockedPool.getDatabase(endpoint2.getHostAndPort()));
}
}
}
@@ -134,40 +134,40 @@ void testRemoveNullEndpoint() {
@Test
void testAddAndRemoveMultipleClusters() {
// Add endpoint2 as second cluster
- ClusterConfig config2 = createClusterConfig(endpoint2.getHostAndPort(), 2.0f);
+ DatabaseConfig config2 = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
// Create a third endpoint for this test
HostAndPort endpoint3 = new HostAndPort("localhost", 6381);
- ClusterConfig config3 = createClusterConfig(endpoint3, 3.0f);
+ DatabaseConfig config3 = createDatabaseConfig(endpoint3, 3.0f);
provider.add(config2);
provider.add(config3);
// Verify all clusters exist
- assertNotNull(provider.getCluster(endpoint1.getHostAndPort()));
- assertNotNull(provider.getCluster(endpoint2.getHostAndPort()));
- assertNotNull(provider.getCluster(endpoint3));
+ assertNotNull(provider.getDatabase(endpoint1.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint2.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint3));
// Remove endpoint2
provider.remove(endpoint2.getHostAndPort());
// Verify correct cluster was removed
- assertNull(provider.getCluster(endpoint2.getHostAndPort()));
- assertNotNull(provider.getCluster(endpoint1.getHostAndPort()));
- assertNotNull(provider.getCluster(endpoint3));
+ assertNull(provider.getDatabase(endpoint2.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint1.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint3));
}
@Test
void testActiveClusterHandlingOnAdd() {
// The initial cluster should be active
- assertNotNull(provider.getCluster());
+ assertNotNull(provider.getDatabase());
// Add endpoint2 with higher weight
- ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 5.0f);
+ DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 5.0f);
provider.add(newConfig);
// Active cluster should still be valid (implementation may or may not switch)
- assertNotNull(provider.getCluster());
+ assertNotNull(provider.getDatabase());
}
@Test
@@ -177,28 +177,28 @@ void testActiveClusterHandlingOnRemove() {
try (MockedConstruction mockedPool = mockPool(mockConnection)) {
// Create initial provider with endpoint1
- ClusterConfig clusterConfig1 = createClusterConfig(endpoint1.getHostAndPort(), 1.0f);
+ DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiClusterClientConfig multiConfig = MultiClusterClientConfig
- .builder(new ClusterConfig[] { clusterConfig1 }).build();
+ MultiDatabaseConfig multiConfig = MultiDatabaseConfig
+ .builder(new DatabaseConfig[] { clusterConfig1 }).build();
try (
- MultiClusterPooledConnectionProvider providerWithMockedPool = new MultiClusterPooledConnectionProvider(
+ MultiDatabaseConnectionProvider providerWithMockedPool = new MultiDatabaseConnectionProvider(
multiConfig)) {
// Add endpoint2 as second cluster
- ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f);
+ DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
providerWithMockedPool.add(newConfig);
// Get current active cluster
- Object initialActiveCluster = providerWithMockedPool.getCluster();
+ Object initialActiveCluster = providerWithMockedPool.getDatabase();
assertNotNull(initialActiveCluster);
// Remove endpoint1 (original cluster, might be active)
providerWithMockedPool.remove(endpoint1.getHostAndPort());
// Should still have an active cluster
- assertNotNull(providerWithMockedPool.getCluster());
+ assertNotNull(providerWithMockedPool.getDatabase());
}
}
}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
index e4992fb92b..2742084082 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
@@ -8,8 +8,8 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException;
import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException;
import redis.clients.jedis.util.ReflectionTestUtil;
@@ -22,34 +22,34 @@
/**
* Tests for how getMaxNumFailoverAttempts and getDelayInBetweenFailoverAttempts impact
- * MultiClusterPooledConnectionProvider behaviour when no healthy clusters are available.
+ * MultiDatabaseConnectionProvider behaviour when no healthy clusters are available.
*/
public class MultiClusterFailoverAttemptsConfigTest {
private HostAndPort endpoint0 = new HostAndPort("purposefully-incorrect", 0000);
private HostAndPort endpoint1 = new HostAndPort("purposefully-incorrect", 0001);
- private MultiClusterPooledConnectionProvider provider;
+ private MultiDatabaseConnectionProvider provider;
@BeforeEach
void setUp() throws Exception {
JedisClientConfig clientCfg = DefaultJedisClientConfig.builder().build();
- ClusterConfig[] clusterConfigs = new ClusterConfig[] {
- ClusterConfig.builder(endpoint0, clientCfg).weight(1.0f).healthCheckEnabled(false).build(),
- ClusterConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false)
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[] {
+ DatabaseConfig.builder(endpoint0, clientCfg).weight(1.0f).healthCheckEnabled(false).build(),
+ DatabaseConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false)
.build() };
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs);
+ MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(databaseConfigs);
// Use small values by default for tests unless overridden per-test via reflection
setBuilderFailoverConfig(builder, /* maxAttempts */ 10, /* delayMs */ 12000);
- provider = new MultiClusterPooledConnectionProvider(builder.build());
+ provider = new MultiDatabaseConnectionProvider(builder.build());
// Disable both clusters to force handleNoHealthyCluster path
- provider.getCluster(endpoint0).setDisabled(true);
- provider.getCluster(endpoint1).setDisabled(true);
+ provider.getDatabase(endpoint0).setDisabled(true);
+ provider.getDatabase(endpoint1).setDisabled(true);
}
@AfterEach
@@ -70,8 +70,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex
// First call: should throw temporary and start the freeze window, incrementing attempt count to
// 1
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
+ SwitchReason.HEALTH_CHECK, provider.getDatabase()));
int afterFirst = getProviderAttemptCount();
assertEquals(1, afterFirst);
@@ -79,8 +79,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex
// and should NOT increment the attempt count beyond 1
for (int i = 0; i < 50; i++) {
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
+ SwitchReason.HEALTH_CHECK, provider.getDatabase()));
assertEquals(1, getProviderAttemptCount());
}
}
@@ -98,8 +98,8 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr
// First call: should throw temporary and start the freeze window, incrementing attempt count to
// 1
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
+ SwitchReason.HEALTH_CHECK, provider.getDatabase()));
int afterFirst = getProviderAttemptCount();
assertEquals(1, afterFirst);
@@ -107,14 +107,14 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr
// and should NOT increment the attempt count beyond 1
for (int i = 0; i < 50; i++) {
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase()));
assertEquals(1, getProviderAttemptCount());
}
await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(Duration.ofMillis(10))
.until(() -> {
Exception e = assertThrows(JedisFailoverException.class, () -> provider
- .switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ .switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase()));
return e instanceof JedisPermanentlyNotAvailableException;
});
}
@@ -130,15 +130,15 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex
// Expect exactly 'maxAttempts' temporary exceptions, then a permanent one
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt
+ () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt
// 1
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt
+ () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt
// 2
// Next should exceed max and become permanent
assertThrows(JedisPermanentlyNotAvailableException.class,
- () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt
+ () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt
// 3
// ->
// permanent
@@ -146,17 +146,17 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex
// ======== Test helper methods (reflection) ========
- private static void setBuilderFailoverConfig(MultiClusterClientConfig.Builder builder,
- int maxAttempts, int delayMs) throws Exception {
+ private static void setBuilderFailoverConfig(MultiDatabaseConfig.Builder builder, int maxAttempts,
+ int delayMs) throws Exception {
ReflectionTestUtil.setField(builder, "maxNumFailoverAttempts", maxAttempts);
ReflectionTestUtil.setField(builder, "delayInBetweenFailoverAttempts", delayMs);
}
private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception {
- // Access the underlying MultiClusterClientConfig inside provider and adjust fields for this
+ // Access the underlying MultiDatabaseConfig inside provider and adjust fields for this
// test
- Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig");
ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts);
@@ -164,13 +164,13 @@ private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exce
}
private int getProviderMaxAttempts() throws Exception {
- Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig");
return ReflectionTestUtil.getField(cfg, "maxNumFailoverAttempts");
}
private int getProviderDelayMs() throws Exception {
- Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig");
return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts");
}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
index 7b580042dc..9aed74a5ea 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
@@ -14,11 +14,11 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
/**
- * Tests for MultiClusterPooledConnectionProvider initialization edge cases
+ * Tests for MultiDatabaseConnectionProvider initialization edge cases
*/
@ExtendWith(MockitoExtension.class)
public class MultiClusterInitializationTest {
@@ -49,30 +49,29 @@ private MockedConstruction mockPool() {
void testInitializationWithMixedHealthCheckConfiguration() {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with mixed health check configuration
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false) // No health
// check
.build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f)
.healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With
// health
// check
.build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Should initialize successfully
- assertNotNull(provider.getCluster());
+ assertNotNull(provider.getDatabase());
// Should select cluster1 (no health check, assumed healthy) or cluster2 based on weight
// Since cluster2 has higher weight and health checks, it should be selected if healthy
- assertTrue(provider.getCluster() == provider.getCluster(endpoint1)
- || provider.getCluster() == provider.getCluster(endpoint2));
+ assertTrue(provider.getDatabase() == provider.getDatabase(endpoint1)
+ || provider.getDatabase() == provider.getDatabase(endpoint2));
}
}
}
@@ -81,20 +80,19 @@ void testInitializationWithMixedHealthCheckConfiguration() {
void testInitializationWithAllHealthChecksDisabled() {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with no health checks
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(3.0f) // Higher weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Should select cluster2 (highest weight, no health checks)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
}
}
}
@@ -102,16 +100,15 @@ void testInitializationWithAllHealthChecksDisabled() {
@Test
void testInitializationWithSingleCluster() {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Should select the only available cluster
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
}
}
}
@@ -119,43 +116,41 @@ void testInitializationWithSingleCluster() {
@Test
void testErrorHandlingWithNullConfiguration() {
assertThrows(JedisValidationException.class, () -> {
- new MultiClusterPooledConnectionProvider(null);
+ new MultiDatabaseConnectionProvider(null);
});
}
@Test
void testErrorHandlingWithEmptyClusterArray() {
assertThrows(JedisValidationException.class, () -> {
- new MultiClusterClientConfig.Builder(new MultiClusterClientConfig.ClusterConfig[0]).build();
+ new MultiDatabaseConfig.Builder(new MultiDatabaseConfig.DatabaseConfig[0]).build();
});
}
@Test
- void testErrorHandlingWithNullClusterConfig() {
+ void testErrorHandlingWithNullDatabaseConfig() {
assertThrows(IllegalArgumentException.class, () -> {
- new MultiClusterClientConfig.Builder(new MultiClusterClientConfig.ClusterConfig[] { null })
- .build();
+ new MultiDatabaseConfig.Builder(new MultiDatabaseConfig.DatabaseConfig[] { null }).build();
});
}
@Test
void testInitializationWithZeroWeights() {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(0.0f) // Zero weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.0f) // Zero weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Should still initialize and select one of the clusters
- assertNotNull(provider.getCluster());
+ assertNotNull(provider.getDatabase());
}
}
}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java
deleted file mode 100644
index f5076694c8..0000000000
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package redis.clients.jedis.mcf;
-
-import redis.clients.jedis.Endpoint;
-
-public class MultiClusterPooledConnectionProviderHelper {
-
- public static void onHealthStatusChange(MultiClusterPooledConnectionProvider provider,
- Endpoint endpoint, HealthStatus oldStatus, HealthStatus newStatus) {
- provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus));
- }
-
- public static void periodicFailbackCheck(MultiClusterPooledConnectionProvider provider) {
- provider.periodicFailbackCheck();
- }
-
- public static Endpoint switchToHealthyCluster(MultiClusterPooledConnectionProvider provider,
- SwitchReason reason, MultiClusterPooledConnectionProvider.Cluster iterateFrom) {
- return provider.switchToHealthyCluster(reason, iterateFrom);
- }
-}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java
new file mode 100644
index 0000000000..a88e53feed
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java
@@ -0,0 +1,20 @@
+package redis.clients.jedis.mcf;
+
+import redis.clients.jedis.Endpoint;
+
+public class MultiDatabaseConnectionProviderHelper {
+
+ public static void onHealthStatusChange(MultiDatabaseConnectionProvider provider,
+ Endpoint endpoint, HealthStatus oldStatus, HealthStatus newStatus) {
+ provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus));
+ }
+
+ public static void periodicFailbackCheck(MultiDatabaseConnectionProvider provider) {
+ provider.periodicFailbackCheck();
+ }
+
+ public static Endpoint switchToHealthyCluster(MultiDatabaseConnectionProvider provider,
+ SwitchReason reason, MultiDatabaseConnectionProvider.Database iterateFrom) {
+ return provider.switchToHealthyDatabase(reason, iterateFrom);
+ }
+}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java
similarity index 69%
rename from src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java
index 88b2948016..fa84564645 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java
@@ -5,10 +5,10 @@
import org.awaitility.Durations;
import org.junit.jupiter.api.*;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.exceptions.JedisValidationException;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException;
import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException;
@@ -23,27 +23,27 @@
import static org.junit.jupiter.api.Assertions.*;
/**
- * @see MultiClusterPooledConnectionProvider
+ * @see MultiDatabaseConnectionProvider
*/
@Tag("integration")
-public class MultiClusterPooledConnectionProviderTest {
+public class MultiDatabaseConnectionProviderTest {
private final EndpointConfig endpointStandalone0 = HostAndPorts.getRedisEndpoint("standalone0");
private final EndpointConfig endpointStandalone1 = HostAndPorts.getRedisEndpoint("standalone1");
- private MultiClusterPooledConnectionProvider provider;
+ private MultiDatabaseConnectionProvider provider;
@BeforeEach
public void setUp() {
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = ClusterConfig.builder(endpointStandalone0.getHostAndPort(),
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = DatabaseConfig.builder(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build()).weight(0.5f).build();
- clusterConfigs[1] = ClusterConfig.builder(endpointStandalone1.getHostAndPort(),
+ databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(),
endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build();
- provider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(clusterConfigs).build());
+ provider = new MultiDatabaseConnectionProvider(
+ new MultiDatabaseConfig.Builder(databaseConfigs).build());
}
@AfterEach
@@ -55,7 +55,7 @@ public void destroy() {
@Test
public void testCircuitBreakerForcedTransitions() {
- CircuitBreaker circuitBreaker = provider.getClusterCircuitBreaker();
+ CircuitBreaker circuitBreaker = provider.getDatabaseCircuitBreaker();
circuitBreaker.getState();
if (CircuitBreaker.State.FORCED_OPEN.equals(circuitBreaker.getState()))
@@ -70,45 +70,46 @@ public void testCircuitBreakerForcedTransitions() {
@Test
public void testIterateActiveCluster() throws InterruptedException {
- waitForClustersToGetHealthy(provider.getCluster(endpointStandalone0.getHostAndPort()),
- provider.getCluster(endpointStandalone1.getHostAndPort()));
+ waitForClustersToGetHealthy(provider.getDatabase(endpointStandalone0.getHostAndPort()),
+ provider.getDatabase(endpointStandalone1.getHostAndPort()));
- Endpoint e2 = provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster());
+ Endpoint e2 = provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK,
+ provider.getDatabase());
assertEquals(endpointStandalone1.getHostAndPort(), e2);
}
@Test
public void testCanIterateOnceMore() {
Endpoint endpoint0 = endpointStandalone0.getHostAndPort();
- waitForClustersToGetHealthy(provider.getCluster(endpoint0),
- provider.getCluster(endpointStandalone1.getHostAndPort()));
+ waitForClustersToGetHealthy(provider.getDatabase(endpoint0),
+ provider.getDatabase(endpointStandalone1.getHostAndPort()));
- provider.setActiveCluster(endpoint0);
- provider.getCluster().setDisabled(true);
- provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster(endpoint0));
+ provider.setActiveDatabase(endpoint0);
+ provider.getDatabase().setDisabled(true);
+ provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase(endpoint0));
- assertFalse(provider.canIterateFrom(provider.getCluster()));
+ assertFalse(provider.canIterateFrom(provider.getDatabase()));
}
- private void waitForClustersToGetHealthy(Cluster... clusters) {
+ private void waitForClustersToGetHealthy(Database... clusters) {
Awaitility.await().pollInterval(Durations.ONE_HUNDRED_MILLISECONDS)
.atMost(Durations.TWO_SECONDS)
- .until(() -> Arrays.stream(clusters).allMatch(Cluster::isHealthy));
+ .until(() -> Arrays.stream(clusters).allMatch(Database::isHealthy));
}
@Test
public void testRunClusterFailoverPostProcessor() {
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = ClusterConfig
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = DatabaseConfig
.builder(new HostAndPort("purposefully-incorrect", 0000),
DefaultJedisClientConfig.builder().build())
.weight(0.5f).healthCheckEnabled(false).build();
- clusterConfigs[1] = ClusterConfig
+ databaseConfigs[1] = DatabaseConfig
.builder(new HostAndPort("purposefully-incorrect", 0001),
DefaultJedisClientConfig.builder().build())
.weight(0.4f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs);
+ MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(databaseConfigs);
// Configures a single failed command to trigger an open circuit on the next subsequent failure
builder.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(1)
@@ -116,9 +117,9 @@ public void testRunClusterFailoverPostProcessor() {
AtomicBoolean isValidTest = new AtomicBoolean(false);
- MultiClusterPooledConnectionProvider localProvider = new MultiClusterPooledConnectionProvider(
+ MultiDatabaseConnectionProvider localProvider = new MultiDatabaseConnectionProvider(
builder.build());
- localProvider.setClusterSwitchListener(a -> {
+ localProvider.setDatabaseSwitchListener(a -> {
isValidTest.set(true);
});
@@ -138,21 +139,23 @@ public void testRunClusterFailoverPostProcessor() {
@Test
public void testSetActiveMultiClusterIndexEqualsZero() {
- assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(null)); // Should
- // throw an
- // exception
+ assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should
+ // throw
+ // an
+ // exception
}
@Test
public void testSetActiveMultiClusterIndexLessThanZero() {
- assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(null)); // Should
- // throw an
- // exception
+ assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should
+ // throw
+ // an
+ // exception
}
@Test
public void testSetActiveMultiClusterIndexOutOfRange() {
- assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(new Endpoint() {
+ assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(new Endpoint() {
@Override
public String getHost() {
return "purposefully-incorrect";
@@ -171,15 +174,14 @@ public void testConnectionPoolConfigApplied() {
poolConfig.setMaxTotal(8);
poolConfig.setMaxIdle(4);
poolConfig.setMinIdle(1);
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = new ClusterConfig(endpointStandalone0.getHostAndPort(),
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = new DatabaseConfig(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build(), poolConfig);
- clusterConfigs[1] = new ClusterConfig(endpointStandalone1.getHostAndPort(),
+ databaseConfigs[1] = new DatabaseConfig(endpointStandalone1.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build(), poolConfig);
- try (
- MultiClusterPooledConnectionProvider customProvider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(clusterConfigs).build())) {
- MultiClusterPooledConnectionProvider.Cluster activeCluster = customProvider.getCluster();
+ try (MultiDatabaseConnectionProvider customProvider = new MultiDatabaseConnectionProvider(
+ new MultiDatabaseConfig.Builder(databaseConfigs).build())) {
+ MultiDatabaseConnectionProvider.Database activeCluster = customProvider.getDatabase();
ConnectionPool connectionPool = activeCluster.getConnectionPool();
assertEquals(8, connectionPool.getMaxTotal());
assertEquals(4, connectionPool.getMaxIdle());
@@ -202,13 +204,13 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException {
});
// Create new provider with health check strategy (don't use the setUp() provider)
- ClusterConfig config = ClusterConfig
+ DatabaseConfig config = DatabaseConfig
.builder(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build())
.healthCheckStrategy(countingStrategy).build();
- MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(Collections.singletonList(config)).build());
+ MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
+ new MultiDatabaseConfig.Builder(Collections.singletonList(config)).build());
try {
// Wait for some health checks to occur
@@ -236,22 +238,22 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException {
@Test
public void userCommand_firstTemporary_thenPermanent_inOrder() {
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = ClusterConfig.builder(endpointStandalone0.getHostAndPort(),
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = DatabaseConfig.builder(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build()).weight(0.5f).build();
- clusterConfigs[1] = ClusterConfig.builder(endpointStandalone1.getHostAndPort(),
+ databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(),
endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build();
- MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100)
+ MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
+ new MultiDatabaseConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
.maxNumFailoverAttempts(2).retryMaxAttempts(1).build());
try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) {
jedis.get("foo");
// Disable both clusters so any attempt to switch results in 'no healthy cluster' path
- testProvider.getCluster(endpointStandalone0.getHostAndPort()).setDisabled(true);
- testProvider.getCluster(endpointStandalone1.getHostAndPort()).setDisabled(true);
+ testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true);
+ testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true);
// Simulate user running a command that fails and triggers failover iteration
assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo"));
@@ -266,12 +268,12 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() {
@Test
public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent_inOrder() {
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = ClusterConfig
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = DatabaseConfig
.builder(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build())
.weight(0.5f).healthCheckEnabled(false).build();
- clusterConfigs[1] = ClusterConfig
+ databaseConfigs[1] = DatabaseConfig
.builder(endpointStandalone1.getHostAndPort(),
endpointStandalone1.getClientConfigBuilder().build())
.weight(0.3f).healthCheckEnabled(false).build();
@@ -279,8 +281,8 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
// ATTENTION: these configuration settings are not random and
// adjusted to get exact numbers of failures with exact exception types
// and open to impact from other defaulted values withing the components in use.
- MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100)
+ MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
+ new MultiDatabaseConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
.maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5)
.circuitBreakerFailureRateThreshold(60).build()) {
};
@@ -289,7 +291,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
jedis.get("foo");
// disable most weighted cluster so that it will fail on initial requests
- testProvider.getCluster(endpointStandalone0.getHostAndPort()).setDisabled(true);
+ testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true);
Exception e = assertThrows(JedisConnectionException.class, () -> jedis.get("foo"));
assertEquals(JedisConnectionException.class, e.getClass());
@@ -298,7 +300,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
assertEquals(JedisConnectionException.class, e.getClass());
// then disable the second ones
- testProvider.getCluster(endpointStandalone1.getHostAndPort()).setDisabled(true);
+ testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true);
assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo"));
assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo"));
diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
index d657e75829..1496ee3815 100644
--- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
@@ -2,7 +2,7 @@
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;
-import static redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper.onHealthStatusChange;
+import static redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper.onHealthStatusChange;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -14,7 +14,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
@ExtendWith(MockitoExtension.class)
class PeriodicFailbackTest {
@@ -42,33 +42,32 @@ private MockedConstruction mockPool() {
@Test
void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(100).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(100).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Start grace period for cluster2 manually
- provider.getCluster(endpoint2).setGracePeriod();
- provider.getCluster(endpoint2).setDisabled(true);
+ provider.getDatabase(endpoint2).setGracePeriod();
+ provider.getDatabase(endpoint2).setDisabled(true);
// Force failover to cluster1 since cluster2 is disabled
- provider.switchToHealthyCluster(SwitchReason.FORCED, provider.getCluster(endpoint2));
+ provider.switchToHealthyDatabase(SwitchReason.FORCED, provider.getDatabase(endpoint2));
// Manually trigger periodic check
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
// Should still be on cluster1 (cluster2 is in grace period)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
}
}
}
@@ -76,47 +75,46 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException
@Test
void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add
- // grace
- // period
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(50).gracePeriod(100).build(); // Add
+ // grace
+ // period
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1 (cluster2 is in grace period)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Verify cluster2 is in grace period
- assertTrue(provider.getCluster(endpoint2).isInGracePeriod());
+ assertTrue(provider.getDatabase(endpoint2).isInGracePeriod());
// Make cluster2 healthy again (but it's still in grace period)
onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Trigger periodic check immediately - should still be on cluster1
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Wait for grace period to expire
Thread.sleep(150);
// Trigger periodic check after grace period expires
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
// Should have failed back to cluster2 (higher weight, grace period expired)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
}
}
}
@@ -124,27 +122,25 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
@Test
void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(false) // Disabled
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
.failbackCheckInterval(50).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster2 healthy again
onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
@@ -153,10 +149,10 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException
Thread.sleep(100);
// Trigger periodic check
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
// Should still be on cluster1 (failback disabled)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
}
}
}
@@ -166,38 +162,37 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx
try (MockedConstruction mockedPool = mockPool()) {
HostAndPort endpoint3 = new HostAndPort("localhost", 6381);
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 })
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add
// grace
// period
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight: 3.0f vs 2.0f vs 1.0f)
- assertEquals(provider.getCluster(endpoint3), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
// Make cluster3 unhealthy to force failover to cluster2 (next highest weight)
onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (weight 2.0f, higher than cluster1's 1.0f)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1 (only healthy cluster left)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster2 and cluster3 healthy again
onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
@@ -207,10 +202,10 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx
Thread.sleep(150);
// Trigger periodic check
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
// Should have failed back to cluster3 (highest weight, grace period expired)
- assertEquals(provider.getCluster(endpoint3), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
}
}
}
diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
index 3be7d29656..25788fa916 100644
--- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
@@ -18,8 +18,8 @@
import redis.clients.jedis.exceptions.JedisAccessControlException;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper;
import redis.clients.jedis.mcf.SwitchReason;
import redis.clients.jedis.util.IOUtils;
@@ -47,10 +47,10 @@ public class AutomaticFailoverTest {
private Jedis jedis2;
- private List getClusterConfigs(
+ private List getDatabaseConfigs(
JedisClientConfig clientConfig, HostAndPort... hostPorts) {
return Arrays.stream(hostPorts)
- .map(hp -> new MultiClusterClientConfig.ClusterConfig(hp, clientConfig))
+ .map(hp -> new MultiDatabaseConfig.DatabaseConfig(hp, clientConfig))
.collect(Collectors.toList());
}
@@ -68,17 +68,17 @@ public void cleanUp() {
@Test
public void pipelineWithSwitch() {
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
+ MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ new MultiDatabaseConfig.Builder(
+ getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.build());
try (UnifiedJedis client = new UnifiedJedis(provider)) {
AbstractPipeline pipe = client.pipelined();
pipe.set("pstr", "foobar");
pipe.hset("phash", "foo", "bar");
- MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster());
+ MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
+ SwitchReason.HEALTH_CHECK, provider.getDatabase());
pipe.sync();
}
@@ -88,17 +88,17 @@ public void pipelineWithSwitch() {
@Test
public void transactionWithSwitch() {
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
+ MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ new MultiDatabaseConfig.Builder(
+ getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.build());
try (UnifiedJedis client = new UnifiedJedis(provider)) {
AbstractTransaction tx = client.multi();
tx.set("tstr", "foobar");
tx.hset("thash", "foo", "bar");
- MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster());
+ MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
+ SwitchReason.HEALTH_CHECK, provider.getDatabase());
assertEquals(Arrays.asList("OK", 1L), tx.exec());
}
@@ -112,16 +112,16 @@ public void commandFailoverUnresolvableHost() {
int slidingWindowSize = 2;
HostAndPort unresolvableHostAndPort = new HostAndPort("unresolvable", 6379);
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort()))
+ MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ getDatabaseConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort()))
.retryWaitDuration(1).retryMaxAttempts(1)
.circuitBreakerSlidingWindowSize(slidingWindowSize)
.circuitBreakerMinNumOfFailures(slidingWindowMinFails);
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider(
+ MultiDatabaseConnectionProvider connectionProvider = new MultiDatabaseConnectionProvider(
builder.build());
- connectionProvider.setClusterSwitchListener(failoverReporter);
+ connectionProvider.setDatabaseSwitchListener(failoverReporter);
UnifiedJedis jedis = new UnifiedJedis(connectionProvider);
@@ -152,8 +152,8 @@ public void commandFailover() {
int slidingWindowSize = 6;
int retryMaxAttempts = 3;
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
+ MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.retryMaxAttempts(retryMaxAttempts) // Default
// is
// 3
@@ -162,9 +162,9 @@ public void commandFailover() {
.circuitBreakerSlidingWindowSize(slidingWindowSize);
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider(
+ MultiDatabaseConnectionProvider connectionProvider = new MultiDatabaseConnectionProvider(
builder.build());
- connectionProvider.setClusterSwitchListener(failoverReporter);
+ connectionProvider.setDatabaseSwitchListener(failoverReporter);
UnifiedJedis jedis = new UnifiedJedis(connectionProvider);
@@ -194,15 +194,15 @@ public void commandFailover() {
public void pipelineFailover() {
int slidingWindowSize = 10;
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
+ MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.circuitBreakerSlidingWindowSize(slidingWindowSize)
.fallbackExceptionList(Collections.singletonList(JedisConnectionException.class));
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiClusterPooledConnectionProvider cacheProvider = new MultiClusterPooledConnectionProvider(
+ MultiDatabaseConnectionProvider cacheProvider = new MultiDatabaseConnectionProvider(
builder.build());
- cacheProvider.setClusterSwitchListener(failoverReporter);
+ cacheProvider.setDatabaseSwitchListener(failoverReporter);
UnifiedJedis jedis = new UnifiedJedis(cacheProvider);
@@ -226,15 +226,15 @@ public void pipelineFailover() {
public void failoverFromAuthError() {
int slidingWindowSize = 10;
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(),
+ MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ getDatabaseConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(),
workingEndpoint.getHostAndPort())).circuitBreakerSlidingWindowSize(slidingWindowSize)
.fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class));
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiClusterPooledConnectionProvider cacheProvider = new MultiClusterPooledConnectionProvider(
+ MultiDatabaseConnectionProvider cacheProvider = new MultiDatabaseConnectionProvider(
builder.build());
- cacheProvider.setClusterSwitchListener(failoverReporter);
+ cacheProvider.setDatabaseSwitchListener(failoverReporter);
UnifiedJedis jedis = new UnifiedJedis(cacheProvider);
diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
index bde6ab7fc6..c3f974dcd5 100644
--- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
+++ b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
@@ -14,13 +14,13 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
import redis.clients.jedis.mcf.HealthStatus;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper;
/**
- * Tests for MultiClusterPooledConnectionProvider event handling behavior during initialization and
+ * Tests for MultiDatabaseConnectionProvider event handling behavior during initialization and
* throughout its lifecycle with HealthStatusChangeEvents.
*/
@ExtendWith(MockitoExtension.class)
@@ -52,30 +52,30 @@ private MockedConstruction mockConnectionPool() {
void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
// Create clusters without health checks
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
- assertFalse(provider.getCluster(endpoint1).isInGracePeriod());
- assertEquals(provider.getCluster(), provider.getCluster(endpoint1));
+ assertFalse(provider.getDatabase(endpoint1).isInGracePeriod());
+ assertEquals(provider.getDatabase(), provider.getDatabase(endpoint1));
// This should process immediately since initialization is complete
assertDoesNotThrow(() -> {
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
}, "Post-initialization events should be processed immediately");
// Verify the cluster has changed according to the UNHEALTHY status
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"UNHEALTHY status on active cluster should cause a grace period");
- assertNotEquals(provider.getCluster(), provider.getCluster(endpoint1),
+ assertNotEquals(provider.getDatabase(), provider.getDatabase(endpoint1),
"UNHEALTHY status on active cluster should cause a failover");
}
}
@@ -84,46 +84,46 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
@Test
void postInit_nonActive_changes_do_not_switch_active() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
// Verify initial state
- assertEquals(provider.getCluster(endpoint1), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(),
"Should start with endpoint1 active");
// Simulate multiple rapid events for the same endpoint (post-init behavior)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// After first UNHEALTHY on active cluster: it enters grace period and provider fails over
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Active cluster should enter grace period");
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Should fail over to endpoint2");
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Healthy event for non-active cluster should not immediately revert active cluster
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Active cluster should remain endpoint2");
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Grace period should still be in effect");
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Further UNHEALTHY for non-active cluster is a no-op
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Active cluster unchanged");
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), "Still in grace period");
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Still in grace period");
}
}
}
@@ -131,26 +131,26 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception {
@Test
void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
// This test verifies that multiple endpoints are properly initialized
// Verify both clusters are initialized properly
- assertNotNull(provider.getCluster(endpoint1), "Cluster 1 should be available");
- assertNotNull(provider.getCluster(endpoint2), "Cluster 2 should be available");
+ assertNotNull(provider.getDatabase(endpoint1), "Database 1 should be available");
+ assertNotNull(provider.getDatabase(endpoint2), "Database 2 should be available");
// Both should be healthy (no health checks = assumed healthy)
- assertTrue(provider.getCluster(endpoint1).isHealthy(), "Cluster 1 should be healthy");
- assertTrue(provider.getCluster(endpoint2).isHealthy(), "Cluster 2 should be healthy");
+ assertTrue(provider.getDatabase(endpoint1).isHealthy(), "Database 1 should be healthy");
+ assertTrue(provider.getDatabase(endpoint2).isHealthy(), "Database 2 should be healthy");
}
}
}
@@ -158,22 +158,22 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception
@Test
void init_single_cluster_initializes_and_is_healthy() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1 }).build();
// This test verifies that the provider initializes correctly and doesn't lose events
// In practice, with health checks disabled, no events should be generated during init
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
// Verify successful initialization
- assertNotNull(provider.getCluster(), "Provider should have initialized successfully");
- assertEquals(provider.getCluster(endpoint1), provider.getCluster(),
+ assertNotNull(provider.getDatabase(), "Provider should have initialized successfully");
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(),
"Should have selected the configured cluster");
- assertTrue(provider.getCluster().isHealthy(),
- "Cluster should be healthy (assumed healthy with no health checks)");
+ assertTrue(provider.getDatabase().isHealthy(),
+ "Database should be healthy (assumed healthy with no health checks)");
}
}
}
@@ -183,42 +183,42 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception {
@Test
void postInit_two_hop_failover_chain_respected() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(0.2f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
// First event: endpoint1 (active) becomes UNHEALTHY -> failover to endpoint2, endpoint1
// enters grace
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Endpoint1 should be in grace after unhealthy");
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Should have failed over to endpoint2");
// Second event: endpoint2 (now active) becomes UNHEALTHY -> failover to endpoint3
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
- assertTrue(provider.getCluster(endpoint2).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint2).isInGracePeriod(),
"Endpoint2 should be in grace after unhealthy");
- assertEquals(provider.getCluster(endpoint3), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(),
"Should have failed over to endpoint3");
// Third event: endpoint1 becomes HEALTHY again -> no immediate switch due to grace period
// behavior
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
- assertEquals(provider.getCluster(endpoint3), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(),
"Active cluster should remain endpoint3");
}
}
@@ -227,33 +227,33 @@ void postInit_two_hop_failover_chain_respected() throws Exception {
@Test
void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
+ new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
// Verify initial state
- assertEquals(HealthStatus.HEALTHY, provider.getCluster(endpoint1).getHealthStatus(),
+ assertEquals(HealthStatus.HEALTHY, provider.getDatabase(endpoint1).getHealthStatus(),
"Should start as HEALTHY");
// Send rapid sequence of events post-init
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // triggers failover and grace
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // non-active cluster becomes healthy
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // still non-active and in grace; no change
// Final expectations: endpoint1 is in grace, provider remains on endpoint2
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Endpoint1 should be in grace period");
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Active cluster should remain endpoint2");
}
}
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index a6deb256eb..400fd65404 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -9,10 +9,10 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
import redis.clients.jedis.util.ClientTestUtil;
import java.io.IOException;
@@ -62,13 +62,13 @@ public void testFailover() {
.socketTimeoutMillis(SOCKET_TIMEOUT_MS)
.connectionTimeoutMillis(CONNECTION_TIMEOUT_MS).build();
- ClusterConfig primary = ClusterConfig.builder(endpoint.getHostAndPort(0), config)
+ DatabaseConfig primary = DatabaseConfig.builder(endpoint.getHostAndPort(0), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build();
- ClusterConfig secondary = ClusterConfig.builder(endpoint.getHostAndPort(1), config)
+ DatabaseConfig secondary = DatabaseConfig.builder(endpoint.getHostAndPort(1), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
- MultiClusterClientConfig multiConfig = MultiClusterClientConfig.builder()
+ MultiDatabaseConfig multiConfig = MultiDatabaseConfig.builder()
.endpoint(primary)
.endpoint(secondary)
.circuitBreakerSlidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS
@@ -208,9 +208,9 @@ public void accept(ClusterSwitchEventArgs e) {
throw new RuntimeException(e);
}
- MultiClusterPooledConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
- ConnectionPool pool1 = provider.getCluster(endpoint.getHostAndPort(0)).getConnectionPool();
- ConnectionPool pool2 = provider.getCluster(endpoint.getHostAndPort(1)).getConnectionPool();
+ MultiDatabaseConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
+ ConnectionPool pool1 = provider.getDatabase(endpoint.getHostAndPort(0)).getConnectionPool();
+ ConnectionPool pool2 = provider.getDatabase(endpoint.getHostAndPort(1)).getConnectionPool();
await().atMost(Duration.ofSeconds(1)).until(() -> pool1.getNumActive() == 0);
await().atMost(Duration.ofSeconds(1)).until(() -> pool2.getNumActive() == 0);
From 12d50a4292f98004404211e66a1a77cdfa01eb71 Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 14:08:39 +0300
Subject: [PATCH 07/17] Rename MultiDatabaseConfig to MultiDbConfig
---
pom.xml | 2 +-
.../redis/clients/jedis/MultiDbClient.java | 6 +-
...DatabaseConfig.java => MultiDbConfig.java} | 26 +++----
.../jedis/builders/MultiDbClientBuilder.java | 8 +--
.../mcf/CircuitBreakerThresholdsAdapter.java | 8 +--
.../redis/clients/jedis/mcf/EchoStrategy.java | 2 +-
.../jedis/mcf/JedisFailoverException.java | 10 +--
.../mcf/MultiDatabaseConnectionProvider.java | 66 ++++++++---------
.../clients/jedis/MultiDbClientTest.java | 8 +--
.../failover/FailoverIntegrationTest.java | 14 ++--
.../mcf/ActiveActiveLocalFailoverTest.java | 6 +-
.../mcf/CircuitBreakerThresholdsTest.java | 16 ++---
.../mcf/ClusterEvaluateThresholdsTest.java | 8 +--
.../clients/jedis/mcf/DefaultValuesTest.java | 8 +--
.../mcf/FailbackMechanismIntegrationTest.java | 70 +++++++++----------
.../jedis/mcf/FailbackMechanismUnitTest.java | 64 ++++++++---------
.../jedis/mcf/HealthCheckIntegrationTest.java | 16 ++---
.../clients/jedis/mcf/HealthCheckTest.java | 42 +++++------
.../MultiClusterDynamicEndpointUnitTest.java | 10 +--
...ultiClusterFailoverAttemptsConfigTest.java | 16 ++---
.../mcf/MultiClusterInitializationTest.java | 36 +++++-----
.../MultiDatabaseConnectionProviderTest.java | 14 ++--
.../jedis/mcf/PeriodicFailbackTest.java | 36 +++++-----
.../jedis/misc/AutomaticFailoverTest.java | 16 ++---
...erProviderHealthStatusChangeEventTest.java | 50 ++++++-------
.../scenario/ActiveActiveFailoverTest.java | 4 +-
26 files changed, 281 insertions(+), 281 deletions(-)
rename src/main/java/redis/clients/jedis/{MultiDatabaseConfig.java => MultiDbConfig.java} (98%)
diff --git a/pom.xml b/pom.xml
index 957561819d..e056cb1748 100644
--- a/pom.xml
+++ b/pom.xml
@@ -488,7 +488,7 @@
**/Health*.java
**/*IT.java
**/scenario/RestEndpointUtil.java
- src/main/java/redis/clients/jedis/MultiDatabaseConfig.java
+ src/main/java/redis/clients/jedis/MultiDbConfig.java
src/main/java/redis/clients/jedis/HostAndPort.java
**/builders/*.java
**/MultiDb*.java
diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java
index 9224307a56..2e008f5c98 100644
--- a/src/main/java/redis/clients/jedis/MultiDbClient.java
+++ b/src/main/java/redis/clients/jedis/MultiDbClient.java
@@ -1,6 +1,6 @@
package redis.clients.jedis;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.builders.MultiDbClientBuilder;
import redis.clients.jedis.csc.Cache;
@@ -43,7 +43,7 @@
*
* MultiDbClient client = MultiDbClient.builder()
* .multiDbConfig(
- * MultiDatabaseConfig.builder()
+ * MultiDbConfig.builder()
* .endpoint(
* DatabaseConfig.builder(
* primary,
@@ -78,7 +78,7 @@
* @since 5.2.0
* @see MultiDatabaseConnectionProvider
* @see CircuitBreakerCommandExecutor
- * @see MultiDatabaseConfig
+ * @see MultiDbConfig
*/
@Experimental
public class MultiDbClient extends UnifiedJedis {
diff --git a/src/main/java/redis/clients/jedis/MultiDatabaseConfig.java b/src/main/java/redis/clients/jedis/MultiDbConfig.java
similarity index 98%
rename from src/main/java/redis/clients/jedis/MultiDatabaseConfig.java
rename to src/main/java/redis/clients/jedis/MultiDbConfig.java
index 96ab7d7971..5bde00c34a 100644
--- a/src/main/java/redis/clients/jedis/MultiDatabaseConfig.java
+++ b/src/main/java/redis/clients/jedis/MultiDbConfig.java
@@ -56,7 +56,7 @@
* .healthCheckEnabled(true).build();
*
* // Build multi-cluster configuration
- * MultiDatabaseConfig config = MultiDatabaseConfig.builder(primary, secondary)
+ * MultiDbConfig config = MultiDbConfig.builder(primary, secondary)
* .circuitBreakerFailureRateThreshold(10.0f).retryMaxAttempts(3).failbackSupported(true)
* .gracePeriod(10000).build();
*
@@ -76,7 +76,7 @@
*/
// TODO: move
@Experimental
-public final class MultiDatabaseConfig {
+public final class MultiDbConfig {
/**
* Functional interface for creating {@link HealthCheckStrategy} instances for specific Redis
@@ -437,7 +437,7 @@ public static interface StrategySupplier {
private int delayInBetweenFailoverAttempts;
/**
- * Constructs a new MultiDatabaseConfig with the specified cluster configurations.
+ * Constructs a new MultiDbConfig with the specified cluster configurations.
*
* This constructor validates that at least one cluster configuration is provided and that all
* configurations are non-null. Use the {@link Builder} class for more convenient configuration
@@ -448,7 +448,7 @@ public static interface StrategySupplier {
* @throws IllegalArgumentException if any cluster configuration is null
* @see Builder#Builder(DatabaseConfig[])
*/
- public MultiDatabaseConfig(DatabaseConfig[] databaseConfigs) {
+ public MultiDbConfig(DatabaseConfig[] databaseConfigs) {
if (databaseConfigs == null || databaseConfigs.length < 1) throw new JedisValidationException(
"DatabaseClientConfigs are required for MultiDatabaseConnectionProvider");
@@ -636,7 +636,7 @@ public boolean isFastFailover() {
}
/**
- * Creates a new Builder instance for configuring MultiDatabaseConfig.
+ * Creates a new Builder instance for configuring MultiDbConfig.
*
* At least one cluster configuration must be added to the builder before calling build(). Use the
* endpoint() methods to add cluster configurations.
@@ -650,7 +650,7 @@ public static Builder builder() {
}
/**
- * Creates a new Builder instance for configuring MultiDatabaseConfig.
+ * Creates a new Builder instance for configuring MultiDbConfig.
* @param databaseConfigs array of cluster configurations defining available Redis endpoints
* @return new Builder instance
* @throws JedisValidationException if databaseConfigs is null or empty
@@ -661,7 +661,7 @@ public static Builder builder(DatabaseConfig[] databaseConfigs) {
}
/**
- * Creates a new Builder instance for configuring MultiDatabaseConfig.
+ * Creates a new Builder instance for configuring MultiDbConfig.
* @param databaseConfigs list of cluster configurations defining available Redis endpoints
* @return new Builder instance
* @throws JedisValidationException if databaseConfigs is null or empty
@@ -976,7 +976,7 @@ public DatabaseConfig build() {
}
/**
- * Builder class for creating MultiDatabaseConfig instances with comprehensive configuration
+ * Builder class for creating MultiDbConfig instances with comprehensive configuration
* options.
*
* The Builder provides a fluent API for configuring all aspects of multi-cluster failover
@@ -984,7 +984,7 @@ public DatabaseConfig build() {
* sensible defaults based on production best practices while allowing fine-tuning for specific
* requirements.
*
- * @see MultiDatabaseConfig
+ * @see MultiDbConfig
* @see DatabaseConfig
*/
public static class Builder {
@@ -1499,17 +1499,17 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts
}
/**
- * Builds and returns a new MultiDatabaseConfig instance with all configured settings.
+ * Builds and returns a new MultiDbConfig instance with all configured settings.
*
* This method creates the final configuration object by copying all builder settings to the
* configuration instance. The builder can be reused after calling build() to create additional
* configurations with different settings.
*
- * @return a new MultiDatabaseConfig instance with the configured settings
+ * @return a new MultiDbConfig instance with the configured settings
*/
- public MultiDatabaseConfig build() {
+ public MultiDbConfig build() {
- MultiDatabaseConfig config = new MultiDatabaseConfig(
+ MultiDbConfig config = new MultiDbConfig(
this.databaseConfigs.toArray(new DatabaseConfig[0]));
// Copy retry configuration
diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
index c4592ec905..252a931762 100644
--- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
+++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
@@ -2,7 +2,7 @@
import java.util.function.Consumer;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.executors.CommandExecutor;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
@@ -38,7 +38,7 @@
*
* MultiDbClient client = MultiDbClient.builder()
* .multiDbConfig(
- * MultiDatabaseConfig.builder()
+ * MultiDbConfig.builder()
* .endpoint(
* DatabaseConfig.builder(
* east,
@@ -67,7 +67,7 @@ public abstract class MultiDbClientBuilder
extends AbstractClientBuilder, C> {
// Multi-db specific configuration fields
- private MultiDatabaseConfig multiDbConfig = null;
+ private MultiDbConfig multiDbConfig = null;
private Consumer databaseSwitchListener = null;
/**
@@ -79,7 +79,7 @@ public abstract class MultiDbClientBuilder
* @param config the multi-database configuration
* @return this builder
*/
- public MultiDbClientBuilder multiDbConfig(MultiDatabaseConfig config) {
+ public MultiDbClientBuilder multiDbConfig(MultiDbConfig config) {
this.multiDbConfig = config;
return this;
}
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
index b23a07289c..4c4d14c38b 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
@@ -1,7 +1,7 @@
package redis.clients.jedis.mcf;
import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
/**
* Adapter that disables Resilience4j's built-in circuit breaker evaluation and help delegate
@@ -67,9 +67,9 @@ int getSlidingWindowSize() {
* method controls circuit breaker state based on the original configuration's dual-threshold
* logic.
*
- * @param multiDatabaseConfig configuration containing sliding window size
+ * @param multiDbConfig configuration containing sliding window size
*/
- CircuitBreakerThresholdsAdapter(MultiDatabaseConfig multiDatabaseConfig) {
+ CircuitBreakerThresholdsAdapter(MultiDbConfig multiDbConfig) {
// IMPORTANT: failureRateThreshold is set to max theoretically disable Resilience4j's evaluation
// and rely on our custom evaluateThresholds() logic.
@@ -79,6 +79,6 @@ int getSlidingWindowSize() {
// and rely on our custom evaluateThresholds() logic.
minimumNumberOfCalls = Integer.MAX_VALUE;
- slidingWindowSize = multiDatabaseConfig.getCircuitBreakerSlidingWindowSize();
+ slidingWindowSize = multiDbConfig.getCircuitBreakerSlidingWindowSize();
}
}
diff --git a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
index 51173ace31..6be05e2cfb 100644
--- a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
@@ -8,7 +8,7 @@
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.JedisPooled;
import redis.clients.jedis.UnifiedJedis;
-import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier;
+import redis.clients.jedis.MultiDbConfig.StrategySupplier;
public class EchoStrategy implements HealthCheckStrategy {
private static final int MAX_HEALTH_CHECK_POOL_SIZE = 2;
diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
index fec047824f..2d6a81b81a 100644
--- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
+++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
@@ -11,7 +11,7 @@
* @see JedisFailoverException.JedisTemporarilyNotAvailableException
*/
public class JedisFailoverException extends JedisConnectionException {
- private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiDatabaseConfig was not "
+ private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiDbConfig was not "
+ "provided with an additional cluster/database endpoint according to its prioritized sequence. "
+ "If applicable, consider falling back OR restarting with an available cluster/database endpoint";
@@ -28,8 +28,8 @@ public JedisFailoverException() {
* the max number of failover attempts has been exceeded. And there is still no healthy cluster.
*
* See the configuration properties
- * {@link redis.clients.jedis.MultiDatabaseConfig#maxNumFailoverAttempts} and
- * {@link redis.clients.jedis.MultiDatabaseConfig#delayInBetweenFailoverAttempts} for more
+ * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and
+ * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more
* details.
*/
public static class JedisPermanentlyNotAvailableException extends JedisFailoverException {
@@ -49,8 +49,8 @@ public JedisPermanentlyNotAvailableException() {
* temporary condition and it is possible that there will be a healthy cluster available.
*
* See the configuration properties
- * {@link redis.clients.jedis.MultiDatabaseConfig#maxNumFailoverAttempts} and
- * {@link redis.clients.jedis.MultiDatabaseConfig#delayInBetweenFailoverAttempts} for more
+ * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and
+ * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more
* details.
*/
public static class JedisTemporarilyNotAvailableException extends JedisFailoverException {
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java
index 9b7d12cb96..ba445ae5cb 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java
@@ -33,7 +33,7 @@
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.annots.VisibleForTesting;
import redis.clients.jedis.exceptions.JedisConnectionException;
@@ -41,7 +41,7 @@
import redis.clients.jedis.exceptions.JedisValidationException;
import redis.clients.jedis.mcf.JedisFailoverException.*;
import redis.clients.jedis.providers.ConnectionProvider;
-import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier;
+import redis.clients.jedis.MultiDbConfig.StrategySupplier;
import redis.clients.jedis.util.Pool;
/**
@@ -69,7 +69,7 @@ public class MultiDatabaseConnectionProvider implements ConnectionProvider {
/**
* Indicates the actively used database endpoint (connection pool) amongst the pre-configured list
- * which were provided at startup via the MultiDatabaseConfig. All traffic will be routed with
+ * which were provided at startup via the MultiDbConfig. All traffic will be routed with
* this database
*/
private volatile Database activeDatabase;
@@ -101,30 +101,30 @@ public class MultiDatabaseConnectionProvider implements ConnectionProvider {
// Store retry and circuit breaker configs for dynamic database addition/removal
private RetryConfig retryConfig;
private CircuitBreakerConfig circuitBreakerConfig;
- private MultiDatabaseConfig multiDatabaseConfig;
+ private MultiDbConfig multiDbConfig;
private AtomicLong failoverFreezeUntil = new AtomicLong(0);
private AtomicInteger failoverAttemptCount = new AtomicInteger(0);
- public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig) {
+ public MultiDatabaseConnectionProvider(MultiDbConfig multiDbConfig) {
- if (multiDatabaseConfig == null) throw new JedisValidationException(
- "MultiDatabaseConfig must not be NULL for MultiDatabaseConnectionProvider");
+ if (multiDbConfig == null) throw new JedisValidationException(
+ "MultiDbConfig must not be NULL for MultiDatabaseConnectionProvider");
- this.multiDatabaseConfig = multiDatabaseConfig;
+ this.multiDbConfig = multiDbConfig;
////////////// Configure Retry ////////////////////
RetryConfig.Builder retryConfigBuilder = RetryConfig.custom();
- retryConfigBuilder.maxAttempts(multiDatabaseConfig.getRetryMaxAttempts());
+ retryConfigBuilder.maxAttempts(multiDbConfig.getRetryMaxAttempts());
retryConfigBuilder.intervalFunction(
- IntervalFunction.ofExponentialBackoff(multiDatabaseConfig.getRetryWaitDuration(),
- multiDatabaseConfig.getRetryWaitDurationExponentialBackoffMultiplier()));
+ IntervalFunction.ofExponentialBackoff(multiDbConfig.getRetryWaitDuration(),
+ multiDbConfig.getRetryWaitDurationExponentialBackoffMultiplier()));
retryConfigBuilder.failAfterMaxAttempts(false); // JedisConnectionException will be thrown
retryConfigBuilder.retryExceptions(
- multiDatabaseConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new));
+ multiDbConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new));
- List retryIgnoreExceptionList = multiDatabaseConfig.getRetryIgnoreExceptionList();
+ List retryIgnoreExceptionList = multiDbConfig.getRetryIgnoreExceptionList();
if (retryIgnoreExceptionList != null)
retryConfigBuilder.ignoreExceptions(retryIgnoreExceptionList.stream().toArray(Class[]::new));
@@ -135,14 +135,14 @@ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig)
CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom();
CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(
- multiDatabaseConfig);
+ multiDbConfig);
circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls());
circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold());
circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize());
circuitBreakerConfigBuilder.slidingWindowType(adapter.getSlidingWindowType());
circuitBreakerConfigBuilder.recordExceptions(
- multiDatabaseConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
+ multiDbConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
circuitBreakerConfigBuilder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State
// transitions
// are
@@ -151,7 +151,7 @@ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig)
// states
// are used
- List circuitBreakerIgnoreExceptionList = multiDatabaseConfig
+ List circuitBreakerIgnoreExceptionList = multiDbConfig
.getCircuitBreakerIgnoreExceptionList();
if (circuitBreakerIgnoreExceptionList != null) circuitBreakerConfigBuilder
.ignoreExceptions(circuitBreakerIgnoreExceptionList.stream().toArray(Class[]::new));
@@ -160,11 +160,11 @@ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig)
////////////// Configure Database Map ////////////////////
- DatabaseConfig[] databaseConfigs = multiDatabaseConfig.getDatabaseConfigs();
+ DatabaseConfig[] databaseConfigs = multiDbConfig.getDatabaseConfigs();
// Now add databases - health checks will start but events will be queued
for (DatabaseConfig config : databaseConfigs) {
- addClusterInternal(multiDatabaseConfig, config);
+ addClusterInternal(multiDbConfig, config);
}
// Initialize StatusTracker for waiting on health check results
@@ -185,11 +185,11 @@ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig)
waitForInitialHealthyCluster(statusTracker);
switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, temp);
}
- this.fallbackExceptionList = multiDatabaseConfig.getFallbackExceptionList();
+ this.fallbackExceptionList = multiDbConfig.getFallbackExceptionList();
// Start periodic failback checker
- if (multiDatabaseConfig.isFailbackSupported()) {
- long failbackInterval = multiDatabaseConfig.getFailbackCheckInterval();
+ if (multiDbConfig.isFailbackSupported()) {
+ long failbackInterval = multiDbConfig.getFailbackCheckInterval();
failbackScheduler.scheduleAtFixedRate(this::periodicFailbackCheck, failbackInterval,
failbackInterval, TimeUnit.MILLISECONDS);
}
@@ -213,7 +213,7 @@ public void add(DatabaseConfig databaseConfig) {
activeDatabaseChangeLock.lock();
try {
- addClusterInternal(multiDatabaseConfig, databaseConfig);
+ addClusterInternal(multiDbConfig, databaseConfig);
} finally {
activeDatabaseChangeLock.unlock();
}
@@ -286,7 +286,7 @@ public void remove(Endpoint endpoint) {
* Internal method to add a database configuration. This method is not thread-safe and should be
* called within appropriate locks.
*/
- private void addClusterInternal(MultiDatabaseConfig multiDatabaseConfig, DatabaseConfig config) {
+ private void addClusterInternal(MultiDbConfig multiDbConfig, DatabaseConfig config) {
if (databaseMap.containsKey(config.getEndpoint())) {
throw new JedisValidationException(
"Endpoint " + config.getEndpoint() + " already exists in the provider");
@@ -322,10 +322,10 @@ private void addClusterInternal(MultiDatabaseConfig multiDatabaseConfig, Databas
healthStatusManager.registerListener(config.getEndpoint(), this::onHealthStatusChange);
HealthCheck hc = healthStatusManager.add(config.getEndpoint(), hcs);
database = new Database(config.getEndpoint(), pool, retry, hc, circuitBreaker,
- config.getWeight(), multiDatabaseConfig);
+ config.getWeight(), multiDbConfig);
} else {
database = new Database(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(),
- multiDatabaseConfig);
+ multiDbConfig);
}
databaseMap.put(config.getEndpoint(), database);
@@ -475,7 +475,7 @@ Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) {
}
private void handleNoHealthyCluster() {
- int max = multiDatabaseConfig.getMaxNumFailoverAttempts();
+ int max = multiDbConfig.getMaxNumFailoverAttempts();
log.error("No healthy cluster available to switch to");
if (failoverAttemptCount.get() > max) {
throw new JedisPermanentlyNotAvailableException();
@@ -494,7 +494,7 @@ private boolean markAsFreeze() {
long until = failoverFreezeUntil.get();
long now = System.currentTimeMillis();
if (until <= now) {
- long nextUntil = now + multiDatabaseConfig.getDelayInBetweenFailoverAttempts();
+ long nextUntil = now + multiDbConfig.getDelayInBetweenFailoverAttempts();
if (failoverFreezeUntil.compareAndSet(until, nextUntil)) {
return true;
}
@@ -639,7 +639,7 @@ private boolean setActiveDatabase(Database database, boolean validateConnection)
activeDatabaseChangeLock.unlock();
}
boolean switched = oldCluster != database;
- if (switched && this.multiDatabaseConfig.isFastFailover()) {
+ if (switched && this.multiDbConfig.isFastFailover()) {
log.info("Forcing disconnect of all active connections in old database: {}",
oldCluster.circuitBreaker.getName());
oldCluster.forceDisconnect();
@@ -734,7 +734,7 @@ public CircuitBreaker getDatabaseCircuitBreaker() {
/**
* Indicates the final cluster/database endpoint (connection pool), according to the
- * pre-configured list provided at startup via the MultiDatabaseConfig, is unavailable and
+ * pre-configured list provided at startup via the MultiDbConfig, is unavailable and
* therefore no further failover is possible. Users can manually failback to an available cluster
*/
public boolean canIterateFrom(Database iterateFrom) {
@@ -764,7 +764,7 @@ public static class Database {
private final CircuitBreaker circuitBreaker;
private final float weight;
private final HealthCheck healthCheck;
- private final MultiDatabaseConfig multiDbConfig;
+ private final MultiDbConfig multiDbConfig;
private boolean disabled = false;
private final Endpoint endpoint;
@@ -773,20 +773,20 @@ public static class Database {
private final Logger log = LoggerFactory.getLogger(getClass());
private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
- CircuitBreaker circuitBreaker, float weight, MultiDatabaseConfig multiDatabaseConfig) {
+ CircuitBreaker circuitBreaker, float weight, MultiDbConfig multiDbConfig) {
this.endpoint = endpoint;
this.connectionPool = connectionPool;
this.retry = retry;
this.circuitBreaker = circuitBreaker;
this.weight = weight;
- this.multiDbConfig = multiDatabaseConfig;
+ this.multiDbConfig = multiDbConfig;
this.healthCheck = null;
}
private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
HealthCheck hc, CircuitBreaker circuitBreaker, float weight,
- MultiDatabaseConfig multiDbConfig) {
+ MultiDbConfig multiDbConfig) {
this.endpoint = endpoint;
this.connectionPool = connectionPool;
diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
index 05e923f2d4..5325f0e742 100644
--- a/src/test/java/redis/clients/jedis/MultiDbClientTest.java
+++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
@@ -15,7 +15,7 @@
import static org.hamcrest.Matchers.not;
import static org.junit.jupiter.api.Assertions.*;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
import redis.clients.jedis.mcf.SwitchReason;
@@ -56,7 +56,7 @@ public static void setupAdminClients() throws IOException {
@BeforeEach
void setUp() {
// Create a simple resilient client with mock endpoints for testing
- MultiDatabaseConfig clientConfig = MultiDatabaseConfig.builder()
+ MultiDbConfig clientConfig = MultiDbConfig.builder()
.endpoint(endpoint1.getHostAndPort(), 100.0f, endpoint1.getClientConfigBuilder().build())
.endpoint(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build())
.build();
@@ -121,7 +121,7 @@ void testSetActiveDatabase() {
@Test
void testBuilderWithMultipleEndpointTypes() {
- MultiDatabaseConfig clientConfig = MultiDatabaseConfig.builder()
+ MultiDbConfig clientConfig = MultiDbConfig.builder()
.endpoint(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build())
.endpoint(DatabaseConfig
.builder(endpoint2.getHostAndPort(), DefaultJedisClientConfig.builder().build())
@@ -172,7 +172,7 @@ public void testForceActiveEndpointWithNonExistingEndpoint() {
@Test
public void testWithDatabaseSwitchListener() {
- MultiDatabaseConfig endpointsConfig = MultiDatabaseConfig.builder()
+ MultiDbConfig endpointsConfig = MultiDbConfig.builder()
.endpoint(DatabaseConfig
.builder(endpoint1.getHostAndPort(), endpoint1.getClientConfigBuilder().build())
.weight(100.0f).build())
diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
index 3416f68b74..7cda9db8fd 100644
--- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
@@ -16,7 +16,7 @@
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
@@ -180,13 +180,13 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID));
}
- private List getDatabaseConfigs(
+ private List getDatabaseConfigs(
JedisClientConfig clientConfig, EndpointConfig... endpoints) {
int weight = endpoints.length;
AtomicInteger weightCounter = new AtomicInteger(weight);
return Arrays.stream(endpoints)
- .map(e -> MultiDatabaseConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)
+ .map(e -> MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)
.weight(1.0f / weightCounter.getAndIncrement()).healthCheckEnabled(false).build())
.collect(Collectors.toList());
}
@@ -261,7 +261,7 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws
*/
@Test
public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOException {
- MultiDatabaseConfig failoverConfig = new MultiDatabaseConfig.Builder(getDatabaseConfigs(
+ MultiDbConfig failoverConfig = new MultiDbConfig.Builder(getDatabaseConfigs(
DefaultJedisClientConfig.builder().socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(),
endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1)
@@ -422,7 +422,7 @@ private MultiDatabaseConnectionProvider createProvider() {
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
- MultiDatabaseConfig failoverConfig = new MultiDatabaseConfig.Builder(
+ MultiDbConfig failoverConfig = new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
.retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
.circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build();
@@ -435,12 +435,12 @@ private MultiDatabaseConnectionProvider createProvider() {
* @return A configured provider
*/
private MultiDatabaseConnectionProvider createProvider(
- Function<MultiDatabaseConfig.Builder, MultiDatabaseConfig.Builder> configCustomizer) {
+ Function<MultiDbConfig.Builder, MultiDbConfig.Builder> configCustomizer) {
JedisClientConfig clientConfig = DefaultJedisClientConfig.builder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
- MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
.retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
.circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f);
diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
index da295e837e..6fdc720cc9 100644
--- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
@@ -16,7 +16,7 @@
import eu.rekawek.toxiproxy.ToxiproxyClient;
import eu.rekawek.toxiproxy.model.Toxic;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.scenario.ActiveActiveFailoverTest;
import redis.clients.jedis.scenario.MultiThreadedFakeApp;
import redis.clients.jedis.scenario.RecommendedSettings;
@@ -94,7 +94,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio
"TESTING WITH PARAMETERS: fastFailover: {} numberOfThreads: {} minFailoverCompletionDuration: {} maxFailoverCompletionDuration: {} ",
fastFailover, numberOfThreads, minFailoverCompletionDuration, maxFailoverCompletionDuration);
- MultiDatabaseConfig.DatabaseConfig[] clusterConfig = new MultiDatabaseConfig.DatabaseConfig[2];
+ MultiDbConfig.DatabaseConfig[] clusterConfig = new MultiDbConfig.DatabaseConfig[2];
JedisClientConfig config = endpoint1.getClientConfigBuilder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
@@ -105,7 +105,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio
clusterConfig[1] = DatabaseConfig.builder(endpoint2.getHostAndPort(), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
- MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(clusterConfig);
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(clusterConfig);
builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS
builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit
diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
index bf7f702004..2e71b0f554 100644
--- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
@@ -16,8 +16,8 @@
import redis.clients.jedis.Connection;
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
-import redis.clients.jedis.MultiDatabaseConfig;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
@@ -50,11 +50,11 @@ public void setup() throws Exception {
.healthCheckEnabled(false).weight(0.5f).build() };
fakeDatabaseConfigs = databaseConfigs;
- MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(databaseConfigs)
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(databaseConfigs)
.circuitBreakerFailureRateThreshold(50.0f).circuitBreakerMinNumOfFailures(3)
.circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false);
- MultiDatabaseConfig mcc = cfgBuilder.build();
+ MultiDbConfig mcc = cfgBuilder.build();
realProvider = new MultiDatabaseConnectionProvider(mcc);
spyProvider = spy(realProvider);
@@ -123,7 +123,7 @@ public void minFailuresAndRateExceeded_triggersFailover() {
@Test
public void rateBelowThreshold_doesNotFailover() throws Exception {
// Use local provider with higher threshold (80%) and no retries
- MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs)
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs)
.circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3)
.circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false);
MultiDatabaseConnectionProvider rp = new MultiDatabaseConnectionProvider(cfgBuilder.build());
@@ -162,10 +162,10 @@ public void rateBelowThreshold_doesNotFailover() throws Exception {
@Test
public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
- MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs);
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs);
cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3)
.circuitBreakerSlidingWindowSize(10);
- MultiDatabaseConfig mcc = cfgBuilder.build();
+ MultiDbConfig mcc = cfgBuilder.build();
CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc);
@@ -189,7 +189,7 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures,
boolean expectFailoverOnNext) throws Exception {
- MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs)
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs)
.circuitBreakerFailureRateThreshold(ratePercent).circuitBreakerMinNumOfFailures(minFailures)
.circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1)
.retryOnFailover(false);
diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
index 251d69140c..a3e9317a98 100644
--- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
@@ -11,7 +11,7 @@
import org.junit.jupiter.params.provider.CsvSource;
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
/**
@@ -100,13 +100,13 @@ public void rateBelowThreshold_doesNotFailover() {
@Test
public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
- MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig
- .builder(java.util.Arrays.asList(MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig
+ .builder(java.util.Arrays.asList(MultiDbConfig.DatabaseConfig
.builder(new HostAndPort("localhost", 6379), DefaultJedisClientConfig.builder().build())
.healthCheckEnabled(false).build()));
cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3)
.circuitBreakerSlidingWindowSize(10);
- MultiDatabaseConfig mcc = cfgBuilder.build();
+ MultiDbConfig mcc = cfgBuilder.build();
CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc);
diff --git a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
index 51d0aa3ec2..e3e5f3f05e 100644
--- a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
public class DefaultValuesTest {
@@ -19,10 +19,10 @@ public class DefaultValuesTest {
@Test
void testDefaultValuesInConfig() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(fakeEndpoint, config).build();
- MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
// check for grace period
assertEquals(60000, multiConfig.getGracePeriod());
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
index c216e69317..7179c0c475 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
@@ -17,7 +17,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
@ExtendWith(MockitoExtension.class)
class FailbackMechanismIntegrationTest {
@@ -49,16 +49,16 @@ private MockedConstruction mockPool() {
void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with different weights
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f) // Higher weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
.failbackCheckInterval(100) // Short interval for testing
.build();
@@ -89,16 +89,16 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException {
void testFailbackToHigherWeightCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with different weights
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f) // Higher weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f) // Lower weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(100) // Short interval for testing
.gracePeriod(100).build();
@@ -129,20 +129,20 @@ void testFailbackToHigherWeightCluster() throws InterruptedException {
void testNoFailbackToLowerWeightCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create three clusters with different weights to properly test no failback to lower weight
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f) // Lowest weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f) // Medium weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(100).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
@@ -172,16 +172,16 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException {
@Test
void testFailbackToHigherWeightClusterImmediately() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(100).gracePeriod(50).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
@@ -211,16 +211,16 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException
@Test
void testUnhealthyClusterCancelsFailback() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(200).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
@@ -256,20 +256,20 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException {
@Test
void testMultipleClusterFailbackPriority() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lowest
// weight
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Medium
// weight
- MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
@@ -298,16 +298,16 @@ void testMultipleClusterFailbackPriority() throws InterruptedException {
@Test
void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(100).gracePeriod(200) // 200ms grace
// period
.build();
@@ -332,16 +332,16 @@ void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException {
@Test
void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(50) // Short interval for testing
.gracePeriod(100) // Short grace period for testing
.build();
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
index a200296e18..a69bdb9ee6 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
@ExtendWith(MockitoExtension.class)
class FailbackMechanismUnitTest {
@@ -26,17 +26,17 @@ void setUp() {
@Test
void testFailbackCheckIntervalConfiguration() {
// Test default value
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
- MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
+ MultiDbConfig defaultConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
assertEquals(120000, defaultConfig.getFailbackCheckInterval());
// Test custom value
- MultiDatabaseConfig customConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000)
+ MultiDbConfig customConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000)
.build();
assertEquals(3000, customConfig.getFailbackCheckInterval());
@@ -44,18 +44,18 @@ void testFailbackCheckIntervalConfiguration() {
@Test
void testFailbackSupportedConfiguration() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test default (should be true)
- MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
+ MultiDbConfig defaultConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
assertTrue(defaultConfig.isFailbackSupported());
// Test disabled
- MultiDatabaseConfig disabledConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false)
+ MultiDbConfig disabledConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false)
.build();
assertFalse(disabledConfig.isFailbackSupported());
@@ -63,19 +63,19 @@ void testFailbackSupportedConfiguration() {
@Test
void testFailbackCheckIntervalValidation() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test zero interval (should be allowed)
- MultiDatabaseConfig zeroConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0)
+ MultiDbConfig zeroConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0)
.build();
assertEquals(0, zeroConfig.getFailbackCheckInterval());
// Test negative interval (should be allowed - implementation decision)
- MultiDatabaseConfig negativeConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000)
+ MultiDbConfig negativeConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000)
.build();
assertEquals(-1000, negativeConfig.getFailbackCheckInterval());
@@ -83,12 +83,12 @@ void testFailbackCheckIntervalValidation() {
@Test
void testBuilderChaining() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test that builder methods can be chained
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true)
.failbackCheckInterval(2000).retryOnFailover(true).build();
assertTrue(config.isFailbackSupported());
@@ -99,47 +99,47 @@ void testBuilderChaining() {
@Test
void testGracePeriodConfiguration() {
// Test default value
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
- MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
+ MultiDbConfig defaultConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
assertEquals(60000, defaultConfig.getGracePeriod());
// Test custom value
- MultiDatabaseConfig customConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build();
+ MultiDbConfig customConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build();
assertEquals(5000, customConfig.getGracePeriod());
}
@Test
void testGracePeriodValidation() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test zero grace period (should be allowed)
- MultiDatabaseConfig zeroConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build();
+ MultiDbConfig zeroConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build();
assertEquals(0, zeroConfig.getGracePeriod());
// Test negative grace period (should be allowed - implementation decision)
- MultiDatabaseConfig negativeConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(-1000).build();
+ MultiDbConfig negativeConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(-1000).build();
assertEquals(-1000, negativeConfig.getGracePeriod());
}
@Test
void testGracePeriodBuilderChaining() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test that builder methods can be chained
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true)
.failbackCheckInterval(2000).gracePeriod(8000).retryOnFailover(true).build();
assertTrue(config.isFailbackSupported());
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
index c43baf9933..d315616956 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
@@ -15,10 +15,10 @@
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.UnifiedJedis;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
-import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig.StrategySupplier;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
import redis.clients.jedis.scenario.RecommendedSettings;
@@ -43,7 +43,7 @@ public void testDisableHealthCheck() {
@Test
public void testDefaultStrategySupplier() {
// Create a default strategy supplier that creates EchoStrategy instances
- MultiDatabaseConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> {
+ MultiDbConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> {
return new EchoStrategy(hostAndPort, jedisClientConfig);
};
MultiDatabaseConnectionProvider customProvider = getMCCF(defaultSupplier);
@@ -57,7 +57,7 @@ public void testDefaultStrategySupplier() {
@Test
public void testCustomStrategySupplier() {
// Create a StrategySupplier that uses the JedisClientConfig when available
- MultiDatabaseConfig.StrategySupplier strategySupplier = (hostAndPort, jedisClientConfig) -> {
+ MultiDbConfig.StrategySupplier strategySupplier = (hostAndPort, jedisClientConfig) -> {
return new TestHealthCheckStrategy(HealthCheckStrategy.Config.builder().interval(500)
.timeout(500).numProbes(1).policy(BuiltIn.ANY_SUCCESS).build(), (endpoint) -> {
// Create connection per health check to avoid resource leak
@@ -79,18 +79,18 @@ public void testCustomStrategySupplier() {
}
private MultiDatabaseConnectionProvider getMCCF(
- MultiDatabaseConfig.StrategySupplier strategySupplier) {
+ MultiDbConfig.StrategySupplier strategySupplier) {
Function modifier = builder -> strategySupplier == null
? builder.healthCheckEnabled(false)
: builder.healthCheckStrategySupplier(strategySupplier);
List databaseConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 })
.map(e -> modifier
- .apply(MultiDatabaseConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig))
+ .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig))
.build())
.collect(Collectors.toList());
- MultiDatabaseConfig mccf = new MultiDatabaseConfig.Builder(databaseConfigs).retryMaxAttempts(1)
+ MultiDbConfig mccf = new MultiDbConfig.Builder(databaseConfigs).retryMaxAttempts(1)
.retryWaitDuration(1).circuitBreakerSlidingWindowSize(1)
.circuitBreakerFailureRateThreshold(100).build();
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
index a9a592de1f..7a23a6d88f 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.Endpoint;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
@@ -338,7 +338,7 @@ void testEchoStrategyCustomIntervalTimeout() {
@Test
void testEchoStrategyDefaultSupplier() {
- MultiDatabaseConfig.StrategySupplier supplier = EchoStrategy.DEFAULT;
+ MultiDbConfig.StrategySupplier supplier = EchoStrategy.DEFAULT;
HealthCheckStrategy strategy = supplier.get(testEndpoint, testConfig);
assertInstanceOf(EchoStrategy.class, strategy);
@@ -348,12 +348,12 @@ void testEchoStrategyDefaultSupplier() {
@Test
void testNewFieldLocations() {
- // Test new field locations in DatabaseConfig and MultiDatabaseConfig
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ // Test new field locations in DatabaseConfig and MultiDbConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).weight(2.5f).build();
- MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true)
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true)
.failbackSupported(false).build();
assertEquals(2.5f, clusterConfig.getWeight());
@@ -364,7 +364,7 @@ void testNewFieldLocations() {
@Test
void testDefaultValues() {
// Test default values in DatabaseConfig
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).build();
assertEquals(1.0f, clusterConfig.getWeight()); // Default weight
@@ -374,9 +374,9 @@ void testDefaultValues() {
// health
// check)
- // Test default values in MultiDatabaseConfig
- MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build();
+ // Test default values in MultiDbConfig
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
assertFalse(multiConfig.isRetryOnFailover()); // Default is false
assertTrue(multiConfig.isFailbackSupported()); // Default is true
@@ -386,10 +386,10 @@ void testDefaultValues() {
void testDatabaseConfigWithHealthCheckStrategy() {
HealthCheckStrategy customStrategy = mock(HealthCheckStrategy.class);
- MultiDatabaseConfig.StrategySupplier supplier = (hostAndPort,
+ MultiDbConfig.StrategySupplier supplier = (hostAndPort,
jedisClientConfig) -> customStrategy;
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(supplier).build();
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -400,11 +400,11 @@ void testDatabaseConfigWithHealthCheckStrategy() {
@Test
void testDatabaseConfigWithStrategySupplier() {
- MultiDatabaseConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> {
+ MultiDbConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> {
return mock(HealthCheckStrategy.class);
};
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(customSupplier).build();
assertEquals(customSupplier, clusterConfig.getHealthCheckStrategySupplier());
@@ -412,21 +412,21 @@ void testDatabaseConfigWithStrategySupplier() {
@Test
void testDatabaseConfigWithEchoStrategy() {
- MultiDatabaseConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> {
+ MultiDbConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> {
return new EchoStrategy(hostAndPort, jedisClientConfig);
};
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(echoSupplier).build();
- MultiDatabaseConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier();
+ MultiDbConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier();
assertNotNull(supplier);
assertInstanceOf(EchoStrategy.class, supplier.get(testEndpoint, testConfig));
}
@Test
void testDatabaseConfigWithDefaultHealthCheck() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).build(); // Should use default EchoStrategy
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -435,7 +435,7 @@ void testDatabaseConfigWithDefaultHealthCheck() {
@Test
void testDatabaseConfigWithDisabledHealthCheck() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckEnabled(false).build();
assertNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -443,7 +443,7 @@ void testDatabaseConfigWithDisabledHealthCheck() {
@Test
void testDatabaseConfigHealthCheckEnabledExplicitly() {
- MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckEnabled(true).build();
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -515,7 +515,7 @@ void testHealthCheckIntegration() throws InterruptedException {
@Test
void testStrategySupplierPolymorphism() {
// Test that the polymorphic design works correctly
- MultiDatabaseConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> {
+ MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> {
if (jedisClientConfig != null) {
return new EchoStrategy(hostAndPort, jedisClientConfig,
HealthCheckStrategy.Config.builder().interval(500).timeout(250).numProbes(1).build());
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
index 973c854f6c..41aac09723 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
@@ -11,8 +11,8 @@
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
import static org.junit.jupiter.api.Assertions.*;
@@ -35,7 +35,7 @@ void setUp() {
// Create initial provider with endpoint1
DatabaseConfig initialConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder(
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(
new DatabaseConfig[] { initialConfig }).build();
provider = new MultiDatabaseConnectionProvider(multiConfig);
@@ -82,7 +82,7 @@ void testRemoveExistingCluster() {
// Create initial provider with endpoint1
DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiDatabaseConfig multiConfig = MultiDatabaseConfig
+ MultiDbConfig multiConfig = MultiDbConfig
.builder(new DatabaseConfig[] { clusterConfig1 }).build();
try (
@@ -179,7 +179,7 @@ void testActiveClusterHandlingOnRemove() {
// Create initial provider with endpoint1
DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiDatabaseConfig multiConfig = MultiDatabaseConfig
+ MultiDbConfig multiConfig = MultiDbConfig
.builder(new DatabaseConfig[] { clusterConfig1 }).build();
try (
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
index 2742084082..26e437b2d1 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
@@ -8,8 +8,8 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException;
import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException;
import redis.clients.jedis.util.ReflectionTestUtil;
@@ -40,7 +40,7 @@ void setUp() throws Exception {
DatabaseConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false)
.build() };
- MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(databaseConfigs);
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs);
// Use small values by default for tests unless overridden per-test via reflection
setBuilderFailoverConfig(builder, /* maxAttempts */ 10, /* delayMs */ 12000);
@@ -146,7 +146,7 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex
// ======== Test helper methods (reflection) ========
- private static void setBuilderFailoverConfig(MultiDatabaseConfig.Builder builder, int maxAttempts,
+ private static void setBuilderFailoverConfig(MultiDbConfig.Builder builder, int maxAttempts,
int delayMs) throws Exception {
ReflectionTestUtil.setField(builder, "maxNumFailoverAttempts", maxAttempts);
@@ -154,9 +154,9 @@ private static void setBuilderFailoverConfig(MultiDatabaseConfig.Builder builder
}
private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception {
- // Access the underlying MultiDatabaseConfig inside provider and adjust fields for this
+ // Access the underlying MultiDbConfig inside provider and adjust fields for this
// test
- Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts);
@@ -164,13 +164,13 @@ private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exce
}
private int getProviderMaxAttempts() throws Exception {
- Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
return ReflectionTestUtil.getField(cfg, "maxNumFailoverAttempts");
}
private int getProviderDelayMs() throws Exception {
- Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts");
}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
index 9aed74a5ea..780f3c2571 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
@@ -14,7 +14,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
/**
@@ -49,20 +49,20 @@ private MockedConstruction mockPool() {
void testInitializationWithMixedHealthCheckConfiguration() {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with mixed health check configuration
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false) // No health
// check
.build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f)
.healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With
// health
// check
.build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Should initialize successfully
@@ -80,15 +80,15 @@ void testInitializationWithMixedHealthCheckConfiguration() {
void testInitializationWithAllHealthChecksDisabled() {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with no health checks
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(3.0f) // Higher weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Should select cluster2 (highest weight, no health checks)
@@ -100,11 +100,11 @@ void testInitializationWithAllHealthChecksDisabled() {
@Test
void testInitializationWithSingleCluster() {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Should select the only available cluster
@@ -123,30 +123,30 @@ void testErrorHandlingWithNullConfiguration() {
@Test
void testErrorHandlingWithEmptyClusterArray() {
assertThrows(JedisValidationException.class, () -> {
- new MultiDatabaseConfig.Builder(new MultiDatabaseConfig.DatabaseConfig[0]).build();
+ new MultiDbConfig.Builder(new MultiDbConfig.DatabaseConfig[0]).build();
});
}
@Test
void testErrorHandlingWithNullDatabaseConfig() {
assertThrows(IllegalArgumentException.class, () -> {
- new MultiDatabaseConfig.Builder(new MultiDatabaseConfig.DatabaseConfig[] { null }).build();
+ new MultiDbConfig.Builder(new MultiDbConfig.DatabaseConfig[] { null }).build();
});
}
@Test
void testInitializationWithZeroWeights() {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(0.0f) // Zero weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.0f) // Zero weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
// Should still initialize and select one of the clusters
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java
index fa84564645..ad9bc150dd 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java
@@ -5,7 +5,7 @@
import org.awaitility.Durations;
import org.junit.jupiter.api.*;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.exceptions.JedisValidationException;
import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
@@ -43,7 +43,7 @@ public void setUp() {
endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build();
provider = new MultiDatabaseConnectionProvider(
- new MultiDatabaseConfig.Builder(databaseConfigs).build());
+ new MultiDbConfig.Builder(databaseConfigs).build());
}
@AfterEach
@@ -109,7 +109,7 @@ public void testRunClusterFailoverPostProcessor() {
DefaultJedisClientConfig.builder().build())
.weight(0.4f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(databaseConfigs);
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs);
// Configures a single failed command to trigger an open circuit on the next subsequent failure
builder.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(1)
@@ -180,7 +180,7 @@ public void testConnectionPoolConfigApplied() {
databaseConfigs[1] = new DatabaseConfig(endpointStandalone1.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build(), poolConfig);
try (MultiDatabaseConnectionProvider customProvider = new MultiDatabaseConnectionProvider(
- new MultiDatabaseConfig.Builder(databaseConfigs).build())) {
+ new MultiDbConfig.Builder(databaseConfigs).build())) {
MultiDatabaseConnectionProvider.Database activeCluster = customProvider.getDatabase();
ConnectionPool connectionPool = activeCluster.getConnectionPool();
assertEquals(8, connectionPool.getMaxTotal());
@@ -210,7 +210,7 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException {
.healthCheckStrategy(countingStrategy).build();
MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
- new MultiDatabaseConfig.Builder(Collections.singletonList(config)).build());
+ new MultiDbConfig.Builder(Collections.singletonList(config)).build());
try {
// Wait for some health checks to occur
@@ -245,7 +245,7 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() {
endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build();
MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
- new MultiDatabaseConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
+ new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
.maxNumFailoverAttempts(2).retryMaxAttempts(1).build());
try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) {
@@ -282,7 +282,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
// adjusted to get exact numbers of failures with exact exception types
// and open to impact from other defaulted values withing the components in use.
MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
- new MultiDatabaseConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
+ new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
.maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5)
.circuitBreakerFailureRateThreshold(60).build()) {
};
diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
index 1496ee3815..54fc1e1f7a 100644
--- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
@@ -14,7 +14,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
@ExtendWith(MockitoExtension.class)
class PeriodicFailbackTest {
@@ -42,14 +42,14 @@ private MockedConstruction mockPool() {
@Test
void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(100).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
@@ -75,14 +75,14 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException
@Test
void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(50).gracePeriod(100).build(); // Add
// grace
// period
@@ -122,14 +122,14 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
@Test
void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
.failbackCheckInterval(50).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
@@ -162,18 +162,18 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx
try (MockedConstruction mockedPool = mockPool()) {
HostAndPort endpoint3 = new HostAndPort("localhost", 6381);
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add
// grace
// period
diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
index 25788fa916..c061c5c4e6 100644
--- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
@@ -47,10 +47,10 @@ public class AutomaticFailoverTest {
private Jedis jedis2;
- private List<MultiDatabaseConfig.DatabaseConfig> getDatabaseConfigs(
+ private List<MultiDbConfig.DatabaseConfig> getDatabaseConfigs(
JedisClientConfig clientConfig, HostAndPort... hostPorts) {
return Arrays.stream(hostPorts)
- .map(hp -> new MultiDatabaseConfig.DatabaseConfig(hp, clientConfig))
+ .map(hp -> new MultiDbConfig.DatabaseConfig(hp, clientConfig))
.collect(Collectors.toList());
}
@@ -69,7 +69,7 @@ public void cleanUp() {
@Test
public void pipelineWithSwitch() {
MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
- new MultiDatabaseConfig.Builder(
+ new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.build());
@@ -89,7 +89,7 @@ public void pipelineWithSwitch() {
@Test
public void transactionWithSwitch() {
MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
- new MultiDatabaseConfig.Builder(
+ new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.build());
@@ -112,7 +112,7 @@ public void commandFailoverUnresolvableHost() {
int slidingWindowSize = 2;
HostAndPort unresolvableHostAndPort = new HostAndPort("unresolvable", 6379);
- MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort()))
.retryWaitDuration(1).retryMaxAttempts(1)
.circuitBreakerSlidingWindowSize(slidingWindowSize)
@@ -152,7 +152,7 @@ public void commandFailover() {
int slidingWindowSize = 6;
int retryMaxAttempts = 3;
- MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.retryMaxAttempts(retryMaxAttempts) // Default
// is
@@ -194,7 +194,7 @@ public void commandFailover() {
public void pipelineFailover() {
int slidingWindowSize = 10;
- MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.circuitBreakerSlidingWindowSize(slidingWindowSize)
.fallbackExceptionList(Collections.singletonList(JedisConnectionException.class));
@@ -226,7 +226,7 @@ public void pipelineFailover() {
public void failoverFromAuthError() {
int slidingWindowSize = 10;
- MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(),
workingEndpoint.getHostAndPort())).circuitBreakerSlidingWindowSize(slidingWindowSize)
.fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class));
diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
index c3f974dcd5..ef14cc51ce 100644
--- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
+++ b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
@@ -14,7 +14,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.mcf.HealthStatus;
import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper;
@@ -52,13 +52,13 @@ private MockedConstruction mockConnectionPool() {
void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
// Create clusters without health checks
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
@@ -84,13 +84,13 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
@Test
void postInit_nonActive_changes_do_not_switch_active() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
@@ -131,14 +131,14 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception {
@Test
void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
@@ -158,11 +158,11 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception
@Test
void init_single_cluster_initializes_and_is_healthy() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1 }).build();
// This test verifies that the provider initializes correctly and doesn't lose events
// In practice, with health checks disabled, no events should be generated during init
@@ -183,17 +183,17 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception {
@Test
void postInit_two_hop_failover_chain_respected() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(0.2f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
@@ -227,14 +227,14 @@ void postInit_two_hop_failover_chain_respected() throws Exception {
@Test
void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Exception {
try (MockedConstruction mockedPool = mockConnectionPool()) {
- MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiDatabaseConfig config = new MultiDatabaseConfig.Builder(
- new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
config)) {
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index 400fd65404..9250e90a3e 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -9,7 +9,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
@@ -68,7 +68,7 @@ public void testFailover() {
DatabaseConfig secondary = DatabaseConfig.builder(endpoint.getHostAndPort(1), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
- MultiDatabaseConfig multiConfig = MultiDatabaseConfig.builder()
+ MultiDbConfig multiConfig = MultiDbConfig.builder()
.endpoint(primary)
.endpoint(secondary)
.circuitBreakerSlidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS
From 79cd686b3f0b902ea17b1ab055646addd55f3569 Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 14:32:30 +0300
Subject: [PATCH 08/17] Rename MultiDatabaseConnectionProvider to
MultiDbConnectionProvider
---
.../redis/clients/jedis/MultiDbClient.java | 38 +++++++--------
.../redis/clients/jedis/MultiDbConfig.java | 14 +++---
.../redis/clients/jedis/UnifiedJedis.java | 12 ++---
.../jedis/builders/MultiDbClientBuilder.java | 7 ++-
.../mcf/CircuitBreakerCommandExecutor.java | 4 +-
.../jedis/mcf/CircuitBreakerFailoverBase.java | 6 +--
...cuitBreakerFailoverConnectionProvider.java | 4 +-
.../mcf/CircuitBreakerThresholdsAdapter.java | 4 +-
.../jedis/mcf/ClusterSwitchEventArgs.java | 2 +-
.../jedis/mcf/JedisFailoverException.java | 6 +--
.../jedis/mcf/MultiClusterPipeline.java | 4 +-
.../jedis/mcf/MultiClusterTransaction.java | 6 +--
...er.java => MultiDbConnectionProvider.java} | 22 ++++-----
...UnifiedJedisConstructorReflectionTest.java | 2 +-
.../failover/FailoverIntegrationTest.java | 26 +++++-----
.../mcf/ActiveActiveLocalFailoverTest.java | 2 +-
.../mcf/CircuitBreakerThresholdsTest.java | 16 +++----
.../mcf/ClusterEvaluateThresholdsTest.java | 6 +--
.../mcf/FailbackMechanismIntegrationTest.java | 48 +++++++++----------
.../jedis/mcf/FailbackMechanismUnitTest.java | 12 ++---
.../jedis/mcf/HealthCheckIntegrationTest.java | 14 +++---
.../clients/jedis/mcf/HealthCheckTest.java | 3 +-
.../MultiClusterDynamicEndpointUnitTest.java | 26 +++++-----
...ultiClusterFailoverAttemptsConfigTest.java | 18 ++++---
.../mcf/MultiClusterInitializationTest.java | 12 ++---
...MultiDatabaseConnectionProviderHelper.java | 20 --------
.../mcf/MultiDbConnectionProviderHelper.java | 20 ++++++++
...ava => MultiDbConnectionProviderTest.java} | 24 +++++-----
.../jedis/mcf/PeriodicFailbackTest.java | 20 ++++----
.../jedis/misc/AutomaticFailoverTest.java | 20 ++++----
...erProviderHealthStatusChangeEventTest.java | 38 +++++++--------
.../scenario/ActiveActiveFailoverTest.java | 4 +-
32 files changed, 219 insertions(+), 241 deletions(-)
rename src/main/java/redis/clients/jedis/mcf/{MultiDatabaseConnectionProvider.java => MultiDbConnectionProvider.java} (98%)
delete mode 100644 src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java
create mode 100644 src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java
rename src/test/java/redis/clients/jedis/mcf/{MultiDatabaseConnectionProviderTest.java => MultiDbConnectionProviderTest.java} (93%)
diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java
index 2e008f5c98..bd65f119ff 100644
--- a/src/main/java/redis/clients/jedis/MultiDbClient.java
+++ b/src/main/java/redis/clients/jedis/MultiDbClient.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.mcf.MultiClusterPipeline;
import redis.clients.jedis.mcf.MultiClusterTransaction;
import redis.clients.jedis.providers.ConnectionProvider;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import java.util.Set;
@@ -76,7 +76,7 @@
*
* @author Ivo Gaydazhiev
* @since 5.2.0
- * @see MultiDatabaseConnectionProvider
+ * @see MultiDbConnectionProvider
* @see CircuitBreakerCommandExecutor
* @see MultiDbConfig
*/
@@ -91,7 +91,7 @@ public class MultiDbClient extends UnifiedJedis {
* {@link #builder()} to create instances.
*
* @param commandExecutor the command executor (typically CircuitBreakerCommandExecutor)
- * @param connectionProvider the connection provider (typically MultiDatabaseConnectionProvider)
+ * @param connectionProvider the connection provider (typically MultiDbConnectionProvider)
* @param commandObjects the command objects
* @param redisProtocol the Redis protocol version
* @param cache the client-side cache (may be null)
@@ -102,16 +102,16 @@ public class MultiDbClient extends UnifiedJedis {
}
/**
- * Returns the underlying MultiDatabaseConnectionProvider.
+ * Returns the underlying MultiDbConnectionProvider.
*
* This provides access to multi-cluster specific operations like manual failover, health status
* monitoring, and cluster switch event handling.
*
* @return the multi-cluster connection provider
- * @throws ClassCastException if the provider is not a MultiDatabaseConnectionProvider
+ * @throws ClassCastException if the provider is not a MultiDbConnectionProvider
*/
- private MultiDatabaseConnectionProvider getMultiDatabaseConnectionProvider() {
- return (MultiDatabaseConnectionProvider) this.provider;
+ private MultiDbConnectionProvider getMultiDbConnectionProvider() {
+ return (MultiDbConnectionProvider) this.provider;
}
/**
@@ -123,7 +123,7 @@ private MultiDatabaseConnectionProvider getMultiDatabaseConnectionProvider() {
* @param endpoint the endpoint to switch to
*/
public void setActiveDatabase(Endpoint endpoint) {
- getMultiDatabaseConnectionProvider().setActiveDatabase(endpoint);
+ getMultiDbConnectionProvider().setActiveDatabase(endpoint);
}
/**
@@ -136,7 +136,7 @@ public void setActiveDatabase(Endpoint endpoint) {
* @param databaseConfig the pre-configured database configuration
*/
public void addEndpoint(DatabaseConfig databaseConfig) {
- getMultiDatabaseConnectionProvider().add(databaseConfig);
+ getMultiDbConnectionProvider().add(databaseConfig);
}
/**
@@ -155,7 +155,7 @@ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clien
DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight)
.build();
- getMultiDatabaseConnectionProvider().add(databaseConfig);
+ getMultiDbConnectionProvider().add(databaseConfig);
}
/**
@@ -166,7 +166,7 @@ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clien
* @return the set of all configured endpoints
*/
public Set getEndpoints() {
- return getMultiDatabaseConnectionProvider().getEndpoints();
+ return getMultiDbConnectionProvider().getEndpoints();
}
/**
@@ -178,7 +178,7 @@ public Set getEndpoints() {
* @return the health status of the endpoint
*/
public boolean isHealthy(Endpoint endpoint) {
- return getMultiDatabaseConnectionProvider().isHealthy(endpoint);
+ return getMultiDbConnectionProvider().isHealthy(endpoint);
}
/**
@@ -194,7 +194,7 @@ public boolean isHealthy(Endpoint endpoint) {
* healthy clusters available
*/
public void removeEndpoint(Endpoint endpoint) {
- getMultiDatabaseConnectionProvider().remove(endpoint);
+ getMultiDbConnectionProvider().remove(endpoint);
}
/**
@@ -210,7 +210,7 @@ public void removeEndpoint(Endpoint endpoint) {
* or doesn't exist
*/
public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) {
- getMultiDatabaseConnectionProvider().forceActiveDatabase(endpoint, forcedActiveDurationMs);
+ getMultiDbConnectionProvider().forceActiveDatabase(endpoint, forcedActiveDurationMs);
}
/**
@@ -223,7 +223,7 @@ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs)
*/
@Override
public MultiClusterPipeline pipelined() {
- return new MultiClusterPipeline(getMultiDatabaseConnectionProvider(), commandObjects);
+ return new MultiClusterPipeline(getMultiDbConnectionProvider(), commandObjects);
}
/**
@@ -236,8 +236,7 @@ public MultiClusterPipeline pipelined() {
*/
@Override
public MultiClusterTransaction multi() {
- return new MultiClusterTransaction((MultiDatabaseConnectionProvider) provider, true,
- commandObjects);
+ return new MultiClusterTransaction((MultiDbConnectionProvider) provider, true, commandObjects);
}
/**
@@ -251,12 +250,11 @@ public MultiClusterTransaction transaction(boolean doMulti) {
"It is not allowed to create Transaction from this " + getClass());
}
- return new MultiClusterTransaction(getMultiDatabaseConnectionProvider(), doMulti,
- commandObjects);
+ return new MultiClusterTransaction(getMultiDbConnectionProvider(), doMulti, commandObjects);
}
public Endpoint getActiveEndpoint() {
- return getMultiDatabaseConnectionProvider().getDatabase().getEndpoint();
+ return getMultiDbConnectionProvider().getDatabase().getEndpoint();
}
/**
diff --git a/src/main/java/redis/clients/jedis/MultiDbConfig.java b/src/main/java/redis/clients/jedis/MultiDbConfig.java
index 5bde00c34a..7bafdd9f5e 100644
--- a/src/main/java/redis/clients/jedis/MultiDbConfig.java
+++ b/src/main/java/redis/clients/jedis/MultiDbConfig.java
@@ -21,7 +21,7 @@
* This configuration enables seamless failover between multiple Redis clusters, databases, or
* endpoints by providing comprehensive settings for retry logic, circuit breaker behavior, health
* checks, and failback mechanisms. It is designed to work with
- * {@link redis.clients.jedis.mcf.MultiDatabaseConnectionProvider} to provide high availability and
+ * {@link redis.clients.jedis.mcf.MultiDbConnectionProvider} to provide high availability and
* disaster recovery capabilities.
*
*
@@ -61,14 +61,14 @@
* .gracePeriod(10000).build();
*
* // Use with connection provider
- * MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config);
+ * MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config);
* }
*
*
* The configuration leverages Resilience4j for
* circuit breaker and retry implementations, providing battle-tested fault tolerance patterns.
*
- * @see redis.clients.jedis.mcf.MultiDatabaseConnectionProvider
+ * @see redis.clients.jedis.mcf.MultiDbConnectionProvider
* @see redis.clients.jedis.mcf.HealthCheckStrategy
* @see redis.clients.jedis.mcf.EchoStrategy
* @see redis.clients.jedis.mcf.LagAwareStrategy
@@ -451,7 +451,7 @@ public static interface StrategySupplier {
public MultiDbConfig(DatabaseConfig[] databaseConfigs) {
if (databaseConfigs == null || databaseConfigs.length < 1) throw new JedisValidationException(
- "DatabaseClientConfigs are required for MultiDatabaseConnectionProvider");
+ "DatabaseClientConfigs are required for MultiDbConnectionProvider");
for (DatabaseConfig databaseConfig : databaseConfigs) {
if (databaseConfig == null)
@@ -976,8 +976,7 @@ public DatabaseConfig build() {
}
/**
- * Builder class for creating MultiDbConfig instances with comprehensive configuration
- * options.
+ * Builder class for creating MultiDbConfig instances with comprehensive configuration options.
*
* The Builder provides a fluent API for configuring all aspects of multi-cluster failover
* behavior, including retry logic, circuit breaker settings, and failback mechanisms. It uses
@@ -1509,8 +1508,7 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts
*/
public MultiDbConfig build() {
- MultiDbConfig config = new MultiDbConfig(
- this.databaseConfigs.toArray(new DatabaseConfig[0]));
+ MultiDbConfig config = new MultiDbConfig(this.databaseConfigs.toArray(new DatabaseConfig[0]));
// Copy retry configuration
config.retryMaxAttempts = this.retryMaxAttempts;
diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java
index ebe5a55f54..ccb9d37df6 100644
--- a/src/main/java/redis/clients/jedis/UnifiedJedis.java
+++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java
@@ -34,7 +34,7 @@
import redis.clients.jedis.json.JsonObjectMapper;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
import redis.clients.jedis.mcf.MultiClusterPipeline;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.mcf.MultiClusterTransaction;
import redis.clients.jedis.params.*;
import redis.clients.jedis.providers.*;
@@ -240,7 +240,7 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo
*
*/
@Experimental
- public UnifiedJedis(MultiDatabaseConnectionProvider provider) {
+ public UnifiedJedis(MultiDbConnectionProvider provider) {
this(new CircuitBreakerCommandExecutor(provider), provider);
}
@@ -5099,8 +5099,8 @@ public List tdigestByRevRank(String key, long... ranks) {
public PipelineBase pipelined() {
if (provider == null) {
throw new IllegalStateException("It is not allowed to create Pipeline from this " + getClass());
- } else if (provider instanceof MultiDatabaseConnectionProvider) {
- return new MultiClusterPipeline((MultiDatabaseConnectionProvider) provider, commandObjects);
+ } else if (provider instanceof MultiDbConnectionProvider) {
+ return new MultiClusterPipeline((MultiDbConnectionProvider) provider, commandObjects);
} else {
return new Pipeline(provider.getConnection(), true, commandObjects);
}
@@ -5120,8 +5120,8 @@ public AbstractTransaction multi() {
public AbstractTransaction transaction(boolean doMulti) {
if (provider == null) {
throw new IllegalStateException("It is not allowed to create Transaction from this " + getClass());
- } else if (provider instanceof MultiDatabaseConnectionProvider) {
- return new MultiClusterTransaction((MultiDatabaseConnectionProvider) provider, doMulti, commandObjects);
+ } else if (provider instanceof MultiDbConnectionProvider) {
+ return new MultiClusterTransaction((MultiDbConnectionProvider) provider, doMulti, commandObjects);
} else {
return new Transaction(provider.getConnection(), doMulti, true, commandObjects);
}
diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
index 252a931762..12cc2efefb 100644
--- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
+++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
@@ -7,7 +7,7 @@
import redis.clients.jedis.executors.CommandExecutor;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.providers.ConnectionProvider;
/**
@@ -113,7 +113,7 @@ protected ConnectionProvider createDefaultConnectionProvider() {
}
// Create the multi-cluster connection provider
- MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(multiDbConfig);
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(multiDbConfig);
// Set database switch listener if provided
if (this.databaseSwitchListener != null) {
@@ -126,8 +126,7 @@ protected ConnectionProvider createDefaultConnectionProvider() {
@Override
protected CommandExecutor createDefaultCommandExecutor() {
// For multi-db clients, we always use CircuitBreakerCommandExecutor
- return new CircuitBreakerCommandExecutor(
- (MultiDatabaseConnectionProvider) this.connectionProvider);
+ return new CircuitBreakerCommandExecutor((MultiDbConnectionProvider) this.connectionProvider);
}
@Override
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
index 5a5f24e063..31d8d67a73 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.executors.CommandExecutor;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
/**
* @author Allen Terleto (aterleto)
@@ -24,7 +24,7 @@
public class CircuitBreakerCommandExecutor extends CircuitBreakerFailoverBase
implements CommandExecutor {
- public CircuitBreakerCommandExecutor(MultiDatabaseConnectionProvider provider) {
+ public CircuitBreakerCommandExecutor(MultiDbConnectionProvider provider) {
super(provider);
}
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
index cbe97f27a8..ba1ea98dec 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
@@ -6,7 +6,7 @@
import java.util.concurrent.locks.ReentrantLock;
import redis.clients.jedis.annots.Experimental;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
import redis.clients.jedis.util.IOUtils;
/**
@@ -23,9 +23,9 @@
public class CircuitBreakerFailoverBase implements AutoCloseable {
private final Lock lock = new ReentrantLock(true);
- protected final MultiDatabaseConnectionProvider provider;
+ protected final MultiDbConnectionProvider provider;
- public CircuitBreakerFailoverBase(MultiDatabaseConnectionProvider provider) {
+ public CircuitBreakerFailoverBase(MultiDbConnectionProvider provider) {
this.provider = provider;
}
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
index b45cd04c61..7dfd1ef527 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
@@ -6,7 +6,7 @@
import redis.clients.jedis.Connection;
import redis.clients.jedis.annots.Experimental;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
/**
* ConnectionProvider with built-in retry, circuit-breaker, and failover to another cluster/database
@@ -16,7 +16,7 @@
@Experimental
public class CircuitBreakerFailoverConnectionProvider extends CircuitBreakerFailoverBase {
- public CircuitBreakerFailoverConnectionProvider(MultiDatabaseConnectionProvider provider) {
+ public CircuitBreakerFailoverConnectionProvider(MultiDbConnectionProvider provider) {
super(provider);
}
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
index 4c4d14c38b..bcd6ee208d 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
@@ -9,10 +9,10 @@
*
* This adapter sets maximum values for failure rate (100%) and minimum calls (Integer.MAX_VALUE) to
* effectively disable Resilience4j's automatic circuit breaker transitions, allowing
- * {@link MultiDatabaseConnectionProvider.Database#evaluateThresholds(boolean)} to control when the
+ * {@link MultiDbConnectionProvider.Database#evaluateThresholds(boolean)} to control when the
* circuit breaker opens based on both minimum failure count AND failure rate.
*
- * @see MultiDatabaseConnectionProvider.Database#evaluateThresholds(boolean)
+ * @see MultiDbConnectionProvider.Database#evaluateThresholds(boolean)
*/
class CircuitBreakerThresholdsAdapter {
/** Maximum failure rate threshold (100%) to disable Resilience4j evaluation */
diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
index 2c3e283445..a78c41864a 100644
--- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
+++ b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
@@ -1,7 +1,7 @@
package redis.clients.jedis.mcf;
import redis.clients.jedis.Endpoint;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
public class ClusterSwitchEventArgs {
diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
index 2d6a81b81a..c431764d42 100644
--- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
+++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
@@ -29,8 +29,7 @@ public JedisFailoverException() {
*
* See the configuration properties
* {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and
- * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more
- * details.
+ * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more details.
*/
public static class JedisPermanentlyNotAvailableException extends JedisFailoverException {
public JedisPermanentlyNotAvailableException(String s) {
@@ -50,8 +49,7 @@ public JedisPermanentlyNotAvailableException() {
*
* See the configuration properties
* {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and
- * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more
- * details.
+ * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more details.
*/
public static class JedisTemporarilyNotAvailableException extends JedisFailoverException {
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
index d23f56411f..d302768fad 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
@@ -21,7 +21,7 @@ public class MultiClusterPipeline extends PipelineBase implements Closeable {
private final Queue>> commands = new LinkedList<>();
@Deprecated
- public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider) {
+ public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider) {
super(new CommandObjects());
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
@@ -32,7 +32,7 @@ public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider) {
}
}
- public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider,
+ public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider,
CommandObjects commandObjects) {
super(commandObjects);
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
index 6f634549e2..e4afa24887 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
@@ -39,7 +39,7 @@ public class MultiClusterTransaction extends TransactionBase {
* @param provider
*/
@Deprecated
- public MultiClusterTransaction(MultiDatabaseConnectionProvider provider) {
+ public MultiClusterTransaction(MultiDbConnectionProvider provider) {
this(provider, true);
}
@@ -50,7 +50,7 @@ public MultiClusterTransaction(MultiDatabaseConnectionProvider provider) {
* @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
*/
@Deprecated
- public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean doMulti) {
+ public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMulti) {
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
try (Connection connection = failoverProvider.getConnection()) {
@@ -68,7 +68,7 @@ public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean
* @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
* @param commandObjects command objects
*/
- public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean doMulti,
+ public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMulti,
CommandObjects commandObjects) {
super(commandObjects);
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
similarity index 98%
rename from src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
index ba445ae5cb..c39d137616 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
@@ -57,7 +57,7 @@
*
*/
@Experimental
-public class MultiDatabaseConnectionProvider implements ConnectionProvider {
+public class MultiDbConnectionProvider implements ConnectionProvider {
private final Logger log = LoggerFactory.getLogger(getClass());
@@ -69,8 +69,8 @@ public class MultiDatabaseConnectionProvider implements ConnectionProvider {
/**
* Indicates the actively used database endpoint (connection pool) amongst the pre-configured list
- * which were provided at startup via the MultiDbConfig. All traffic will be routed with
- * this database
+ * which were provided at startup via the MultiDbConfig. All traffic will be routed with this
+ * database
*/
private volatile Database activeDatabase;
@@ -106,10 +106,10 @@ public class MultiDatabaseConnectionProvider implements ConnectionProvider {
private AtomicLong failoverFreezeUntil = new AtomicLong(0);
private AtomicInteger failoverAttemptCount = new AtomicInteger(0);
- public MultiDatabaseConnectionProvider(MultiDbConfig multiDbConfig) {
+ public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) {
if (multiDbConfig == null) throw new JedisValidationException(
- "MultiDbConfig must not be NULL for MultiDatabaseConnectionProvider");
+ "MultiDbConfig must not be NULL for MultiDbConnectionProvider");
this.multiDbConfig = multiDbConfig;
@@ -134,8 +134,7 @@ public MultiDatabaseConnectionProvider(MultiDbConfig multiDbConfig) {
CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom();
- CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(
- multiDbConfig);
+ CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(multiDbConfig);
circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls());
circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold());
circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize());
@@ -410,7 +409,7 @@ private Database waitForInitialHealthyCluster(StatusTracker statusTracker) {
// All clusters are unhealthy
throw new JedisConnectionException(
- "All configured clusters are unhealthy. Cannot initialize MultiDatabaseConnectionProvider.");
+ "All configured clusters are unhealthy. Cannot initialize MultiDbConnectionProvider.");
}
/**
@@ -734,8 +733,8 @@ public CircuitBreaker getDatabaseCircuitBreaker() {
/**
* Indicates the final cluster/database endpoint (connection pool), according to the
- * pre-configured list provided at startup via the MultiDbConfig, is unavailable and
- * therefore no further failover is possible. Users can manually failback to an available cluster
+ * pre-configured list provided at startup via the MultiDbConfig, is unavailable and therefore no
+ * further failover is possible. Users can manually failback to an available cluster
*/
public boolean canIterateFrom(Database iterateFrom) {
Map.Entry e = findWeightedHealthyClusterToIterate(iterateFrom);
@@ -785,8 +784,7 @@ private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry
}
private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
- HealthCheck hc, CircuitBreaker circuitBreaker, float weight,
- MultiDbConfig multiDbConfig) {
+ HealthCheck hc, CircuitBreaker circuitBreaker, float weight, MultiDbConfig multiDbConfig) {
this.endpoint = endpoint;
this.connectionPool = connectionPool;
diff --git a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
index 39c76b6338..cd2ca8e4c5 100644
--- a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
+++ b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
@@ -184,7 +184,7 @@ private static boolean clusterConstructorThatShouldBeDeprecatedAndRemoved(Constr
private static boolean multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient(
Constructor> ctor) {
Class>[] types = ctor.getParameterTypes();
- return types.length == 1 && types[0].getSimpleName().equals("MultiDatabaseConnectionProvider");
+ return types.length == 1 && types[0].getSimpleName().equals("MultiDbConnectionProvider");
}
private static String prettySignature(Constructor> ctor) {
diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
index 7cda9db8fd..b3c19fdda5 100644
--- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
@@ -19,7 +19,7 @@
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.scenario.RecommendedSettings;
import java.io.IOException;
@@ -57,7 +57,7 @@ public class FailoverIntegrationTest {
private static UnifiedJedis jedis2;
private static String JEDIS1_ID = "";
private static String JEDIS2_ID = "";
- private MultiDatabaseConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
private UnifiedJedis failoverClient;
@BeforeAll
@@ -180,8 +180,8 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID));
}
- private List getDatabaseConfigs(
- JedisClientConfig clientConfig, EndpointConfig... endpoints) {
+ private List getDatabaseConfigs(JedisClientConfig clientConfig,
+ EndpointConfig... endpoints) {
int weight = endpoints.length;
AtomicInteger weightCounter = new AtomicInteger(weight);
@@ -269,7 +269,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc
.circuitBreakerFailureRateThreshold(50f) // %50 failure rate
.build();
- MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(failoverConfig);
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(failoverConfig);
try (UnifiedJedis client = new UnifiedJedis(provider)) {
// Verify initial connection to first endpoint
assertThat(getNodeId(client.info("server")), equalTo(JEDIS1_ID));
@@ -315,7 +315,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc
@Test
public void testInflightCommandsAreRetriedAfterFailover() throws Exception {
- MultiDatabaseConnectionProvider customProvider = createProvider(
+ MultiDbConnectionProvider customProvider = createProvider(
builder -> builder.retryOnFailover(true));
// Create a custom client with retryOnFailover enabled for this specific test
@@ -357,7 +357,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception {
@Test
public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception {
// Create a custom provider and client with retry disabled for this specific test
- MultiDatabaseConnectionProvider customProvider = createProvider(
+ MultiDbConnectionProvider customProvider = createProvider(
builder -> builder.retryOnFailover(false));
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
@@ -414,10 +414,10 @@ private static String generateTestValue(int byteSize) {
}
/**
- * Creates a MultiDatabaseConnectionProvider with standard configuration
+ * Creates a MultiDbConnectionProvider with standard configuration
* @return A configured provider
*/
- private MultiDatabaseConnectionProvider createProvider() {
+ private MultiDbConnectionProvider createProvider() {
JedisClientConfig clientConfig = DefaultJedisClientConfig.builder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
@@ -427,14 +427,14 @@ private MultiDatabaseConnectionProvider createProvider() {
.retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
.circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build();
- return new MultiDatabaseConnectionProvider(failoverConfig);
+ return new MultiDbConnectionProvider(failoverConfig);
}
/**
- * Creates a MultiDatabaseConnectionProvider with standard configuration
+ * Creates a MultiDbConnectionProvider with standard configuration
* @return A configured provider
*/
- private MultiDatabaseConnectionProvider createProvider(
+ private MultiDbConnectionProvider createProvider(
Function configCustomizer) {
JedisClientConfig clientConfig = DefaultJedisClientConfig.builder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
@@ -449,6 +449,6 @@ private MultiDatabaseConnectionProvider createProvider(
builder = configCustomizer.apply(builder);
}
- return new MultiDatabaseConnectionProvider(builder.build());
+ return new MultiDbConnectionProvider(builder.build());
}
}
diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
index 6fdc720cc9..f42030cd52 100644
--- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
@@ -162,7 +162,7 @@ public void accept(ClusterSwitchEventArgs e) {
ensureEndpointAvailability(endpoint2.getHostAndPort(), config);
// Create the connection provider
- MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(builder.build());
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(builder.build());
FailoverReporter reporter = new FailoverReporter();
provider.setDatabaseSwitchListener(reporter);
provider.setActiveDatabase(endpoint1.getHostAndPort());
diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
index 2e71b0f554..7d439c6d46 100644
--- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
@@ -20,7 +20,7 @@
import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
import redis.clients.jedis.util.ReflectionTestUtil;
/**
@@ -30,8 +30,8 @@
*/
public class CircuitBreakerThresholdsTest {
- private MultiDatabaseConnectionProvider realProvider;
- private MultiDatabaseConnectionProvider spyProvider;
+ private MultiDbConnectionProvider realProvider;
+ private MultiDbConnectionProvider spyProvider;
private Database cluster;
private CircuitBreakerCommandExecutor executor;
private CommandObject dummyCommand;
@@ -56,7 +56,7 @@ public void setup() throws Exception {
MultiDbConfig mcc = cfgBuilder.build();
- realProvider = new MultiDatabaseConnectionProvider(mcc);
+ realProvider = new MultiDbConnectionProvider(mcc);
spyProvider = spy(realProvider);
cluster = spyProvider.getDatabase();
@@ -126,8 +126,8 @@ public void rateBelowThreshold_doesNotFailover() throws Exception {
MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs)
.circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3)
.circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false);
- MultiDatabaseConnectionProvider rp = new MultiDatabaseConnectionProvider(cfgBuilder.build());
- MultiDatabaseConnectionProvider sp = spy(rp);
+ MultiDbConnectionProvider rp = new MultiDbConnectionProvider(cfgBuilder.build());
+ MultiDbConnectionProvider sp = spy(rp);
Database c = sp.getDatabase();
try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(sp)) {
CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
@@ -194,8 +194,8 @@ public void thresholdMatrix(int minFailures, float ratePercent, int successes, i
.circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1)
.retryOnFailover(false);
- MultiDatabaseConnectionProvider real = new MultiDatabaseConnectionProvider(cfgBuilder.build());
- MultiDatabaseConnectionProvider spy = spy(real);
+ MultiDbConnectionProvider real = new MultiDbConnectionProvider(cfgBuilder.build());
+ MultiDbConnectionProvider spy = spy(real);
Database c = spy.getDatabase();
try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(spy)) {
diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
index a3e9317a98..8a6fd466c0 100644
--- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
@@ -12,7 +12,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.MultiDbConfig;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
/**
* Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures
@@ -21,14 +21,14 @@
*/
public class ClusterEvaluateThresholdsTest {
- private MultiDatabaseConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
private Database cluster;
private CircuitBreaker circuitBreaker;
private CircuitBreaker.Metrics metrics;
@BeforeEach
public void setup() {
- provider = mock(MultiDatabaseConnectionProvider.class);
+ provider = mock(MultiDbConnectionProvider.class);
cluster = mock(Database.class);
circuitBreaker = mock(CircuitBreaker.class);
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
index 7179c0c475..34e521683e 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
@@ -62,19 +62,19 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException {
.failbackCheckInterval(100) // Short interval for testing
.build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1 (only healthy option)
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster2 healthy again (higher weight - would normally trigger failback)
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait longer than failback interval
@@ -102,19 +102,19 @@ void testFailbackToHigherWeightCluster() throws InterruptedException {
.failbackCheckInterval(100) // Short interval for testing
.gracePeriod(100).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (lower weight, but only healthy option)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check interval + some buffer
@@ -145,12 +145,12 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException {
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(100).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight)
assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
// Make cluster3 unhealthy to force failover to cluster2 (medium weight)
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (highest weight among healthy clusters)
@@ -158,7 +158,7 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException {
// Make cluster1 (lowest weight) healthy - this should NOT trigger failback
// since we don't failback to lower weight clusters
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check interval
@@ -184,19 +184,19 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(100).gracePeriod(50).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (only healthy option)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check
@@ -223,26 +223,26 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException {
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(200).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (only healthy option)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again (should trigger failback attempt)
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait a bit
Thread.sleep(100);
// Make cluster1 unhealthy again before failback completes
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Wait past the original failback interval
@@ -272,19 +272,19 @@ void testMultipleClusterFailbackPriority() throws InterruptedException {
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight)
assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
// Make cluster3 unhealthy to force failover to cluster2 (next highest weight)
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (highest weight among healthy clusters)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster3 healthy again
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback
@@ -312,12 +312,12 @@ void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException {
// period
.build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Now make cluster2 unhealthy - it should be disabled for grace period
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should failover to cluster1
@@ -346,12 +346,12 @@ void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException {
.gracePeriod(100) // Short grace period for testing
.build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to start grace period and force failover
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should failover to cluster1
@@ -361,7 +361,7 @@ void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException {
assertTrue(provider.getDatabase(endpoint2).isInGracePeriod());
// Make cluster2 healthy again while it's still in grace period
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Should still be on cluster1 because cluster2 is in grace period
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
index a69bdb9ee6..ad251975c2 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
@@ -36,8 +36,7 @@ void testFailbackCheckIntervalConfiguration() {
// Test custom value
MultiDbConfig customConfig = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000)
- .build();
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000).build();
assertEquals(3000, customConfig.getFailbackCheckInterval());
}
@@ -55,8 +54,7 @@ void testFailbackSupportedConfiguration() {
// Test disabled
MultiDbConfig disabledConfig = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false)
- .build();
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false).build();
assertFalse(disabledConfig.isFailbackSupported());
}
@@ -68,15 +66,13 @@ void testFailbackCheckIntervalValidation() {
// Test zero interval (should be allowed)
MultiDbConfig zeroConfig = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0)
- .build();
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0).build();
assertEquals(0, zeroConfig.getFailbackCheckInterval());
// Test negative interval (should be allowed - implementation decision)
MultiDbConfig negativeConfig = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000)
- .build();
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000).build();
assertEquals(-1000, negativeConfig.getFailbackCheckInterval());
}
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
index d315616956..ce12cde8a7 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
@@ -32,7 +32,7 @@ public class HealthCheckIntegrationTest {
@Test
public void testDisableHealthCheck() {
// No health check strategy supplier means health check is disabled
- MultiDatabaseConnectionProvider customProvider = getMCCF(null);
+ MultiDbConnectionProvider customProvider = getMCCF(null);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -46,7 +46,7 @@ public void testDefaultStrategySupplier() {
MultiDbConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> {
return new EchoStrategy(hostAndPort, jedisClientConfig);
};
- MultiDatabaseConnectionProvider customProvider = getMCCF(defaultSupplier);
+ MultiDbConnectionProvider customProvider = getMCCF(defaultSupplier);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -70,7 +70,7 @@ public void testCustomStrategySupplier() {
});
};
- MultiDatabaseConnectionProvider customProvider = getMCCF(strategySupplier);
+ MultiDbConnectionProvider customProvider = getMCCF(strategySupplier);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -78,23 +78,21 @@ public void testCustomStrategySupplier() {
}
}
- private MultiDatabaseConnectionProvider getMCCF(
- MultiDbConfig.StrategySupplier strategySupplier) {
+ private MultiDbConnectionProvider getMCCF(MultiDbConfig.StrategySupplier strategySupplier) {
Function modifier = builder -> strategySupplier == null
? builder.healthCheckEnabled(false)
: builder.healthCheckStrategySupplier(strategySupplier);
List databaseConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 })
.map(e -> modifier
- .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig))
- .build())
+ .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)).build())
.collect(Collectors.toList());
MultiDbConfig mccf = new MultiDbConfig.Builder(databaseConfigs).retryMaxAttempts(1)
.retryWaitDuration(1).circuitBreakerSlidingWindowSize(1)
.circuitBreakerFailureRateThreshold(100).build();
- return new MultiDatabaseConnectionProvider(mccf);
+ return new MultiDbConnectionProvider(mccf);
}
// ========== Probe Logic Integration Tests ==========
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
index 7a23a6d88f..b83ecb8981 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
@@ -386,8 +386,7 @@ void testDefaultValues() {
void testDatabaseConfigWithHealthCheckStrategy() {
HealthCheckStrategy customStrategy = mock(HealthCheckStrategy.class);
- MultiDbConfig.StrategySupplier supplier = (hostAndPort,
- jedisClientConfig) -> customStrategy;
+ MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> customStrategy;
MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(supplier).build();
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
index 41aac09723..6ddca7ae12 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
@@ -23,7 +23,7 @@
public class MultiClusterDynamicEndpointUnitTest {
- private MultiDatabaseConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
private JedisClientConfig clientConfig;
private final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("standalone0");
private final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("standalone1");
@@ -35,10 +35,10 @@ void setUp() {
// Create initial provider with endpoint1
DatabaseConfig initialConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiDbConfig multiConfig = new MultiDbConfig.Builder(
- new DatabaseConfig[] { initialConfig }).build();
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(new DatabaseConfig[] { initialConfig })
+ .build();
- provider = new MultiDatabaseConnectionProvider(multiConfig);
+ provider = new MultiDbConnectionProvider(multiConfig);
}
// Helper method to create cluster configurations
@@ -82,12 +82,11 @@ void testRemoveExistingCluster() {
// Create initial provider with endpoint1
DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiDbConfig multiConfig = MultiDbConfig
- .builder(new DatabaseConfig[] { clusterConfig1 }).build();
+ MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { clusterConfig1 })
+ .build();
- try (
- MultiDatabaseConnectionProvider providerWithMockedPool = new MultiDatabaseConnectionProvider(
- multiConfig)) {
+ try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider(
+ multiConfig)) {
// Add endpoint2 as second cluster
DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
@@ -179,12 +178,11 @@ void testActiveClusterHandlingOnRemove() {
// Create initial provider with endpoint1
DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiDbConfig multiConfig = MultiDbConfig
- .builder(new DatabaseConfig[] { clusterConfig1 }).build();
+ MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { clusterConfig1 })
+ .build();
- try (
- MultiDatabaseConnectionProvider providerWithMockedPool = new MultiDatabaseConnectionProvider(
- multiConfig)) {
+ try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider(
+ multiConfig)) {
// Add endpoint2 as second cluster
DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
index 26e437b2d1..076eade442 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
@@ -22,14 +22,14 @@
/**
* Tests for how getMaxNumFailoverAttempts and getDelayInBetweenFailoverAttempts impact
- * MultiDatabaseConnectionProvider behaviour when no healthy clusters are available.
+ * MultiDbConnectionProvider behaviour when no healthy clusters are available.
*/
public class MultiClusterFailoverAttemptsConfigTest {
private HostAndPort endpoint0 = new HostAndPort("purposefully-incorrect", 0000);
private HostAndPort endpoint1 = new HostAndPort("purposefully-incorrect", 0001);
- private MultiDatabaseConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
@BeforeEach
void setUp() throws Exception {
@@ -45,7 +45,7 @@ void setUp() throws Exception {
// Use small values by default for tests unless overridden per-test via reflection
setBuilderFailoverConfig(builder, /* maxAttempts */ 10, /* delayMs */ 12000);
- provider = new MultiDatabaseConnectionProvider(builder.build());
+ provider = new MultiDbConnectionProvider(builder.build());
// Disable both clusters to force handleNoHealthyCluster path
provider.getDatabase(endpoint0).setDisabled(true);
@@ -69,9 +69,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex
// First call: should throw temporary and start the freeze window, incrementing attempt count to
// 1
- assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getDatabase()));
+ assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper
+ .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase()));
int afterFirst = getProviderAttemptCount();
assertEquals(1, afterFirst);
@@ -79,7 +78,7 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex
// and should NOT increment the attempt count beyond 1
for (int i = 0; i < 50; i++) {
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
+ () -> MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
SwitchReason.HEALTH_CHECK, provider.getDatabase()));
assertEquals(1, getProviderAttemptCount());
}
@@ -97,9 +96,8 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr
// First call: should throw temporary and start the freeze window, incrementing attempt count to
// 1
- assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getDatabase()));
+ assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper
+ .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase()));
int afterFirst = getProviderAttemptCount();
assertEquals(1, afterFirst);
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
index 780f3c2571..e01af8f8f4 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
@@ -18,7 +18,7 @@
import redis.clients.jedis.exceptions.JedisValidationException;
/**
- * Tests for MultiDatabaseConnectionProvider initialization edge cases
+ * Tests for MultiDbConnectionProvider initialization edge cases
*/
@ExtendWith(MockitoExtension.class)
public class MultiClusterInitializationTest {
@@ -64,7 +64,7 @@ void testInitializationWithMixedHealthCheckConfiguration() {
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Should initialize successfully
assertNotNull(provider.getDatabase());
@@ -90,7 +90,7 @@ void testInitializationWithAllHealthChecksDisabled() {
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Should select cluster2 (highest weight, no health checks)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
}
@@ -106,7 +106,7 @@ void testInitializationWithSingleCluster() {
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Should select the only available cluster
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
}
@@ -116,7 +116,7 @@ void testInitializationWithSingleCluster() {
@Test
void testErrorHandlingWithNullConfiguration() {
assertThrows(JedisValidationException.class, () -> {
- new MultiDatabaseConnectionProvider(null);
+ new MultiDbConnectionProvider(null);
});
}
@@ -148,7 +148,7 @@ void testInitializationWithZeroWeights() {
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Should still initialize and select one of the clusters
assertNotNull(provider.getDatabase());
}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java
deleted file mode 100644
index a88e53feed..0000000000
--- a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package redis.clients.jedis.mcf;
-
-import redis.clients.jedis.Endpoint;
-
-public class MultiDatabaseConnectionProviderHelper {
-
- public static void onHealthStatusChange(MultiDatabaseConnectionProvider provider,
- Endpoint endpoint, HealthStatus oldStatus, HealthStatus newStatus) {
- provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus));
- }
-
- public static void periodicFailbackCheck(MultiDatabaseConnectionProvider provider) {
- provider.periodicFailbackCheck();
- }
-
- public static Endpoint switchToHealthyCluster(MultiDatabaseConnectionProvider provider,
- SwitchReason reason, MultiDatabaseConnectionProvider.Database iterateFrom) {
- return provider.switchToHealthyDatabase(reason, iterateFrom);
- }
-}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java
new file mode 100644
index 0000000000..4ae061c9f5
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java
@@ -0,0 +1,20 @@
+package redis.clients.jedis.mcf;
+
+import redis.clients.jedis.Endpoint;
+
+public class MultiDbConnectionProviderHelper {
+
+ public static void onHealthStatusChange(MultiDbConnectionProvider provider, Endpoint endpoint,
+ HealthStatus oldStatus, HealthStatus newStatus) {
+ provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus));
+ }
+
+ public static void periodicFailbackCheck(MultiDbConnectionProvider provider) {
+ provider.periodicFailbackCheck();
+ }
+
+ public static Endpoint switchToHealthyCluster(MultiDbConnectionProvider provider,
+ SwitchReason reason, MultiDbConnectionProvider.Database iterateFrom) {
+ return provider.switchToHealthyDatabase(reason, iterateFrom);
+ }
+}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java
similarity index 93%
rename from src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java
index ad9bc150dd..9af53002e0 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java
@@ -8,7 +8,7 @@
import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.exceptions.JedisValidationException;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException;
import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException;
@@ -23,15 +23,15 @@
import static org.junit.jupiter.api.Assertions.*;
/**
- * @see MultiDatabaseConnectionProvider
+ * @see MultiDbConnectionProvider
*/
@Tag("integration")
-public class MultiDatabaseConnectionProviderTest {
+public class MultiDbConnectionProviderTest {
private final EndpointConfig endpointStandalone0 = HostAndPorts.getRedisEndpoint("standalone0");
private final EndpointConfig endpointStandalone1 = HostAndPorts.getRedisEndpoint("standalone1");
- private MultiDatabaseConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
@BeforeEach
public void setUp() {
@@ -42,8 +42,7 @@ public void setUp() {
databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(),
endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build();
- provider = new MultiDatabaseConnectionProvider(
- new MultiDbConfig.Builder(databaseConfigs).build());
+ provider = new MultiDbConnectionProvider(new MultiDbConfig.Builder(databaseConfigs).build());
}
@AfterEach
@@ -117,8 +116,7 @@ public void testRunClusterFailoverPostProcessor() {
AtomicBoolean isValidTest = new AtomicBoolean(false);
- MultiDatabaseConnectionProvider localProvider = new MultiDatabaseConnectionProvider(
- builder.build());
+ MultiDbConnectionProvider localProvider = new MultiDbConnectionProvider(builder.build());
localProvider.setDatabaseSwitchListener(a -> {
isValidTest.set(true);
});
@@ -179,9 +177,9 @@ public void testConnectionPoolConfigApplied() {
endpointStandalone0.getClientConfigBuilder().build(), poolConfig);
databaseConfigs[1] = new DatabaseConfig(endpointStandalone1.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build(), poolConfig);
- try (MultiDatabaseConnectionProvider customProvider = new MultiDatabaseConnectionProvider(
+ try (MultiDbConnectionProvider customProvider = new MultiDbConnectionProvider(
new MultiDbConfig.Builder(databaseConfigs).build())) {
- MultiDatabaseConnectionProvider.Database activeCluster = customProvider.getDatabase();
+ MultiDbConnectionProvider.Database activeCluster = customProvider.getDatabase();
ConnectionPool connectionPool = activeCluster.getConnectionPool();
assertEquals(8, connectionPool.getMaxTotal());
assertEquals(4, connectionPool.getMaxIdle());
@@ -209,7 +207,7 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException {
endpointStandalone0.getClientConfigBuilder().build())
.healthCheckStrategy(countingStrategy).build();
- MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider(
new MultiDbConfig.Builder(Collections.singletonList(config)).build());
try {
@@ -244,7 +242,7 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() {
databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(),
endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build();
- MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider(
new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
.maxNumFailoverAttempts(2).retryMaxAttempts(1).build());
@@ -281,7 +279,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
// ATTENTION: these configuration settings are not random and
// adjusted to get exact numbers of failures with exact exception types
// and open to impact from other defaulted values within the components in use.
- MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider(
new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
.maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5)
.circuitBreakerFailureRateThreshold(60).build()) {
diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
index 54fc1e1f7a..f58df34e0c 100644
--- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
@@ -2,7 +2,7 @@
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;
-import static redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper.onHealthStatusChange;
+import static redis.clients.jedis.mcf.MultiDbConnectionProviderHelper.onHealthStatusChange;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -52,7 +52,7 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
.failbackCheckInterval(100).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
@@ -64,7 +64,7 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException
provider.switchToHealthyDatabase(SwitchReason.FORCED, provider.getDatabase(endpoint2));
// Manually trigger periodic check
- MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
// Should still be on cluster1 (cluster2 is in grace period)
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
@@ -87,7 +87,7 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
// grace
// period
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
@@ -104,14 +104,14 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Trigger periodic check immediately - should still be on cluster1
- MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Wait for grace period to expire
Thread.sleep(150);
// Trigger periodic check after grace period expires
- MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
// Should have failed back to cluster2 (higher weight, grace period expired)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
@@ -132,7 +132,7 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
.failbackCheckInterval(50).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
@@ -149,7 +149,7 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException
Thread.sleep(100);
// Trigger periodic check
- MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
// Should still be on cluster1 (failback disabled)
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
@@ -178,7 +178,7 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx
// grace
// period
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight: 3.0f vs 2.0f vs 1.0f)
assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
@@ -202,7 +202,7 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx
Thread.sleep(150);
// Trigger periodic check
- MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
// Should have failed back to cluster3 (highest weight, grace period expired)
assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
index c061c5c4e6..e4b95d2e52 100644
--- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
@@ -18,8 +18,8 @@
import redis.clients.jedis.exceptions.JedisAccessControlException;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper;
import redis.clients.jedis.mcf.SwitchReason;
import redis.clients.jedis.util.IOUtils;
@@ -68,7 +68,7 @@ public void cleanUp() {
@Test
public void pipelineWithSwitch() {
- MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.build());
@@ -77,7 +77,7 @@ public void pipelineWithSwitch() {
AbstractPipeline pipe = client.pipelined();
pipe.set("pstr", "foobar");
pipe.hset("phash", "foo", "bar");
- MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
+ MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
SwitchReason.HEALTH_CHECK, provider.getDatabase());
pipe.sync();
}
@@ -88,7 +88,7 @@ public void pipelineWithSwitch() {
@Test
public void transactionWithSwitch() {
- MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
new MultiDbConfig.Builder(
getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.build());
@@ -97,7 +97,7 @@ public void transactionWithSwitch() {
AbstractTransaction tx = client.multi();
tx.set("tstr", "foobar");
tx.hset("thash", "foo", "bar");
- MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider,
+ MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
SwitchReason.HEALTH_CHECK, provider.getDatabase());
assertEquals(Arrays.asList("OK", 1L), tx.exec());
}
@@ -119,7 +119,7 @@ public void commandFailoverUnresolvableHost() {
.circuitBreakerMinNumOfFailures(slidingWindowMinFails);
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiDatabaseConnectionProvider connectionProvider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider(
builder.build());
connectionProvider.setDatabaseSwitchListener(failoverReporter);
@@ -162,7 +162,7 @@ public void commandFailover() {
.circuitBreakerSlidingWindowSize(slidingWindowSize);
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiDatabaseConnectionProvider connectionProvider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider(
builder.build());
connectionProvider.setDatabaseSwitchListener(failoverReporter);
@@ -200,7 +200,7 @@ public void pipelineFailover() {
.fallbackExceptionList(Collections.singletonList(JedisConnectionException.class));
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiDatabaseConnectionProvider cacheProvider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider cacheProvider = new MultiDbConnectionProvider(
builder.build());
cacheProvider.setDatabaseSwitchListener(failoverReporter);
@@ -232,7 +232,7 @@ public void failoverFromAuthError() {
.fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class));
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiDatabaseConnectionProvider cacheProvider = new MultiDatabaseConnectionProvider(
+ MultiDbConnectionProvider cacheProvider = new MultiDbConnectionProvider(
builder.build());
cacheProvider.setDatabaseSwitchListener(failoverReporter);
diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
index ef14cc51ce..6d1b14009e 100644
--- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
+++ b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
@@ -16,11 +16,11 @@
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.mcf.HealthStatus;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper;
/**
- * Tests for MultiDatabaseConnectionProvider event handling behavior during initialization and
+ * Tests for MultiDbConnectionProvider event handling behavior during initialization and
* throughout its lifecycle with HealthStatusChangeEvents.
*/
@ExtendWith(MockitoExtension.class)
@@ -60,7 +60,7 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
assertFalse(provider.getDatabase(endpoint1).isInGracePeriod());
@@ -68,7 +68,7 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
// This should process immediately since initialization is complete
assertDoesNotThrow(() -> {
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
}, "Post-initialization events should be processed immediately");
@@ -92,14 +92,14 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception {
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// Verify initial state
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(),
"Should start with endpoint1 active");
// Simulate multiple rapid events for the same endpoint (post-init behavior)
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// After first UNHEALTHY on active cluster: it enters grace period and provider fails over
@@ -108,7 +108,7 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception {
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Should fail over to endpoint2");
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Healthy event for non-active cluster should not immediately revert active cluster
@@ -117,7 +117,7 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception {
assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Grace period should still be in effect");
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Further UNHEALTHY for non-active cluster is a no-op
@@ -140,7 +140,7 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// This test verifies that multiple endpoints are properly initialized
@@ -166,7 +166,7 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception {
// This test verifies that the provider initializes correctly and doesn't lose events
// In practice, with health checks disabled, no events should be generated during init
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// Verify successful initialization
assertNotNull(provider.getDatabase(), "Provider should have initialized successfully");
@@ -195,11 +195,11 @@ void postInit_two_hop_failover_chain_respected() throws Exception {
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// First event: endpoint1 (active) becomes UNHEALTHY -> failover to endpoint2, endpoint1
// enters grace
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Endpoint1 should be in grace after unhealthy");
@@ -207,7 +207,7 @@ void postInit_two_hop_failover_chain_respected() throws Exception {
"Should have failed over to endpoint2");
// Second event: endpoint2 (now active) becomes UNHEALTHY -> failover to endpoint3
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
assertTrue(provider.getDatabase(endpoint2).isInGracePeriod(),
"Endpoint2 should be in grace after unhealthy");
@@ -216,7 +216,7 @@ void postInit_two_hop_failover_chain_respected() throws Exception {
// Third event: endpoint1 becomes HEALTHY again -> no immediate switch due to grace period
// behavior
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(),
"Active cluster should remain endpoint3");
@@ -236,18 +236,18 @@ void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Excepti
MultiDbConfig config = new MultiDbConfig.Builder(
new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// Verify initial state
assertEquals(HealthStatus.HEALTHY, provider.getDatabase(endpoint1).getHealthStatus(),
"Should start as HEALTHY");
// Send rapid sequence of events post-init
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // triggers failover and grace
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // non-active cluster becomes healthy
- MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // still non-active and in grace; no change
// Final expectations: endpoint1 is in grace, provider remains on endpoint2
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index 9250e90a3e..596a10ff46 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -12,7 +12,7 @@
import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.util.ClientTestUtil;
import java.io.IOException;
@@ -208,7 +208,7 @@ public void accept(ClusterSwitchEventArgs e) {
throw new RuntimeException(e);
}
- MultiDatabaseConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
+ MultiDbConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
ConnectionPool pool1 = provider.getDatabase(endpoint.getHostAndPort(0)).getConnectionPool();
ConnectionPool pool2 = provider.getDatabase(endpoint.getHostAndPort(1)).getConnectionPool();
From d402d89ff11e9e12f5cdbf836bdbfa523eaa194f Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 14:43:16 +0300
Subject: [PATCH 09/17] Rename ClusterSwitchEventArgs to DatabaseSwitchEvent
---
.../jedis/builders/MultiDbClientBuilder.java | 6 ++--
.../jedis/mcf/ClusterSwitchEventArgs.java | 31 -------------------
.../jedis/mcf/DatabaseSwitchEvent.java | 30 ++++++++++++++++++
.../jedis/mcf/MultiDbConnectionProvider.java | 6 ++--
.../clients/jedis/MultiDbClientTest.java | 6 ++--
.../mcf/ActiveActiveLocalFailoverTest.java | 8 ++---
.../jedis/misc/AutomaticFailoverTest.java | 8 ++---
.../scenario/ActiveActiveFailoverTest.java | 10 +++---
8 files changed, 52 insertions(+), 53 deletions(-)
delete mode 100644 src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
create mode 100644 src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java
diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
index 12cc2efefb..3758d5c52b 100644
--- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
+++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
@@ -6,7 +6,7 @@
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.executors.CommandExecutor;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
-import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.providers.ConnectionProvider;
@@ -68,7 +68,7 @@ public abstract class MultiDbClientBuilder
// Multi-db specific configuration fields
private MultiDbConfig multiDbConfig = null;
- private Consumer<ClusterSwitchEventArgs> databaseSwitchListener = null;
+ private Consumer<DatabaseSwitchEvent> databaseSwitchListener = null;
/**
* Sets the multi-database configuration.
@@ -94,7 +94,7 @@ public MultiDbClientBuilder multiDbConfig(MultiDbConfig config) {
* @param listener the database switch event listener
* @return this builder
*/
- public MultiDbClientBuilder databaseSwitchListener(Consumer<ClusterSwitchEventArgs> listener) {
+ public MultiDbClientBuilder databaseSwitchListener(Consumer<DatabaseSwitchEvent> listener) {
this.databaseSwitchListener = listener;
return this;
}
diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
deleted file mode 100644
index a78c41864a..0000000000
--- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package redis.clients.jedis.mcf;
-
-import redis.clients.jedis.Endpoint;
-import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
-
-public class ClusterSwitchEventArgs {
-
- private final SwitchReason reason;
- private final String ClusterName;
- private final Endpoint Endpoint;
-
- public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Database database) {
- this.reason = reason;
- // TODO: @ggivo do we need cluster name?
- this.ClusterName = database.getCircuitBreaker().getName();
- this.Endpoint = endpoint;
- }
-
- public SwitchReason getReason() {
- return reason;
- }
-
- public String getClusterName() {
- return ClusterName;
- }
-
- public Endpoint getEndpoint() {
- return Endpoint;
- }
-
-}
diff --git a/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java b/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java
new file mode 100644
index 0000000000..6cc233cd7d
--- /dev/null
+++ b/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java
@@ -0,0 +1,30 @@
+package redis.clients.jedis.mcf;
+
+import redis.clients.jedis.Endpoint;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
+
+public class DatabaseSwitchEvent {
+
+ private final SwitchReason reason;
+ private final String databaseName;
+ private final Endpoint endpoint;
+
+ public DatabaseSwitchEvent(SwitchReason reason, Endpoint endpoint, Database database) {
+ this.reason = reason;
+ this.databaseName = database.getCircuitBreaker().getName();
+ this.endpoint = endpoint;
+ }
+
+ public SwitchReason getReason() {
+ return reason;
+ }
+
+ public String getDatabaseName() {
+ return databaseName;
+ }
+
+ public Endpoint getEndpoint() {
+ return endpoint;
+ }
+
+}
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
index c39d137616..8d515627f3 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
@@ -80,7 +80,7 @@ public class MultiDbConnectionProvider implements ConnectionProvider {
* Functional interface for listening to cluster switch events. The event args contain the reason
* for the switch, the endpoint, and the cluster.
*/
- private Consumer databaseSwitchListener;
+ private Consumer databaseSwitchListener;
private List> fallbackExceptionList;
@@ -743,12 +743,12 @@ public boolean canIterateFrom(Database iterateFrom) {
public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Database database) {
if (databaseSwitchListener != null) {
- ClusterSwitchEventArgs eventArgs = new ClusterSwitchEventArgs(reason, endpoint, database);
+ DatabaseSwitchEvent eventArgs = new DatabaseSwitchEvent(reason, endpoint, database);
databaseSwitchListener.accept(eventArgs);
}
}
- public void setDatabaseSwitchListener(Consumer databaseSwitchListener) {
+ public void setDatabaseSwitchListener(Consumer databaseSwitchListener) {
this.databaseSwitchListener = databaseSwitchListener;
}
diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
index 5325f0e742..43673da1ed 100644
--- a/src/test/java/redis/clients/jedis/MultiDbClientTest.java
+++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
@@ -17,7 +17,7 @@
import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
-import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
import redis.clients.jedis.mcf.SwitchReason;
import java.io.IOException;
@@ -181,8 +181,8 @@ public void testWithDatabaseSwitchListener() {
.weight(50.0f).build())
.build();
- Consumer eventConsumer;
- List events = new ArrayList<>();
+ Consumer eventConsumer;
+ List events = new ArrayList<>();
eventConsumer = events::add;
try (MultiDbClient testClient = MultiDbClient.builder().databaseSwitchListener(eventConsumer)
diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
index f42030cd52..a5aae5e9bf 100644
--- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
@@ -122,7 +122,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio
// Use the parameterized fastFailover setting
builder.fastFailover(fastFailover);
- class FailoverReporter implements Consumer {
+ class FailoverReporter implements Consumer {
String currentClusterName = "not set";
@@ -139,10 +139,10 @@ public String getCurrentClusterName() {
}
@Override
- public void accept(ClusterSwitchEventArgs e) {
- this.currentClusterName = e.getClusterName();
+ public void accept(DatabaseSwitchEvent e) {
+ this.currentClusterName = e.getDatabaseName();
log.info("\n\n===={}=== \nJedis switching to cluster: {}\n====End of log===\n",
- e.getReason(), e.getClusterName());
+ e.getReason(), e.getDatabaseName());
if ((e.getReason() == SwitchReason.CIRCUIT_BREAKER
|| e.getReason() == SwitchReason.HEALTH_CHECK)) {
failoverHappened = true;
diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
index e4b95d2e52..ac74738226 100644
--- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
@@ -17,7 +17,7 @@
import redis.clients.jedis.*;
import redis.clients.jedis.exceptions.JedisAccessControlException;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper;
import redis.clients.jedis.mcf.SwitchReason;
@@ -250,13 +250,13 @@ public void failoverFromAuthError() {
jedis.close();
}
- static class RedisFailoverReporter implements Consumer {
+ static class RedisFailoverReporter implements Consumer {
boolean failedOver = false;
@Override
- public void accept(ClusterSwitchEventArgs e) {
- log.info("Jedis fail over to cluster: " + e.getClusterName());
+ public void accept(DatabaseSwitchEvent e) {
+ log.info("Jedis fail over to cluster: " + e.getDatabaseName());
failedOver = true;
}
}
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index 596a10ff46..e6ebc42b8d 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -11,7 +11,7 @@
import redis.clients.jedis.*;
import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.util.ClientTestUtil;
@@ -82,7 +82,7 @@ public void testFailover() {
.fastFailover(true)
.retryOnFailover(false)
.build();
- class FailoverReporter implements Consumer {
+ class FailoverReporter implements Consumer {
String currentClusterName = "not set";
@@ -99,10 +99,10 @@ public String getCurrentClusterName() {
}
@Override
- public void accept(ClusterSwitchEventArgs e) {
- this.currentClusterName = e.getClusterName();
+ public void accept(DatabaseSwitchEvent e) {
+ this.currentClusterName = e.getDatabaseName();
log.info("\n\n====FailoverEvent=== \nJedis failover to cluster: {}\n====FailoverEvent===\n\n",
- e.getClusterName());
+ e.getDatabaseName());
if (failoverHappened) {
failbackHappened = true;
From c73a1cd8425cb65742f6f4c261e0bb7d3f4ec4dd Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 15:04:00 +0300
Subject: [PATCH 10/17] Fix error in test after renaming multiDbConfig
---
.../jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
index 076eade442..857f6dc32a 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
@@ -154,7 +154,7 @@ private static void setBuilderFailoverConfig(MultiDbConfig.Builder builder, int
private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception {
// Access the underlying MultiDbConfig inside provider and adjust fields for this
// test
- Object cfg = ReflectionTestUtil.getField(provider, "MultiDbConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts);
@@ -162,13 +162,13 @@ private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exce
}
private int getProviderMaxAttempts() throws Exception {
- Object cfg = ReflectionTestUtil.getField(provider, "MultiDbConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
return ReflectionTestUtil.getField(cfg, "maxNumFailoverAttempts");
}
private int getProviderDelayMs() throws Exception {
- Object cfg = ReflectionTestUtil.getField(provider, "MultiDbConfig");
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts");
}
From b89b703f95cb6d69e8a66d2f6446d9fc0f9fb18f Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 16:51:15 +0300
Subject: [PATCH 11/17] Rename MultiClusterPipeline to MultiDbPipeline
---
src/main/java/redis/clients/jedis/MultiDbClient.java | 8 ++++----
src/main/java/redis/clients/jedis/UnifiedJedis.java | 4 ++--
...{MultiClusterPipeline.java => MultiDbPipeline.java} | 10 ++++------
3 files changed, 10 insertions(+), 12 deletions(-)
rename src/main/java/redis/clients/jedis/mcf/{MultiClusterPipeline.java => MultiDbPipeline.java} (86%)
diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java
index bd65f119ff..a753739cec 100644
--- a/src/main/java/redis/clients/jedis/MultiDbClient.java
+++ b/src/main/java/redis/clients/jedis/MultiDbClient.java
@@ -6,7 +6,7 @@
import redis.clients.jedis.csc.Cache;
import redis.clients.jedis.executors.CommandExecutor;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
-import redis.clients.jedis.mcf.MultiClusterPipeline;
+import redis.clients.jedis.mcf.MultiDbPipeline;
import redis.clients.jedis.mcf.MultiClusterTransaction;
import redis.clients.jedis.providers.ConnectionProvider;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
@@ -219,11 +219,11 @@ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs)
* The returned pipeline supports the same resilience features as the main client, including
* automatic failover during batch execution.
*
- * @return a new MultiClusterPipeline instance
+ * @return a new MultiDbPipeline instance
*/
@Override
- public MultiClusterPipeline pipelined() {
- return new MultiClusterPipeline(getMultiDbConnectionProvider(), commandObjects);
+ public MultiDbPipeline pipelined() {
+ return new MultiDbPipeline(getMultiDbConnectionProvider(), commandObjects);
}
/**
diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java
index ccb9d37df6..11e063b7c2 100644
--- a/src/main/java/redis/clients/jedis/UnifiedJedis.java
+++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java
@@ -33,7 +33,7 @@
import redis.clients.jedis.resps.RawVector;
import redis.clients.jedis.json.JsonObjectMapper;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
-import redis.clients.jedis.mcf.MultiClusterPipeline;
+import redis.clients.jedis.mcf.MultiDbPipeline;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.mcf.MultiClusterTransaction;
import redis.clients.jedis.params.*;
@@ -5100,7 +5100,7 @@ public PipelineBase pipelined() {
if (provider == null) {
throw new IllegalStateException("It is not allowed to create Pipeline from this " + getClass());
} else if (provider instanceof MultiDbConnectionProvider) {
- return new MultiClusterPipeline((MultiDbConnectionProvider) provider, commandObjects);
+ return new MultiDbPipeline((MultiDbConnectionProvider) provider, commandObjects);
} else {
return new Pipeline(provider.getConnection(), true, commandObjects);
}
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
similarity index 86%
rename from src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
index d302768fad..defb97d1a2 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
@@ -11,17 +11,16 @@
/**
* This is high memory dependent solution as all the appending commands will be hold in memory until
- * {@link MultiClusterPipeline#sync() SYNC} (or {@link MultiClusterPipeline#close() CLOSE}) gets
- * called.
+ * {@link MultiDbPipeline#sync() SYNC} (or {@link MultiDbPipeline#close() CLOSE}) gets called.
*/
@Experimental
-public class MultiClusterPipeline extends PipelineBase implements Closeable {
+public class MultiDbPipeline extends PipelineBase implements Closeable {
private final CircuitBreakerFailoverConnectionProvider failoverProvider;
private final Queue>> commands = new LinkedList<>();
@Deprecated
- public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider) {
+ public MultiDbPipeline(MultiDbConnectionProvider pooledProvider) {
super(new CommandObjects());
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
@@ -32,8 +31,7 @@ public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider) {
}
}
- public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider,
- CommandObjects commandObjects) {
+ public MultiDbPipeline(MultiDbConnectionProvider pooledProvider, CommandObjects commandObjects) {
super(commandObjects);
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
}
From 8fd1bfe05d2528d6d534a3f55cbd1262fa939b41 Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 16:52:46 +0300
Subject: [PATCH 12/17] Rename MultiClusterTransaction to MultiDbTransaction
---
src/main/java/redis/clients/jedis/MultiDbClient.java | 12 ++++++------
src/main/java/redis/clients/jedis/UnifiedJedis.java | 4 ++--
...usterTransaction.java => MultiDbTransaction.java} | 8 ++++----
3 files changed, 12 insertions(+), 12 deletions(-)
rename src/main/java/redis/clients/jedis/mcf/{MultiClusterTransaction.java => MultiDbTransaction.java} (94%)
diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java
index a753739cec..ef6d2ca252 100644
--- a/src/main/java/redis/clients/jedis/MultiDbClient.java
+++ b/src/main/java/redis/clients/jedis/MultiDbClient.java
@@ -7,7 +7,7 @@
import redis.clients.jedis.executors.CommandExecutor;
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
import redis.clients.jedis.mcf.MultiDbPipeline;
-import redis.clients.jedis.mcf.MultiClusterTransaction;
+import redis.clients.jedis.mcf.MultiDbTransaction;
import redis.clients.jedis.providers.ConnectionProvider;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
@@ -232,11 +232,11 @@ public MultiDbPipeline pipelined() {
* The returned transaction supports the same resilience features as the main client, including
* automatic failover during transaction execution.
*
- * @return a new MultiClusterTransaction instance
+ * @return a new MultiDbTransaction instance
*/
@Override
- public MultiClusterTransaction multi() {
- return new MultiClusterTransaction((MultiDbConnectionProvider) provider, true, commandObjects);
+ public MultiDbTransaction multi() {
+ return new MultiDbTransaction((MultiDbConnectionProvider) provider, true, commandObjects);
}
/**
@@ -244,13 +244,13 @@ public MultiClusterTransaction multi() {
* @return transaction object
*/
@Override
- public MultiClusterTransaction transaction(boolean doMulti) {
+ public MultiDbTransaction transaction(boolean doMulti) {
if (provider == null) {
throw new IllegalStateException(
"It is not allowed to create Transaction from this " + getClass());
}
- return new MultiClusterTransaction(getMultiDbConnectionProvider(), doMulti, commandObjects);
+ return new MultiDbTransaction(getMultiDbConnectionProvider(), doMulti, commandObjects);
}
public Endpoint getActiveEndpoint() {
diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java
index 11e063b7c2..8a59657ff6 100644
--- a/src/main/java/redis/clients/jedis/UnifiedJedis.java
+++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java
@@ -35,7 +35,7 @@
import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
import redis.clients.jedis.mcf.MultiDbPipeline;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
-import redis.clients.jedis.mcf.MultiClusterTransaction;
+import redis.clients.jedis.mcf.MultiDbTransaction;
import redis.clients.jedis.params.*;
import redis.clients.jedis.providers.*;
import redis.clients.jedis.resps.*;
@@ -5121,7 +5121,7 @@ public AbstractTransaction transaction(boolean doMulti) {
if (provider == null) {
throw new IllegalStateException("It is not allowed to create Transaction from this " + getClass());
} else if (provider instanceof MultiDbConnectionProvider) {
- return new MultiClusterTransaction((MultiDbConnectionProvider) provider, doMulti, commandObjects);
+ return new MultiDbTransaction((MultiDbConnectionProvider) provider, doMulti, commandObjects);
} else {
return new Transaction(provider.getConnection(), doMulti, true, commandObjects);
}
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
similarity index 94%
rename from src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
index e4afa24887..ccfe9a0cd6 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
@@ -20,7 +20,7 @@
* This is high memory dependent solution as all the appending commands will be hold in memory.
*/
@Experimental
-public class MultiClusterTransaction extends TransactionBase {
+public class MultiDbTransaction extends TransactionBase {
private static final Builder> NO_OP_BUILDER = BuilderFactory.RAW_OBJECT;
@@ -39,7 +39,7 @@ public class MultiClusterTransaction extends TransactionBase {
* @param provider
*/
@Deprecated
- public MultiClusterTransaction(MultiDbConnectionProvider provider) {
+ public MultiDbTransaction(MultiDbConnectionProvider provider) {
this(provider, true);
}
@@ -50,7 +50,7 @@ public MultiClusterTransaction(MultiDbConnectionProvider provider) {
* @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
*/
@Deprecated
- public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMulti) {
+ public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti) {
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
try (Connection connection = failoverProvider.getConnection()) {
@@ -68,7 +68,7 @@ public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMul
* @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
* @param commandObjects command objects
*/
- public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMulti,
+ public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti,
CommandObjects commandObjects) {
super(commandObjects);
this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
From 8fcb955300cde4fe9939ba76389aa1f4d3db9845 Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 17:07:56 +0300
Subject: [PATCH 13/17] Rename CircuitBreakerCommandExecutor to MultiDbCommandExecutor
---
.../java/redis/clients/jedis/MultiDbClient.java | 8 ++++----
.../java/redis/clients/jedis/UnifiedJedis.java | 14 +-------------
.../jedis/builders/MultiDbClientBuilder.java | 8 ++++----
...ndExecutor.java => MultiDbCommandExecutor.java} | 5 ++---
...a => MultiDbCommandExecutorThresholdsTest.java} | 8 ++++----
5 files changed, 15 insertions(+), 28 deletions(-)
rename src/main/java/redis/clients/jedis/mcf/{CircuitBreakerCommandExecutor.java => MultiDbCommandExecutor.java} (94%)
rename src/test/java/redis/clients/jedis/mcf/{CircuitBreakerThresholdsTest.java => MultiDbCommandExecutorThresholdsTest.java} (97%)
diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java
index ef6d2ca252..3ffeaf93aa 100644
--- a/src/main/java/redis/clients/jedis/MultiDbClient.java
+++ b/src/main/java/redis/clients/jedis/MultiDbClient.java
@@ -5,7 +5,7 @@
import redis.clients.jedis.builders.MultiDbClientBuilder;
import redis.clients.jedis.csc.Cache;
import redis.clients.jedis.executors.CommandExecutor;
-import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
+import redis.clients.jedis.mcf.MultiDbCommandExecutor;
import redis.clients.jedis.mcf.MultiDbPipeline;
import redis.clients.jedis.mcf.MultiDbTransaction;
import redis.clients.jedis.providers.ConnectionProvider;
@@ -75,9 +75,9 @@
* resilience features.
*
* @author Ivo Gaydazhiev
- * @since 5.2.0
+ * @since 7.0.0
* @see MultiDbConnectionProvider
- * @see CircuitBreakerCommandExecutor
+ * @see MultiDbCommandExecutor
* @see MultiDbConfig
*/
@Experimental
@@ -90,7 +90,7 @@ public class MultiDbClient extends UnifiedJedis {
* the builder pattern for advanced configurations. For most use cases, prefer using
* {@link #builder()} to create instances.
*
- * @param commandExecutor the command executor (typically CircuitBreakerCommandExecutor)
+ * @param commandExecutor the command executor (typically MultiDbCommandExecutor)
* @param connectionProvider the connection provider (typically MultiDbConnectionProvider)
* @param commandObjects the command objects
* @param redisProtocol the Redis protocol version
diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java
index 8a59657ff6..18616c2b71 100644
--- a/src/main/java/redis/clients/jedis/UnifiedJedis.java
+++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java
@@ -32,7 +32,7 @@
import redis.clients.jedis.params.VSimParams;
import redis.clients.jedis.resps.RawVector;
import redis.clients.jedis.json.JsonObjectMapper;
-import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
+import redis.clients.jedis.mcf.MultiDbCommandExecutor;
import redis.clients.jedis.mcf.MultiDbPipeline;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.mcf.MultiDbTransaction;
@@ -232,18 +232,6 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo
this(new RetryableCommandExecutor(provider, maxAttempts, maxTotalRetriesDuration), provider);
}
- /**
- * Constructor which supports multiple cluster/database endpoints each with their own isolated connection pool.
- *
- * With this Constructor users can seamlessly failover to Disaster Recovery (DR), Backup, and Active-Active cluster(s)
- * by using simple configuration which is passed through from Resilience4j - https://resilience4j.readme.io/docs
- *
- */
- @Experimental
- public UnifiedJedis(MultiDbConnectionProvider provider) {
- this(new CircuitBreakerCommandExecutor(provider), provider);
- }
-
/**
* The constructor to use a custom {@link CommandExecutor}.
*
diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
index 3758d5c52b..002de51666 100644
--- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
+++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
@@ -5,7 +5,7 @@
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.executors.CommandExecutor;
-import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
+import redis.clients.jedis.mcf.MultiDbCommandExecutor;
import redis.clients.jedis.mcf.DatabaseSwitchEvent;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.providers.ConnectionProvider;
@@ -60,7 +60,7 @@
*
* @param the client type that this builder creates
* @author Ivo Gaydazhiev
- * @since 5.2.0
+ * @since 7.0.0
*/
@Experimental
public abstract class MultiDbClientBuilder
@@ -125,8 +125,8 @@ protected ConnectionProvider createDefaultConnectionProvider() {
@Override
protected CommandExecutor createDefaultCommandExecutor() {
- // For multi-db clients, we always use CircuitBreakerCommandExecutor
- return new CircuitBreakerCommandExecutor((MultiDbConnectionProvider) this.connectionProvider);
+ // For multi-db clients, we always use MultiDbCommandExecutor
+ return new MultiDbCommandExecutor((MultiDbConnectionProvider) this.connectionProvider);
}
@Override
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
similarity index 94%
rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
index 31d8d67a73..815266df53 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
@@ -21,10 +21,9 @@
*
*/
@Experimental
-public class CircuitBreakerCommandExecutor extends CircuitBreakerFailoverBase
- implements CommandExecutor {
+public class MultiDbCommandExecutor extends CircuitBreakerFailoverBase implements CommandExecutor {
- public CircuitBreakerCommandExecutor(MultiDbConnectionProvider provider) {
+ public MultiDbCommandExecutor(MultiDbConnectionProvider provider) {
super(provider);
}
diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java
similarity index 97%
rename from src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java
index 7d439c6d46..46d041f9db 100644
--- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java
@@ -33,7 +33,7 @@ public class CircuitBreakerThresholdsTest {
private MultiDbConnectionProvider realProvider;
private MultiDbConnectionProvider spyProvider;
private Database cluster;
- private CircuitBreakerCommandExecutor executor;
+ private MultiDbCommandExecutor executor;
private CommandObject dummyCommand;
private TrackingConnectionPool poolMock;
private HostAndPort fakeEndpoint = new HostAndPort("fake", 6379);
@@ -61,7 +61,7 @@ public void setup() throws Exception {
cluster = spyProvider.getDatabase();
- executor = new CircuitBreakerCommandExecutor(spyProvider);
+ executor = new MultiDbCommandExecutor(spyProvider);
dummyCommand = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
BuilderFactory.STRING);
@@ -129,7 +129,7 @@ public void rateBelowThreshold_doesNotFailover() throws Exception {
MultiDbConnectionProvider rp = new MultiDbConnectionProvider(cfgBuilder.build());
MultiDbConnectionProvider sp = spy(rp);
Database c = sp.getDatabase();
- try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(sp)) {
+ try (MultiDbCommandExecutor ex = new MultiDbCommandExecutor(sp)) {
CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
BuilderFactory.STRING);
@@ -197,7 +197,7 @@ public void thresholdMatrix(int minFailures, float ratePercent, int successes, i
MultiDbConnectionProvider real = new MultiDbConnectionProvider(cfgBuilder.build());
MultiDbConnectionProvider spy = spy(real);
Database c = spy.getDatabase();
- try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(spy)) {
+ try (MultiDbCommandExecutor ex = new MultiDbCommandExecutor(spy)) {
CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
BuilderFactory.STRING);
From c2e64a6740498562f12b0f8b22263ffa1ecffd8c Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 17:13:19 +0300
Subject: [PATCH 14/17] Rename CircuitBreakerFailoverConnectionProvider to
 MultiDbConnectionSupplier
---
...nnectionProvider.java => MultiDbConnectionSupplier.java} | 4 ++--
src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java | 6 +++---
.../java/redis/clients/jedis/mcf/MultiDbTransaction.java | 6 +++---
3 files changed, 8 insertions(+), 8 deletions(-)
rename src/main/java/redis/clients/jedis/mcf/{CircuitBreakerFailoverConnectionProvider.java => MultiDbConnectionSupplier.java} (91%)
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
similarity index 91%
rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
index 7dfd1ef527..7310ef8d63 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
@@ -14,9 +14,9 @@
* Active-Active cluster(s) by using simple configuration
*/
@Experimental
-public class CircuitBreakerFailoverConnectionProvider extends CircuitBreakerFailoverBase {
+public class MultiDbConnectionSupplier extends CircuitBreakerFailoverBase {
- public CircuitBreakerFailoverConnectionProvider(MultiDbConnectionProvider provider) {
+ public MultiDbConnectionSupplier(MultiDbConnectionProvider provider) {
super(provider);
}
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
index defb97d1a2..bc0d950a6a 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
@@ -16,14 +16,14 @@
@Experimental
public class MultiDbPipeline extends PipelineBase implements Closeable {
- private final CircuitBreakerFailoverConnectionProvider failoverProvider;
+ private final MultiDbConnectionSupplier failoverProvider;
private final Queue>> commands = new LinkedList<>();
@Deprecated
public MultiDbPipeline(MultiDbConnectionProvider pooledProvider) {
super(new CommandObjects());
- this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
+ this.failoverProvider = new MultiDbConnectionSupplier(pooledProvider);
try (Connection connection = failoverProvider.getConnection()) {
RedisProtocol proto = connection.getRedisProtocol();
@@ -33,7 +33,7 @@ public MultiDbPipeline(MultiDbConnectionProvider pooledProvider) {
public MultiDbPipeline(MultiDbConnectionProvider pooledProvider, CommandObjects commandObjects) {
super(commandObjects);
- this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
+ this.failoverProvider = new MultiDbConnectionSupplier(pooledProvider);
}
@Override
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
index ccfe9a0cd6..1688a2c635 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
@@ -26,7 +26,7 @@ public class MultiDbTransaction extends TransactionBase {
private static final String GRAPH_COMMANDS_NOT_SUPPORTED_MESSAGE = "Graph commands are not supported.";
- private final CircuitBreakerFailoverConnectionProvider failoverProvider;
+ private final MultiDbConnectionSupplier failoverProvider;
private final AtomicInteger extraCommandCount = new AtomicInteger();
private final Queue>> commands = new LinkedList<>();
@@ -51,7 +51,7 @@ public MultiDbTransaction(MultiDbConnectionProvider provider) {
*/
@Deprecated
public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti) {
- this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
+ this.failoverProvider = new MultiDbConnectionSupplier(provider);
try (Connection connection = failoverProvider.getConnection()) {
RedisProtocol proto = connection.getRedisProtocol();
@@ -71,7 +71,7 @@ public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti) {
public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti,
CommandObjects commandObjects) {
super(commandObjects);
- this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
+ this.failoverProvider = new MultiDbConnectionSupplier(provider);
if (doMulti) multi();
}
From 5a8ca16529cac8c7ad833f8a5d9934ea818f42af Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 17:16:06 +0300
Subject: [PATCH 15/17] Rename CircuitBreakerFailoverBase to
MultiDbFailoverBase
---
.../java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java | 2 +-
.../redis/clients/jedis/mcf/MultiDbConnectionSupplier.java | 2 +-
...rcuitBreakerFailoverBase.java => MultiDbFailoverBase.java} | 4 ++--
3 files changed, 4 insertions(+), 4 deletions(-)
rename src/main/java/redis/clients/jedis/mcf/{CircuitBreakerFailoverBase.java => MultiDbFailoverBase.java} (96%)
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
index 815266df53..d3b7c48e2e 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
@@ -21,7 +21,7 @@
*
*/
@Experimental
-public class MultiDbCommandExecutor extends CircuitBreakerFailoverBase implements CommandExecutor {
+public class MultiDbCommandExecutor extends MultiDbFailoverBase implements CommandExecutor {
public MultiDbCommandExecutor(MultiDbConnectionProvider provider) {
super(provider);
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
index 7310ef8d63..9bd1f35440 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
@@ -14,7 +14,7 @@
* Active-Active cluster(s) by using simple configuration
*/
@Experimental
-public class MultiDbConnectionSupplier extends CircuitBreakerFailoverBase {
+public class MultiDbConnectionSupplier extends MultiDbFailoverBase {
public MultiDbConnectionSupplier(MultiDbConnectionProvider provider) {
super(provider);
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java
similarity index 96%
rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java
index ba1ea98dec..3e9d5f2d39 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java
@@ -20,12 +20,12 @@
*
*/
@Experimental
-public class CircuitBreakerFailoverBase implements AutoCloseable {
+public class MultiDbFailoverBase implements AutoCloseable {
private final Lock lock = new ReentrantLock(true);
protected final MultiDbConnectionProvider provider;
- public CircuitBreakerFailoverBase(MultiDbConnectionProvider provider) {
+ public MultiDbFailoverBase(MultiDbConnectionProvider provider) {
this.provider = provider;
}
From 4cb000e87666ec47da92fe3bd94bbe593d2fbd3d Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 17:41:26 +0300
Subject: [PATCH 16/17] Rename tests 'Cluster*'
---
.../redis/clients/jedis/UnifiedJedis.java | 1 -
...va => DatabaseEvaluateThresholdsTest.java} | 36 +++++------
... MultiDbCircuitBreakerThresholdsTest.java} | 2 +-
...ctionProviderDynamicEndpointUnitTest.java} | 47 +++++++-------
...onProviderFailoverAttemptsConfigTest.java} | 6 +-
...ConnectionProviderInitializationTest.java} | 62 +++++++++----------
6 files changed, 73 insertions(+), 81 deletions(-)
rename src/test/java/redis/clients/jedis/mcf/{ClusterEvaluateThresholdsTest.java => DatabaseEvaluateThresholdsTest.java} (87%)
rename src/test/java/redis/clients/jedis/mcf/{MultiDbCommandExecutorThresholdsTest.java => MultiDbCircuitBreakerThresholdsTest.java} (99%)
rename src/test/java/redis/clients/jedis/mcf/{MultiClusterDynamicEndpointUnitTest.java => MultiDbConnectionProviderDynamicEndpointUnitTest.java} (84%)
rename src/test/java/redis/clients/jedis/mcf/{MultiClusterFailoverAttemptsConfigTest.java => MultiDbConnectionProviderFailoverAttemptsConfigTest.java} (97%)
rename src/test/java/redis/clients/jedis/mcf/{MultiClusterInitializationTest.java => MultiDbConnectionProviderInitializationTest.java} (62%)
diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java
index 18616c2b71..548380aa9d 100644
--- a/src/main/java/redis/clients/jedis/UnifiedJedis.java
+++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java
@@ -32,7 +32,6 @@
import redis.clients.jedis.params.VSimParams;
import redis.clients.jedis.resps.RawVector;
import redis.clients.jedis.json.JsonObjectMapper;
-import redis.clients.jedis.mcf.MultiDbCommandExecutor;
import redis.clients.jedis.mcf.MultiDbPipeline;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.mcf.MultiDbTransaction;
diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java
similarity index 87%
rename from src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
rename to src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java
index 8a6fd466c0..2892005cb4 100644
--- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java
@@ -17,29 +17,29 @@
/**
* Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures
* must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the
- * provider and cluster wiring to avoid network I/O.
+ * provider and {@link Database} wiring to avoid network I/O.
*/
-public class ClusterEvaluateThresholdsTest {
+public class DatabaseEvaluateThresholdsTest {
private MultiDbConnectionProvider provider;
- private Database cluster;
+ private Database database;
private CircuitBreaker circuitBreaker;
private CircuitBreaker.Metrics metrics;
@BeforeEach
public void setup() {
provider = mock(MultiDbConnectionProvider.class);
- cluster = mock(Database.class);
+ database = mock(Database.class);
circuitBreaker = mock(CircuitBreaker.class);
metrics = mock(CircuitBreaker.Metrics.class);
- when(cluster.getCircuitBreaker()).thenReturn(circuitBreaker);
+ when(database.getCircuitBreaker()).thenReturn(circuitBreaker);
when(circuitBreaker.getMetrics()).thenReturn(metrics);
when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
// Configure the mock to call the real evaluateThresholds method
- doCallRealMethod().when(cluster).evaluateThresholds(anyBoolean());
+ doCallRealMethod().when(database).evaluateThresholds(anyBoolean());
}
@@ -50,13 +50,13 @@ public void setup() {
*/
@Test
public void belowMinFailures_doesNotFailover() {
- when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
when(metrics.getNumberOfFailedCalls()).thenReturn(1); // +1 becomes 2, still < 3
when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0);
- when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f);
+ when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f);
when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
- cluster.evaluateThresholds(false);
+ database.evaluateThresholds(false);
verify(circuitBreaker, never()).transitionToOpenState();
verify(provider, never()).switchToHealthyDatabase(any(), any());
}
@@ -68,13 +68,13 @@ public void belowMinFailures_doesNotFailover() {
*/
@Test
public void minFailuresAndRateExceeded_triggersOpenState() {
- when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, reaching minFailures
when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0);
- when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f);
+ when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f);
when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
- cluster.evaluateThresholds(false);
+ database.evaluateThresholds(false);
verify(circuitBreaker, times(1)).transitionToOpenState();
}
@@ -86,13 +86,13 @@ public void minFailuresAndRateExceeded_triggersOpenState() {
*/
@Test
public void rateBelowThreshold_doesNotFailover() {
- when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
when(metrics.getNumberOfSuccessfulCalls()).thenReturn(3);
when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, rate = 3/(3+3) = 50%
- when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(80.0f);
+ when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(80.0f);
when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
- cluster.evaluateThresholds(false);
+ database.evaluateThresholds(false);
verify(circuitBreaker, never()).transitionToOpenState();
verify(provider, never()).switchToHealthyDatabase(any(), any());
@@ -165,13 +165,13 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures,
boolean lastFailRecorded, boolean expectOpenState) {
- when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(minFailures);
+ when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(minFailures);
when(metrics.getNumberOfSuccessfulCalls()).thenReturn(successes);
when(metrics.getNumberOfFailedCalls()).thenReturn(failures);
- when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(ratePercent);
+ when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(ratePercent);
when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
- cluster.evaluateThresholds(lastFailRecorded);
+ database.evaluateThresholds(lastFailRecorded);
if (expectOpenState) {
verify(circuitBreaker, times(1)).transitionToOpenState();
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java
similarity index 99%
rename from src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java
index 46d041f9db..7a0f4319c6 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java
@@ -28,7 +28,7 @@
* must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the
* provider and cluster wiring to avoid network I/O.
*/
-public class CircuitBreakerThresholdsTest {
+public class MultiDbCircuitBreakerThresholdsTest {
private MultiDbConnectionProvider realProvider;
private MultiDbConnectionProvider spyProvider;
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java
similarity index 84%
rename from src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java
index 6ddca7ae12..ceb5cc021c 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java
@@ -5,7 +5,6 @@
import org.mockito.MockedConstruction;
import redis.clients.jedis.Connection;
-import redis.clients.jedis.ConnectionPool;
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPort;
@@ -21,7 +20,7 @@
import static org.mockito.Mockito.mockConstruction;
import static org.mockito.Mockito.when;
-public class MultiClusterDynamicEndpointUnitTest {
+public class MultiDbConnectionProviderDynamicEndpointUnitTest {
private MultiDbConnectionProvider provider;
private JedisClientConfig clientConfig;
@@ -41,7 +40,7 @@ void setUp() {
provider = new MultiDbConnectionProvider(multiConfig);
}
- // Helper method to create cluster configurations
+ // Helper method to create database configurations
private DatabaseConfig createDatabaseConfig(HostAndPort hostAndPort, float weight) {
// Disable health check for unit tests to avoid real connections
return DatabaseConfig.builder(hostAndPort, clientConfig).weight(weight)
@@ -49,18 +48,18 @@ private DatabaseConfig createDatabaseConfig(HostAndPort hostAndPort, float weigh
}
@Test
- void testAddNewCluster() {
+ void testAddNewDatabase() {
DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
// Should not throw exception
assertDoesNotThrow(() -> provider.add(newConfig));
- // Verify the cluster was added by checking it can be retrieved
+ // Verify the database was added by checking it can be retrieved
assertNotNull(provider.getDatabase(endpoint2.getHostAndPort()));
}
@Test
- void testAddDuplicateCluster() {
+ void testAddDuplicateDatabase() {
DatabaseConfig duplicateConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 2.0f);
// Should throw validation exception for duplicate endpoint
@@ -80,19 +79,18 @@ void testRemoveExistingCluster() {
try (MockedConstruction mockedPool = mockPool(mockConnection)) {
// Create initial provider with endpoint1
- DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
+ DatabaseConfig dbConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { clusterConfig1 })
- .build();
+ MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { dbConfig1 }).build();
try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider(
multiConfig)) {
- // Add endpoint2 as second cluster
+ // Add endpoint2 as second database
DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
providerWithMockedPool.add(newConfig);
- // Now remove endpoint1 (original cluster)
+ // Now remove endpoint1 (original database)
assertDoesNotThrow(() -> providerWithMockedPool.remove(endpoint1.getHostAndPort()));
// Verify endpoint1 was removed
@@ -119,8 +117,8 @@ void testRemoveNonExistentCluster() {
}
@Test
- void testRemoveLastRemainingCluster() {
- // Should throw validation exception when trying to remove the last cluster
+ void testRemoveLastRemainingDatabase() {
+ // Should throw validation exception when trying to remove the last database
assertThrows(JedisValidationException.class, () -> provider.remove(endpoint1.getHostAndPort()));
}
@@ -132,7 +130,7 @@ void testRemoveNullEndpoint() {
@Test
void testAddAndRemoveMultipleClusters() {
- // Add endpoint2 as second cluster
+ // Add endpoint2 as second database
DatabaseConfig config2 = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
// Create a third endpoint for this test
@@ -142,7 +140,7 @@ void testAddAndRemoveMultipleClusters() {
provider.add(config2);
provider.add(config3);
- // Verify all clusters exist
+ // Verify all databases exist
assertNotNull(provider.getDatabase(endpoint1.getHostAndPort()));
assertNotNull(provider.getDatabase(endpoint2.getHostAndPort()));
assertNotNull(provider.getDatabase(endpoint3));
@@ -150,7 +148,7 @@ void testAddAndRemoveMultipleClusters() {
// Remove endpoint2
provider.remove(endpoint2.getHostAndPort());
- // Verify correct cluster was removed
+ // Verify correct database was removed
assertNull(provider.getDatabase(endpoint2.getHostAndPort()));
assertNotNull(provider.getDatabase(endpoint1.getHostAndPort()));
assertNotNull(provider.getDatabase(endpoint3));
@@ -158,14 +156,14 @@ void testAddAndRemoveMultipleClusters() {
@Test
void testActiveClusterHandlingOnAdd() {
- // The initial cluster should be active
+ // The initial database should be active
assertNotNull(provider.getDatabase());
// Add endpoint2 with higher weight
DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 5.0f);
provider.add(newConfig);
- // Active cluster should still be valid (implementation may or may not switch)
+ // Active database should still be valid (implementation may or may not switch)
assertNotNull(provider.getDatabase());
}
@@ -176,26 +174,25 @@ void testActiveClusterHandlingOnRemove() {
try (MockedConstruction mockedPool = mockPool(mockConnection)) {
// Create initial provider with endpoint1
- DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
+ DatabaseConfig dbConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { clusterConfig1 })
- .build();
+ MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { dbConfig1 }).build();
try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider(
multiConfig)) {
- // Add endpoint2 as second cluster
+ // Add endpoint2 as second database
DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
providerWithMockedPool.add(newConfig);
- // Get current active cluster
+ // Get current active database
Object initialActiveCluster = providerWithMockedPool.getDatabase();
assertNotNull(initialActiveCluster);
- // Remove endpoint1 (original cluster, might be active)
+ // Remove endpoint1 (original database, might be active)
providerWithMockedPool.remove(endpoint1.getHostAndPort());
- // Should still have an active cluster
+ // Should still have an active database
assertNotNull(providerWithMockedPool.getDatabase());
}
}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java
similarity index 97%
rename from src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java
index 857f6dc32a..0b062e4298 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java
@@ -22,9 +22,9 @@
/**
* Tests for how getMaxNumFailoverAttempts and getDelayInBetweenFailoverAttempts impact
- * MultiDbConnectionProvider behaviour when no healthy clusters are available.
+ * MultiDbConnectionProvider behaviour when no healthy databases are available.
*/
-public class MultiClusterFailoverAttemptsConfigTest {
+public class MultiDbConnectionProviderFailoverAttemptsConfigTest {
private HostAndPort endpoint0 = new HostAndPort("purposefully-incorrect", 0000);
private HostAndPort endpoint1 = new HostAndPort("purposefully-incorrect", 0001);
@@ -47,7 +47,7 @@ void setUp() throws Exception {
provider = new MultiDbConnectionProvider(builder.build());
- // Disable both clusters to force handleNoHealthyCluster path
+ // Disable both databases to force handleNoHealthyCluster path
provider.getDatabase(endpoint0).setDisabled(true);
provider.getDatabase(endpoint1).setDisabled(true);
}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java
similarity index 62%
rename from src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java
index e01af8f8f4..1935647d46 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java
@@ -15,13 +15,14 @@
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
/**
* Tests for MultiDbConnectionProvider initialization edge cases
*/
@ExtendWith(MockitoExtension.class)
-public class MultiClusterInitializationTest {
+public class MultiDbConnectionProviderInitializationTest {
private HostAndPort endpoint1;
private HostAndPort endpoint2;
@@ -48,28 +49,26 @@ private MockedConstruction mockPool() {
@Test
void testInitializationWithMixedHealthCheckConfiguration() {
try (MockedConstruction mockedPool = mockPool()) {
- // Create clusters with mixed health check configuration
- MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
- .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false) // No health
- // check
+ // Create databases with mixed health check configuration
+ DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f)
+ .healthCheckEnabled(false) // No health
+ // check
.build();
- MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
- .builder(endpoint2, clientConfig).weight(2.0f)
+ DatabaseConfig db2 = DatabaseConfig.builder(endpoint2, clientConfig).weight(2.0f)
.healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With
// health
// check
.build();
- MultiDbConfig config = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db2 }).build();
try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Should initialize successfully
assertNotNull(provider.getDatabase());
- // Should select cluster1 (no health check, assumed healthy) or cluster2 based on weight
- // Since cluster2 has higher weight and health checks, it should be selected if healthy
+ // Should select db1 (no health check, assumed healthy) or db2 based on weight
+ // Since db2 has higher weight and health checks, it should be selected if healthy
assertTrue(provider.getDatabase() == provider.getDatabase(endpoint1)
|| provider.getDatabase() == provider.getDatabase(endpoint2));
}
@@ -79,19 +78,18 @@ void testInitializationWithMixedHealthCheckConfiguration() {
@Test
void testInitializationWithAllHealthChecksDisabled() {
try (MockedConstruction mockedPool = mockPool()) {
- // Create clusters with no health checks
- MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
- .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
+ // Create databases with no health checks
+ DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f)
+ .healthCheckEnabled(false).build();
- MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
- .builder(endpoint2, clientConfig).weight(3.0f) // Higher weight
+ DatabaseConfig db22 = DatabaseConfig.builder(endpoint2, clientConfig).weight(3.0f) // Higher
+ // weight
.healthCheckEnabled(false).build();
- MultiDbConfig config = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db22 }).build();
try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
- // Should select cluster2 (highest weight, no health checks)
+ // Should select db22 (highest weight, no health checks)
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
}
}
@@ -100,14 +98,13 @@ void testInitializationWithAllHealthChecksDisabled() {
@Test
void testInitializationWithSingleCluster() {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDbConfig.DatabaseConfig cluster = MultiDbConfig.DatabaseConfig
- .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
+ DatabaseConfig db = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f)
+ .healthCheckEnabled(false).build();
- MultiDbConfig config = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { cluster }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db }).build();
try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
- // Should select the only available cluster
+ // Should select the only available db
assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
}
}
@@ -123,33 +120,32 @@ void testErrorHandlingWithNullConfiguration() {
@Test
void testErrorHandlingWithEmptyClusterArray() {
assertThrows(JedisValidationException.class, () -> {
- new MultiDbConfig.Builder(new MultiDbConfig.DatabaseConfig[0]).build();
+ new MultiDbConfig.Builder(new DatabaseConfig[0]).build();
});
}
@Test
void testErrorHandlingWithNullDatabaseConfig() {
assertThrows(IllegalArgumentException.class, () -> {
- new MultiDbConfig.Builder(new MultiDbConfig.DatabaseConfig[] { null }).build();
+ new MultiDbConfig.Builder(new DatabaseConfig[] { null }).build();
});
}
@Test
void testInitializationWithZeroWeights() {
try (MockedConstruction mockedPool = mockPool()) {
- MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
- .builder(endpoint1, clientConfig).weight(0.0f) // Zero weight
+ DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(0.0f) // Zero
+ // weight
.healthCheckEnabled(false).build();
- MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
- .builder(endpoint2, clientConfig).weight(0.0f) // Zero weight
+ DatabaseConfig db2 = DatabaseConfig.builder(endpoint2, clientConfig).weight(0.0f) // Zero
+ // weight
.healthCheckEnabled(false).build();
- MultiDbConfig config = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db2 }).build();
try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
- // Should still initialize and select one of the clusters
+ // Should still initialize and select one of the databases
assertNotNull(provider.getDatabase());
}
}
From 45264164af029d314f79f367c6c9c8f015660772 Mon Sep 17 00:00:00 2001
From: ggivo
Date: Mon, 6 Oct 2025 18:24:52 +0300
Subject: [PATCH 17/17] Revert removed UnifiedJedis(MultiDbConnectionProvider
provider) constructor
---
src/main/java/redis/clients/jedis/UnifiedJedis.java | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java
index 548380aa9d..895d6280c6 100644
--- a/src/main/java/redis/clients/jedis/UnifiedJedis.java
+++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java
@@ -28,6 +28,7 @@
import redis.clients.jedis.json.JsonSetParams;
import redis.clients.jedis.json.Path;
import redis.clients.jedis.json.Path2;
+import redis.clients.jedis.mcf.MultiDbCommandExecutor;
import redis.clients.jedis.params.VAddParams;
import redis.clients.jedis.params.VSimParams;
import redis.clients.jedis.resps.RawVector;
@@ -231,6 +232,18 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo
this(new RetryableCommandExecutor(provider, maxAttempts, maxTotalRetriesDuration), provider);
}
+ /**
+ * Constructor which supports multiple cluster/database endpoints each with their own isolated connection pool.
+ *
+   * With this constructor, users can seamlessly fail over to Disaster Recovery (DR), Backup, and Active-Active cluster(s)
+ * by using simple configuration which is passed through from Resilience4j - https://resilience4j.readme.io/docs
+ *
+ */
+ @Experimental
+ public UnifiedJedis(MultiDbConnectionProvider provider) {
+ this(new MultiDbCommandExecutor(provider), provider);
+ }
+
/**
* The constructor to use a custom {@link CommandExecutor}.
*