+ * This method allows adding a fully configured DatabaseConfig instance, providing maximum
+ * flexibility for advanced configurations including custom health check strategies, connection
+ * pool settings, etc.
+ *
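+ * A minimal usage sketch (host, port, and weight are illustrative):
+ *
+ * <pre>{@code
+ * builder.endpoint(DatabaseConfig
+ *     .builder(new HostAndPort("redis-east.example.com", 6379),
+ *       DefaultJedisClientConfig.builder().build())
+ *     .weight(100.0f).build());
+ * }</pre>
+ *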
+ * @param databaseConfig the pre-configured database configuration
+ * @return this builder
+ */
+ public Builder endpoint(DatabaseConfig databaseConfig) {
+ this.databaseConfigs.add(databaseConfig);
+ return this;
+ }
+
+ /**
+ * Adds a Redis endpoint with custom client configuration.
+ *
+ * This method allows specifying endpoint-specific configuration such as authentication, SSL
+ * settings, timeouts, etc. This configuration will override the default client configuration
+ * for this specific endpoint.
+ *
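+ * For example (endpoint address and credentials are illustrative):
+ *
+ * <pre>{@code
+ * builder.endpoint(new HostAndPort("redis-west.example.com", 6379), 50.0f,
+ *   DefaultJedisClientConfig.builder().credentials(credentialsWest).build());
+ * }</pre>
+ *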
+ * @param endpoint the Redis server endpoint
+ * @param weight the weight for this endpoint (higher values = higher priority)
+ * @param clientConfig the client configuration for this endpoint
+ * @return this builder
+ */
+ public Builder endpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) {
+
+ DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight)
+ .build();
+
+ this.databaseConfigs.add(databaseConfig);
+ return this;
}
// ============ Retry Configuration Methods ============
@@ -1281,9 +1219,9 @@ public Builder retryIgnoreExceptionList(List<Class<? extends Throwable>> retryIgnoreExceptionList) {
/**
* Sets the failure rate threshold percentage that triggers circuit breaker activation.
*
- * When the failure rate equals or exceeds this threshold, the circuit breaker transitions to
- * the OPEN state and starts short-circuiting calls, enabling immediate failover to the next
- * available cluster.
+ * When the failure rate meets or exceeds this threshold and the number of failures has also
+ * reached the configured minimum ({@link #circuitBreakerMinNumOfFailures(int)}), the circuit
+ * breaker transitions to the OPEN state and starts short-circuiting calls, enabling immediate
+ * failover to the next available cluster.
*
*
* Typical Values:
@@ -1295,74 +1233,16 @@ public Builder retryIgnoreExceptionList(List<Class<? extends Throwable>> retryIgnoreExceptionList) {
*
* @param circuitBreakerFailureRateThreshold failure rate threshold as percentage (0.0 to 100.0)
* @return this builder instance for method chaining
+ * @see #circuitBreakerMinNumOfFailures(int)
*/
public Builder circuitBreakerFailureRateThreshold(float circuitBreakerFailureRateThreshold) {
+ checkThresholds(circuitBreakerMinNumOfFailures, circuitBreakerFailureRateThreshold);
this.circuitBreakerFailureRateThreshold = circuitBreakerFailureRateThreshold;
return this;
}
- /**
- * Sets the minimum number of calls required before circuit breaker can calculate failure rates.
- *
- * The circuit breaker needs sufficient data to make statistically meaningful decisions. Until
- * this minimum is reached, the circuit breaker remains CLOSED regardless of failure rate.
- *
- *
- * Considerations:
- *
- *
- * - Low values (5-10): Faster failure detection, higher chance of false
- * positives
- * - Medium values (50-100): Balanced approach (default: 100)
- * - High values (200+): More stable decisions, slower failure detection
- *
- * @param circuitBreakerSlidingWindowMinCalls minimum number of calls for failure rate
- * calculation
- * @return this builder instance for method chaining
- */
- public Builder circuitBreakerSlidingWindowMinCalls(int circuitBreakerSlidingWindowMinCalls) {
- this.circuitBreakerSlidingWindowMinCalls = circuitBreakerSlidingWindowMinCalls;
- return this;
- }
-
- /**
- * Sets the type of sliding window used for circuit breaker calculations.
- *
- * Available Types:
- *
- *
- * - COUNT_BASED: Tracks the last N calls (default)
- * - TIME_BASED: Tracks calls from the last N seconds
- *
- *
- * COUNT_BASED is generally preferred for consistent load patterns, while TIME_BASED works
- * better for variable load scenarios.
- *
- * @param circuitBreakerSlidingWindowType sliding window type
- * @return this builder instance for method chaining
- */
- public Builder circuitBreakerSlidingWindowType(
- SlidingWindowType circuitBreakerSlidingWindowType) {
- this.circuitBreakerSlidingWindowType = circuitBreakerSlidingWindowType;
- return this;
- }
-
/**
* Sets the size of the sliding window for circuit breaker calculations.
- *
- * The interpretation depends on the sliding window type:
- *
- *
- * - COUNT_BASED: Number of calls to track
- * - TIME_BASED: Number of seconds to track
- *
- *
- * Typical Values:
- *
- *
- * - COUNT_BASED: 50-200 calls (default: 100)
- * - TIME_BASED: 30-300 seconds
- *
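+ *
+ * With the multi-db configuration the circuit breaker metrics use a time-based sliding window
+ * (see CircuitBreakerThresholdsAdapter), so this value is interpreted as the number of seconds
+ * of calls to track.
+ *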
* @param circuitBreakerSlidingWindowSize sliding window size
* @return this builder instance for method chaining
*/
@@ -1372,47 +1252,30 @@ public Builder circuitBreakerSlidingWindowSize(int circuitBreakerSlidingWindowSi
}
/**
- * Sets the duration threshold above which calls are considered slow.
+ * Sets the minimum number of failures before the circuit breaker is tripped.
*
- * Calls exceeding this threshold contribute to the slow call rate, allowing the circuit breaker
- * to open based on performance degradation rather than just failures. This enables proactive
- * failover when clusters become slow.
+ * When the number of failures reaches this minimum and the failure rate also meets or exceeds
+ * the configured threshold, the circuit breaker trips and prevents further requests from being
+ * sent to the database until it has recovered.
*
*
- * Typical Values:
+ * Default: 1000
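+ *
+ * A minimal sketch combining both thresholds (endpoint details and values are illustrative):
+ *
+ * <pre>{@code
+ * MultiDbConfig config = MultiDbConfig.builder()
+ *     .endpoint(new HostAndPort("redis-east.example.com", 6379), 100.0f,
+ *       DefaultJedisClientConfig.builder().build())
+ *     .circuitBreakerMinNumOfFailures(1000)
+ *     .circuitBreakerFailureRateThreshold(50.0f)
+ *     .build();
+ * }</pre>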
*
- *
- * - 1-5 seconds: For low-latency applications
- * - 10-30 seconds: For standard applications
- * - 60+ seconds: For batch or long-running operations (default: 60s)
- *
- * @param circuitBreakerSlowCallDurationThreshold slow call threshold in milliseconds
+ * @param circuitBreakerMinNumOfFailures minimum number of failures before circuit breaker is
+ * tripped
* @return this builder instance for method chaining
+ * @see #circuitBreakerFailureRateThreshold(float)
*/
- public Builder circuitBreakerSlowCallDurationThreshold(
- int circuitBreakerSlowCallDurationThreshold) {
- this.circuitBreakerSlowCallDurationThreshold = circuitBreakerSlowCallDurationThreshold;
+ public Builder circuitBreakerMinNumOfFailures(int circuitBreakerMinNumOfFailures) {
+ checkThresholds(circuitBreakerMinNumOfFailures, circuitBreakerFailureRateThreshold);
+ this.circuitBreakerMinNumOfFailures = circuitBreakerMinNumOfFailures;
return this;
}
- /**
- * Sets the slow call rate threshold percentage that triggers circuit breaker activation.
- *
- * When the percentage of slow calls equals or exceeds this threshold, the circuit breaker
- * opens. This allows failover based on performance degradation even when calls are technically
- * successful.
- *
- *
- * Note: Default value of 100% means only failures trigger the circuit breaker,
- * not slow calls. Lower values enable performance-based failover.
- *
- * @param circuitBreakerSlowCallRateThreshold slow call rate threshold as percentage (0.0 to
- * 100.0)
- * @return this builder instance for method chaining
- */
- public Builder circuitBreakerSlowCallRateThreshold(float circuitBreakerSlowCallRateThreshold) {
- this.circuitBreakerSlowCallRateThreshold = circuitBreakerSlowCallRateThreshold;
- return this;
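+ /**
+ * Guards the dual-threshold configuration: circuitBreakerMinNumOfFailures and
+ * circuitBreakerFailureRateThreshold must not both be zero.
+ */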
+ private void checkThresholds(int failures, float rate) {
+ if (failures == 0 && rate == 0) {
+ throw new JedisValidationException(
+ "Both circuitBreakerMinNumOfFailures and circuitBreakerFailureRateThreshold can not be 0 at the same time!");
+ }
}
/**
@@ -1635,16 +1498,17 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts
}
/**
- * Builds and returns a new MultiClusterClientConfig instance with all configured settings.
+ * Builds and returns a new MultiDbConfig instance with all configured settings.
*
* This method creates the final configuration object by copying all builder settings to the
* configuration instance. The builder can be reused after calling build() to create additional
* configurations with different settings.
*
- * @return a new MultiClusterClientConfig instance with the configured settings
+ * @return a new MultiDbConfig instance with the configured settings
*/
- public MultiClusterClientConfig build() {
- MultiClusterClientConfig config = new MultiClusterClientConfig(this.clusterConfigs);
+ public MultiDbConfig build() {
+
+ MultiDbConfig config = new MultiDbConfig(this.databaseConfigs.toArray(new DatabaseConfig[0]));
// Copy retry configuration
config.retryMaxAttempts = this.retryMaxAttempts;
@@ -1654,13 +1518,9 @@ public MultiClusterClientConfig build() {
config.retryIgnoreExceptionList = this.retryIgnoreExceptionList;
// Copy circuit breaker configuration
+ config.circuitBreakerMinNumOfFailures = this.circuitBreakerMinNumOfFailures;
config.circuitBreakerFailureRateThreshold = this.circuitBreakerFailureRateThreshold;
- config.circuitBreakerSlidingWindowMinCalls = this.circuitBreakerSlidingWindowMinCalls;
- config.circuitBreakerSlidingWindowType = this.circuitBreakerSlidingWindowType;
config.circuitBreakerSlidingWindowSize = this.circuitBreakerSlidingWindowSize;
- config.circuitBreakerSlowCallDurationThreshold = Duration
- .ofMillis(this.circuitBreakerSlowCallDurationThreshold);
- config.circuitBreakerSlowCallRateThreshold = this.circuitBreakerSlowCallRateThreshold;
config.circuitBreakerIncludedExceptionList = this.circuitBreakerIncludedExceptionList;
config.circuitBreakerIgnoreExceptionList = this.circuitBreakerIgnoreExceptionList;
@@ -1676,6 +1536,7 @@ public MultiClusterClientConfig build() {
return config;
}
+
}
}
\ No newline at end of file
diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java
index b175dbd319..895d6280c6 100644
--- a/src/main/java/redis/clients/jedis/UnifiedJedis.java
+++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java
@@ -28,14 +28,14 @@
import redis.clients.jedis.json.JsonSetParams;
import redis.clients.jedis.json.Path;
import redis.clients.jedis.json.Path2;
+import redis.clients.jedis.mcf.MultiDbCommandExecutor;
import redis.clients.jedis.params.VAddParams;
import redis.clients.jedis.params.VSimParams;
import redis.clients.jedis.resps.RawVector;
import redis.clients.jedis.json.JsonObjectMapper;
-import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor;
-import redis.clients.jedis.mcf.MultiClusterPipeline;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
-import redis.clients.jedis.mcf.MultiClusterTransaction;
+import redis.clients.jedis.mcf.MultiDbPipeline;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbTransaction;
import redis.clients.jedis.params.*;
import redis.clients.jedis.providers.*;
import redis.clients.jedis.resps.*;
@@ -240,8 +240,8 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo
*
*/
@Experimental
- public UnifiedJedis(MultiClusterPooledConnectionProvider provider) {
- this(new CircuitBreakerCommandExecutor(provider), provider);
+ public UnifiedJedis(MultiDbConnectionProvider provider) {
+ this(new MultiDbCommandExecutor(provider), provider);
}
/**
@@ -5099,8 +5099,8 @@ public List<Long> tdigestByRevRank(String key, long... ranks) {
public PipelineBase pipelined() {
if (provider == null) {
throw new IllegalStateException("It is not allowed to create Pipeline from this " + getClass());
- } else if (provider instanceof MultiClusterPooledConnectionProvider) {
- return new MultiClusterPipeline((MultiClusterPooledConnectionProvider) provider, commandObjects);
+ } else if (provider instanceof MultiDbConnectionProvider) {
+ return new MultiDbPipeline((MultiDbConnectionProvider) provider, commandObjects);
} else {
return new Pipeline(provider.getConnection(), true, commandObjects);
}
@@ -5120,8 +5120,8 @@ public AbstractTransaction multi() {
public AbstractTransaction transaction(boolean doMulti) {
if (provider == null) {
throw new IllegalStateException("It is not allowed to create Transaction from this " + getClass());
- } else if (provider instanceof MultiClusterPooledConnectionProvider) {
- return new MultiClusterTransaction((MultiClusterPooledConnectionProvider) provider, doMulti, commandObjects);
+ } else if (provider instanceof MultiDbConnectionProvider) {
+ return new MultiDbTransaction((MultiDbConnectionProvider) provider, doMulti, commandObjects);
} else {
return new Transaction(provider.getConnection(), doMulti, true, commandObjects);
}
diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
new file mode 100644
index 0000000000..002de51666
--- /dev/null
+++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java
@@ -0,0 +1,137 @@
+package redis.clients.jedis.builders;
+
+import java.util.function.Consumer;
+
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.annots.Experimental;
+import redis.clients.jedis.executors.CommandExecutor;
+import redis.clients.jedis.mcf.MultiDbCommandExecutor;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
+import redis.clients.jedis.providers.ConnectionProvider;
+
+/**
+ * Builder for creating multi-db Redis clients with multi-endpoint support.
+ *
+ * This builder provides methods specific to multi-db Redis deployments, including multiple weighted
+ * endpoints, circuit breaker configuration, health checks, and automatic failover/failback
+ * capabilities.
+ *
+ *
+ * Key Features:
+ *
+ *
+ * - Multi-Endpoint Configuration: Add multiple Redis endpoints with individual
+ * weights
+ * - Circuit Breaker Integration: Built-in circuit breaker with configurable
+ * thresholds
+ * - Health Monitoring: Automatic health checks with configurable strategies
+ * - Event Handling: Listen to database switch events for monitoring and
+ * alerting
+ * - Flexible Configuration: Support for both simple and advanced multi-database
+ * configurations
+ *
+ *
+ * Usage Examples:
+ *
+ *
+ *
+ * MultiDbClient client = MultiDbClient.builder()
+ * .multiDbConfig(
+ * MultiDbConfig.builder()
+ * .endpoint(
+ * DatabaseConfig.builder(
+ * east,
+ * DefaultJedisClientConfig.builder().credentials(credentialsEast).build())
+ * .weight(100.0f)
+ * .build())
+ * .endpoint(DatabaseConfig.builder(
+ * west,
+ * DefaultJedisClientConfig.builder().credentials(credentialsWest).build())
+ * .weight(50.0f).build())
+ * .circuitBreakerFailureRateThreshold(50.0f)
+ * .retryMaxAttempts(3)
+ * .build()
+ * )
+ * .databaseSwitchListener(event ->
+ * System.out.println("Switched to: " + event.getEndpoint()))
+ * .build();
+ *
+ *
+ * @param <C> the client type that this builder creates
+ * @author Ivo Gaydazhiev
+ * @since 7.0.0
+ */
+@Experimental
+public abstract class MultiDbClientBuilder<C>
+ extends AbstractClientBuilder<MultiDbClientBuilder<C>, C> {
+
+ // Multi-db specific configuration fields
+ private MultiDbConfig multiDbConfig = null;
+ private Consumer<DatabaseSwitchEvent> databaseSwitchListener = null;
+
+ /**
+ * Sets the multi-database configuration.
+ *
+ * This configuration controls circuit breaker behavior, retry logic, health checks, failback
+ * settings, and other resilience features. If not provided, default configuration will be used.
+ *
+ * @param config the multi-database configuration
+ * @return this builder
+ */
+ public MultiDbClientBuilder<C> multiDbConfig(MultiDbConfig config) {
+ this.multiDbConfig = config;
+ return this;
+ }
+
+ /**
+ * Sets a listener for database switch events.
+ *
+ * The listener will be called whenever the client switches from one endpoint to another,
+ * providing information about the switch reason and the new active endpoint. This is useful for
+ * monitoring, alerting, and logging purposes.
+ *
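+ * A minimal sketch:
+ *
+ * <pre>{@code
+ * builder.databaseSwitchListener(event -> System.out.println(
+ *   "Switched to " + event.getDatabaseName() + " due to " + event.getReason()));
+ * }</pre>
+ *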
+ * @param listener the database switch event listener
+ * @return this builder
+ */
+ public MultiDbClientBuilder<C> databaseSwitchListener(Consumer<DatabaseSwitchEvent> listener) {
+ this.databaseSwitchListener = listener;
+ return this;
+ }
+
+ @Override
+ protected MultiDbClientBuilder<C> self() {
+ return this;
+ }
+
+ @Override
+ protected ConnectionProvider createDefaultConnectionProvider() {
+
+ if (this.multiDbConfig == null || this.multiDbConfig.getDatabaseConfigs() == null
+ || this.multiDbConfig.getDatabaseConfigs().length < 1) {
+ throw new IllegalArgumentException("At least one endpoint must be specified");
+ }
+
+ // Create the multi-db connection provider
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(multiDbConfig);
+
+ // Set database switch listener if provided
+ if (this.databaseSwitchListener != null) {
+ provider.setDatabaseSwitchListener(this.databaseSwitchListener);
+ }
+
+ return provider;
+ }
+
+ @Override
+ protected CommandExecutor createDefaultCommandExecutor() {
+ // For multi-db clients, we always use MultiDbCommandExecutor
+ return new MultiDbCommandExecutor((MultiDbConnectionProvider) this.connectionProvider);
+ }
+
+ @Override
+ protected void validateSpecificConfiguration() {
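+ // No additional validation needed here; endpoint presence is enforced in
+ // createDefaultConnectionProvider().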
+
+ }
+
+}
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
new file mode 100644
index 0000000000..bcd6ee208d
--- /dev/null
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
@@ -0,0 +1,84 @@
+package redis.clients.jedis.mcf;
+
+import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
+import redis.clients.jedis.MultiDbConfig;
+
+/**
+ * Adapter that disables Resilience4j's built-in circuit breaker evaluation and delegates
+ * threshold decisions to Jedis's custom dual-threshold logic.
+ *
+ * This adapter sets maximum values for failure rate (100%) and minimum calls (Integer.MAX_VALUE) to
+ * effectively disable Resilience4j's automatic circuit breaker transitions, allowing
+ * {@link MultiDbConnectionProvider.Database#evaluateThresholds(boolean)} to control when the
+ * circuit breaker opens based on both minimum failure count AND failure rate.
+ *
+ * @see MultiDbConnectionProvider.Database#evaluateThresholds(boolean)
+ */
+class CircuitBreakerThresholdsAdapter {
+ /** Maximum failure rate threshold (100%) to disable Resilience4j evaluation */
+ private static final float FAILURE_RATE_THRESHOLD_MAX = 100.0f;
+
+ /** Always set to 100% to disable Resilience4j's rate-based evaluation */
+ private float failureRateThreshold;
+
+ /** Always set to Integer.MAX_VALUE to disable Resilience4j's call-count evaluation */
+ private int minimumNumberOfCalls;
+
+ /** Sliding window size from configuration for metrics collection */
+ private int slidingWindowSize;
+
+ /**
+ * Returns Integer.MAX_VALUE to disable Resilience4j's minimum call evaluation.
+ * @return Integer.MAX_VALUE to prevent automatic circuit breaker evaluation
+ */
+ int getMinimumNumberOfCalls() {
+ return minimumNumberOfCalls;
+ }
+
+ /**
+ * Returns 100% to disable Resilience4j's failure rate evaluation.
+ * @return 100.0f to prevent automatic circuit breaker evaluation
+ */
+ float getFailureRateThreshold() {
+ return failureRateThreshold;
+ }
+
+ /**
+ * Returns TIME_BASED sliding window type for metrics collection.
+ * @return SlidingWindowType.TIME_BASED
+ */
+ SlidingWindowType getSlidingWindowType() {
+ return SlidingWindowType.TIME_BASED;
+ }
+
+ /**
+ * Returns the sliding window size for metrics collection.
+ * @return sliding window size in seconds
+ */
+ int getSlidingWindowSize() {
+ return slidingWindowSize;
+ }
+
+ /**
+ * Creates an adapter that disables Resilience4j's circuit breaker evaluation.
+ *
+ * Sets failure rate to 100% and minimum calls to Integer.MAX_VALUE to ensure Resilience4j never
+ * automatically opens the circuit breaker. Instead, Jedis's custom {@code evaluateThresholds()}
+ * method controls circuit breaker state based on the original configuration's dual-threshold
+ * logic.
+ *
+ * @param multiDbConfig configuration containing sliding window size
+ */
+ CircuitBreakerThresholdsAdapter(MultiDbConfig multiDbConfig) {
+
+ // IMPORTANT: failureRateThreshold is set to the maximum to effectively disable Resilience4j's
+ // evaluation and rely on our custom evaluateThresholds() logic.
+ failureRateThreshold = FAILURE_RATE_THRESHOLD_MAX;
+
+ // IMPORTANT: minimumNumberOfCalls is set to the maximum to effectively disable Resilience4j's
+ // evaluation and rely on our custom evaluateThresholds() logic.
+ minimumNumberOfCalls = Integer.MAX_VALUE;
+
+ slidingWindowSize = multiDbConfig.getCircuitBreakerSlidingWindowSize();
+ }
+}
diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
deleted file mode 100644
index 8000e616c4..0000000000
--- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
+++ /dev/null
@@ -1,30 +0,0 @@
-package redis.clients.jedis.mcf;
-
-import redis.clients.jedis.Endpoint;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
-
-public class ClusterSwitchEventArgs {
-
- private final SwitchReason reason;
- private final String ClusterName;
- private final Endpoint Endpoint;
-
- public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Cluster cluster) {
- this.reason = reason;
- this.ClusterName = cluster.getCircuitBreaker().getName();
- this.Endpoint = endpoint;
- }
-
- public SwitchReason getReason() {
- return reason;
- }
-
- public String getClusterName() {
- return ClusterName;
- }
-
- public Endpoint getEndpoint() {
- return Endpoint;
- }
-
-}
diff --git a/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java b/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java
new file mode 100644
index 0000000000..6cc233cd7d
--- /dev/null
+++ b/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java
@@ -0,0 +1,30 @@
+package redis.clients.jedis.mcf;
+
+import redis.clients.jedis.Endpoint;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
+
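+/**
+ * Event emitted when the provider switches the active database. It carries the reason for the
+ * switch, the new endpoint, and the name of the database (taken from its circuit breaker).
+ */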
+public class DatabaseSwitchEvent {
+
+ private final SwitchReason reason;
+ private final String databaseName;
+ private final Endpoint endpoint;
+
+ public DatabaseSwitchEvent(SwitchReason reason, Endpoint endpoint, Database database) {
+ this.reason = reason;
+ this.databaseName = database.getCircuitBreaker().getName();
+ this.endpoint = endpoint;
+ }
+
+ public SwitchReason getReason() {
+ return reason;
+ }
+
+ public String getDatabaseName() {
+ return databaseName;
+ }
+
+ public Endpoint getEndpoint() {
+ return endpoint;
+ }
+
+}
diff --git a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
index 512f1609fc..6be05e2cfb 100644
--- a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
@@ -8,21 +8,22 @@
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.JedisPooled;
import redis.clients.jedis.UnifiedJedis;
-import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
+import redis.clients.jedis.MultiDbConfig.StrategySupplier;
public class EchoStrategy implements HealthCheckStrategy {
+ private static final int MAX_HEALTH_CHECK_POOL_SIZE = 2;
private final UnifiedJedis jedis;
private final HealthCheckStrategy.Config config;
public EchoStrategy(HostAndPort hostAndPort, JedisClientConfig jedisClientConfig) {
- this(hostAndPort, jedisClientConfig, HealthCheckStrategy.Config.builder().build());
+ this(hostAndPort, jedisClientConfig, HealthCheckStrategy.Config.create());
}
public EchoStrategy(HostAndPort hostAndPort, JedisClientConfig jedisClientConfig,
HealthCheckStrategy.Config config) {
GenericObjectPoolConfig<Connection> poolConfig = new GenericObjectPoolConfig<>();
- poolConfig.setMaxTotal(2);
+ poolConfig.setMaxTotal(MAX_HEALTH_CHECK_POOL_SIZE);
this.jedis = new JedisPooled(hostAndPort, jedisClientConfig, poolConfig);
this.config = config;
}
diff --git a/src/main/java/redis/clients/jedis/mcf/HealthCheckStrategy.java b/src/main/java/redis/clients/jedis/mcf/HealthCheckStrategy.java
index 7e2b43d6db..7d1e5292ca 100644
--- a/src/main/java/redis/clients/jedis/mcf/HealthCheckStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/HealthCheckStrategy.java
@@ -49,6 +49,11 @@ default void close() {
int getDelayInBetweenProbes();
public static class Config {
+ private static final int INTERVAL_DEFAULT = 5000;
+ private static final int TIMEOUT_DEFAULT = 1000;
+ private static final int NUM_PROBES_DEFAULT = 3;
+ private static final int DELAY_IN_BETWEEN_PROBES_DEFAULT = 500;
+
protected final int interval;
protected final int timeout;
protected final int numProbes;
@@ -97,14 +102,14 @@ public ProbingPolicy getPolicy() {
* @return a new Config instance
*/
public static Config create() {
- return new Builder<>().build();
+ return builder().build();
}
/**
* Create a new builder for HealthCheckStrategy.Config.
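+ * <p>
+ * A minimal sketch (values are illustrative; times are in milliseconds):
+ *
+ * <pre>{@code
+ * HealthCheckStrategy.Config config = HealthCheckStrategy.Config.builder()
+ *     .interval(5000).timeout(1000).numProbes(3).build();
+ * }</pre>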
* @return a new Builder instance
*/
- public static Builder<?, Config> builder() {
+ public static Builder<?, ? extends Config> builder() {
return new Builder<>();
}
@@ -114,11 +119,11 @@ public static Builder<?, Config> builder() {
* @param <C> the config type being built
*/
public static class Builder<B extends Builder<B, C>, C extends Config> {
- protected int interval = 1000;
- protected int timeout = 1000;
- protected int numProbes = 3;
+ protected int interval = INTERVAL_DEFAULT;
+ protected int timeout = TIMEOUT_DEFAULT;
+ protected int numProbes = NUM_PROBES_DEFAULT;
protected ProbingPolicy policy = ProbingPolicy.BuiltIn.ALL_SUCCESS;
- protected int delayInBetweenProbes = 100;
+ protected int delayInBetweenProbes = DELAY_IN_BETWEEN_PROBES_DEFAULT;
/**
* Set the interval between health checks in milliseconds.
diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
index 3543517703..c431764d42 100644
--- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
+++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
@@ -11,7 +11,7 @@
* @see JedisFailoverException.JedisTemporarilyNotAvailableException
*/
public class JedisFailoverException extends JedisConnectionException {
- private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiClusterClientConfig was not "
+ private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiDbConfig was not "
+ "provided with an additional cluster/database endpoint according to its prioritized sequence. "
+ "If applicable, consider falling back OR restarting with an available cluster/database endpoint";
@@ -28,9 +28,8 @@ public JedisFailoverException() {
* the max number of failover attempts has been exceeded. And there is still no healthy cluster.
*
* See the configuration properties
- * {@link redis.clients.jedis.MultiClusterClientConfig#maxNumFailoverAttempts} and
- * {@link redis.clients.jedis.MultiClusterClientConfig#delayInBetweenFailoverAttempts} for more
- * details.
+ * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and
+ * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more details.
*/
public static class JedisPermanentlyNotAvailableException extends JedisFailoverException {
public JedisPermanentlyNotAvailableException(String s) {
@@ -49,9 +48,8 @@ public JedisPermanentlyNotAvailableException() {
* temporary condition and it is possible that there will be a healthy cluster available.
*
* See the configuration properties
- * {@link redis.clients.jedis.MultiClusterClientConfig#maxNumFailoverAttempts} and
- * {@link redis.clients.jedis.MultiClusterClientConfig#delayInBetweenFailoverAttempts} for more
- * details.
+ * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and
+ * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more details.
*/
public static class JedisTemporarilyNotAvailableException extends JedisFailoverException {
diff --git a/src/main/java/redis/clients/jedis/mcf/LagAwareStrategy.java b/src/main/java/redis/clients/jedis/mcf/LagAwareStrategy.java
index fcbe0a11ec..03489b7be3 100644
--- a/src/main/java/redis/clients/jedis/mcf/LagAwareStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/LagAwareStrategy.java
@@ -94,7 +94,7 @@ public HealthStatus doHealthCheck(Endpoint endpoint) {
public static class Config extends HealthCheckStrategy.Config {
public static final boolean EXTENDED_CHECK_DEFAULT = true;
- public static final Duration AVAILABILITY_LAG_TOLERANCE_DEFAULT = Duration.ofMillis(100);
+ public static final Duration AVAILABILITY_LAG_TOLERANCE_DEFAULT = Duration.ofMillis(5000);
private final Endpoint restEndpoint;
private final Supplier<RedisCredentials> credentialsSupplier;
@@ -102,7 +102,7 @@ public static class Config extends HealthCheckStrategy.Config {
// SSL configuration for HTTPS connections to Redis Enterprise REST API
private final SslOptions sslOptions;
- // Maximum acceptable lag in milliseconds (default: 100);
+ // Maximum acceptable lag in milliseconds (default: 5000);
private final Duration availability_lag_tolerance;
// Enable extended lag checking (default: true - performs lag validation in addition to standard
@@ -111,7 +111,7 @@ public static class Config extends HealthCheckStrategy.Config {
private final boolean extendedCheckEnabled;
public Config(Endpoint restEndpoint, Supplier credentialsSupplier) {
- this(builder(restEndpoint, credentialsSupplier).interval(1000).timeout(1000).numProbes(3)
+ this(builder(restEndpoint, credentialsSupplier)
.availabilityLagTolerance(AVAILABILITY_LAG_TOLERANCE_DEFAULT)
.extendedCheckEnabled(EXTENDED_CHECK_DEFAULT));
}
@@ -157,6 +157,15 @@ public static ConfigBuilder builder(Endpoint restEndpoint,
return new ConfigBuilder(restEndpoint, credentialsSupplier);
}
+ /**
+ * Use {@link LagAwareStrategy.Config#builder(Endpoint, Supplier)} instead.
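+ * <p>
+ * A minimal sketch of the supported builder (REST endpoint and credentials supplier are
+ * illustrative):
+ *
+ * <pre>{@code
+ * LagAwareStrategy.Config cfg = LagAwareStrategy.Config.builder(restEndpoint, credentialsSupplier)
+ *     .extendedCheckEnabled(true)
+ *     .availabilityLagTolerance(Duration.ofMillis(5000))
+ *     .build();
+ * }</pre>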
+ * @throws UnsupportedOperationException always, since an endpoint and credentials are required
+ */
+ public static ConfigBuilder builder() {
+ throw new UnsupportedOperationException(
+ "Endpoint and credentials are required to build LagAwareStrategy.Config.");
+ }
+
/**
* Create a new Config instance with default values.
*
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
similarity index 65%
rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
index 06c97a4e90..d3b7c48e2e 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
@@ -1,6 +1,6 @@
package redis.clients.jedis.mcf;
-import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+import io.github.resilience4j.circuitbreaker.CircuitBreaker.State;
import io.github.resilience4j.decorators.Decorators;
import io.github.resilience4j.decorators.Decorators.DecorateSupplier;
@@ -9,7 +9,7 @@
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.executors.CommandExecutor;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
/**
* @author Allen Terleto (aterleto)
@@ -21,32 +21,37 @@
*
*/
@Experimental
-public class CircuitBreakerCommandExecutor extends CircuitBreakerFailoverBase
- implements CommandExecutor {
+public class MultiDbCommandExecutor extends MultiDbFailoverBase implements CommandExecutor {
- public CircuitBreakerCommandExecutor(MultiClusterPooledConnectionProvider provider) {
+ public MultiDbCommandExecutor(MultiDbConnectionProvider provider) {
super(provider);
}
@Override
public <T> T executeCommand(CommandObject<T> commandObject) {
- Cluster cluster = provider.getCluster(); // Pass this by reference for thread safety
+ Database database = provider.getDatabase(); // Pass this by reference for thread safety
DecorateSupplier<T> supplier = Decorators
- .ofSupplier(() -> this.handleExecuteCommand(commandObject, cluster));
+ .ofSupplier(() -> this.handleExecuteCommand(commandObject, database));
- supplier.withCircuitBreaker(cluster.getCircuitBreaker());
- supplier.withRetry(cluster.getRetry());
+ supplier.withCircuitBreaker(database.getCircuitBreaker());
+ supplier.withRetry(database.getRetry());
supplier.withFallback(provider.getFallbackExceptionList(),
- e -> this.handleClusterFailover(commandObject, cluster));
-
- return supplier.decorate().get();
+ e -> this.handleClusterFailover(commandObject, database));
+ try {
+ return supplier.decorate().get();
+ } catch (Exception e) {
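+ // If this database is still the active one but its circuit breaker has opened,
+ // proactively fail over before rethrowing so subsequent calls hit a healthy database.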
+ if (database.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(database)) {
+ clusterFailover(database);
+ }
+ throw e;
+ }
}
/**
* Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios
*/
- private <T> T handleExecuteCommand(CommandObject<T> commandObject, Cluster cluster) {
+ private <T> T handleExecuteCommand(CommandObject<T> commandObject, Database cluster) {
Connection connection;
try {
connection = cluster.getConnection();
@@ -57,32 +62,22 @@ private T handleExecuteCommand(CommandObject commandObject, Cluster clust
try {
return connection.executeCommand(commandObject);
} catch (Exception e) {
- if (cluster.retryOnFailover() && !isActiveCluster(cluster)
- && isCircuitBreakerTrackedException(e, cluster.getCircuitBreaker())) {
+ if (cluster.retryOnFailover() && !isActiveDatabase(cluster)
+ && isCircuitBreakerTrackedException(e, cluster)) {
throw new ConnectionFailoverException(
"Command failed during failover: " + cluster.getCircuitBreaker().getName(), e);
}
-
throw e;
} finally {
connection.close();
}
}
- private boolean isCircuitBreakerTrackedException(Exception e, CircuitBreaker cb) {
- return cb.getCircuitBreakerConfig().getRecordExceptionPredicate().test(e);
- }
-
- private boolean isActiveCluster(Cluster cluster) {
- Cluster activeCluster = provider.getCluster();
- return activeCluster != null && activeCluster.equals(cluster);
- }
-
/**
* Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
* failure scenarios
*/
- private <T> T handleClusterFailover(CommandObject<T> commandObject, Cluster cluster) {
+ private <T> T handleClusterFailover(CommandObject<T> commandObject, Database cluster) {
clusterFailover(cluster);
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
similarity index 56%
rename from src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
index 50d7cfd30c..8d515627f3 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
@@ -1,6 +1,7 @@
package redis.clients.jedis.mcf;
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+import io.github.resilience4j.circuitbreaker.CircuitBreaker.Metrics;
import io.github.resilience4j.circuitbreaker.CircuitBreaker.State;
import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;
import io.github.resilience4j.circuitbreaker.CircuitBreakerRegistry;
@@ -11,8 +12,10 @@
import java.util.Collections;
import java.util.Comparator;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@@ -30,7 +33,7 @@
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.annots.Experimental;
import redis.clients.jedis.annots.VisibleForTesting;
import redis.clients.jedis.exceptions.JedisConnectionException;
@@ -38,8 +41,7 @@
import redis.clients.jedis.exceptions.JedisValidationException;
import redis.clients.jedis.mcf.JedisFailoverException.*;
import redis.clients.jedis.providers.ConnectionProvider;
-import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
-
+import redis.clients.jedis.MultiDbConfig.StrategySupplier;
import redis.clients.jedis.util.Pool;
/**
@@ -49,38 +51,36 @@
* isolated connection pool. With this ConnectionProvider users can seamlessly failover to
* Disaster Recovery (DR), Backup, and Active-Active cluster(s) by using simple
* configuration which is passed through from Resilience4j -
- * https://resilience4j.readme.io/docs
+ * <a href="https://resilience4j.readme.io/docs">docs</a>
*
- * Support for manual failback is provided by way of {@link #setActiveCluster(Endpoint)}
+ * Support for manual failback is provided by way of {@link #setActiveDatabase(Endpoint)}
*
*/
-// TODO: move?
@Experimental
-public class MultiClusterPooledConnectionProvider implements ConnectionProvider {
+public class MultiDbConnectionProvider implements ConnectionProvider {
private final Logger log = LoggerFactory.getLogger(getClass());
/**
- * Ordered map of cluster/database endpoints which were provided at startup via the
- * MultiClusterClientConfig. Users can move down (failover) or (up) failback the map depending on
+ * Ordered map of databases. Users can move down (failover) or up (failback) the map depending on
* their availability and order.
*/
- private final Map<Endpoint, Cluster> multiClusterMap = new ConcurrentHashMap<>();
+ private final Map<Endpoint, Database> databaseMap = new ConcurrentHashMap<>();
/**
- * Indicates the actively used cluster/database endpoint (connection pool) amongst the
- * pre-configured list which were provided at startup via the MultiClusterClientConfig. All
- * traffic will be routed with this cluster/database
+ * Indicates the actively used database endpoint (connection pool) amongst the pre-configured list
+ * which was provided at startup via the MultiDbConfig. All traffic will be routed to this
+ * database
*/
- private volatile Cluster activeCluster;
+ private volatile Database activeDatabase;
- private final Lock activeClusterChangeLock = new ReentrantLock(true);
+ private final Lock activeDatabaseChangeLock = new ReentrantLock(true);
/**
* Functional interface for listening to cluster switch events. The event args contain the reason
* for the switch, the endpoint, and the cluster.
*/
- private Consumer<ClusterSwitchEventArgs> clusterSwitchListener;
+ private Consumer<DatabaseSwitchEvent> databaseSwitchListener;
private List<Class<? extends Throwable>> fallbackExceptionList;
@@ -98,33 +98,33 @@ public class MultiClusterPooledConnectionProvider implements ConnectionProvider
return t;
});
- // Store retry and circuit breaker configs for dynamic cluster addition/removal
+ // Store retry and circuit breaker configs for dynamic database addition/removal
private RetryConfig retryConfig;
private CircuitBreakerConfig circuitBreakerConfig;
- private MultiClusterClientConfig multiClusterClientConfig;
+ private MultiDbConfig multiDbConfig;
private AtomicLong failoverFreezeUntil = new AtomicLong(0);
private AtomicInteger failoverAttemptCount = new AtomicInteger(0);
- public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiClusterClientConfig) {
+ public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) {
- if (multiClusterClientConfig == null) throw new JedisValidationException(
- "MultiClusterClientConfig must not be NULL for MultiClusterPooledConnectionProvider");
+ if (multiDbConfig == null) throw new JedisValidationException(
+ "MultiDbConfig must not be NULL for MultiDbConnectionProvider");
- this.multiClusterClientConfig = multiClusterClientConfig;
+ this.multiDbConfig = multiDbConfig;
////////////// Configure Retry ////////////////////
RetryConfig.Builder retryConfigBuilder = RetryConfig.custom();
- retryConfigBuilder.maxAttempts(multiClusterClientConfig.getRetryMaxAttempts());
+ retryConfigBuilder.maxAttempts(multiDbConfig.getRetryMaxAttempts());
retryConfigBuilder.intervalFunction(
- IntervalFunction.ofExponentialBackoff(multiClusterClientConfig.getRetryWaitDuration(),
- multiClusterClientConfig.getRetryWaitDurationExponentialBackoffMultiplier()));
+ IntervalFunction.ofExponentialBackoff(multiDbConfig.getRetryWaitDuration(),
+ multiDbConfig.getRetryWaitDurationExponentialBackoffMultiplier()));
retryConfigBuilder.failAfterMaxAttempts(false); // JedisConnectionException will be thrown
retryConfigBuilder.retryExceptions(
- multiClusterClientConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new));
+ multiDbConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new));
- List<Class<? extends Throwable>> retryIgnoreExceptionList = multiClusterClientConfig.getRetryIgnoreExceptionList();
+ List<Class<? extends Throwable>> retryIgnoreExceptionList = multiDbConfig.getRetryIgnoreExceptionList();
if (retryIgnoreExceptionList != null)
retryConfigBuilder.ignoreExceptions(retryIgnoreExceptionList.stream().toArray(Class[]::new));
@@ -133,20 +133,15 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste
////////////// Configure Circuit Breaker ////////////////////
CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom();
- circuitBreakerConfigBuilder
- .failureRateThreshold(multiClusterClientConfig.getCircuitBreakerFailureRateThreshold());
- circuitBreakerConfigBuilder
- .slowCallRateThreshold(multiClusterClientConfig.getCircuitBreakerSlowCallRateThreshold());
- circuitBreakerConfigBuilder.slowCallDurationThreshold(
- multiClusterClientConfig.getCircuitBreakerSlowCallDurationThreshold());
- circuitBreakerConfigBuilder
- .minimumNumberOfCalls(multiClusterClientConfig.getCircuitBreakerSlidingWindowMinCalls());
- circuitBreakerConfigBuilder
- .slidingWindowType(multiClusterClientConfig.getCircuitBreakerSlidingWindowType());
- circuitBreakerConfigBuilder
- .slidingWindowSize(multiClusterClientConfig.getCircuitBreakerSlidingWindowSize());
- circuitBreakerConfigBuilder.recordExceptions(multiClusterClientConfig
- .getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
+
+ CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(multiDbConfig);
+ circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls());
+ circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold());
+ circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize());
+ circuitBreakerConfigBuilder.slidingWindowType(adapter.getSlidingWindowType());
+
+ circuitBreakerConfigBuilder.recordExceptions(
+ multiDbConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
circuitBreakerConfigBuilder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State
// transitions
// are
@@ -155,45 +150,45 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste
// states
// are used
- List<Class<? extends Throwable>> circuitBreakerIgnoreExceptionList = multiClusterClientConfig
+ List<Class<? extends Throwable>> circuitBreakerIgnoreExceptionList = multiDbConfig
.getCircuitBreakerIgnoreExceptionList();
if (circuitBreakerIgnoreExceptionList != null) circuitBreakerConfigBuilder
.ignoreExceptions(circuitBreakerIgnoreExceptionList.stream().toArray(Class[]::new));
this.circuitBreakerConfig = circuitBreakerConfigBuilder.build();
- ////////////// Configure Cluster Map ////////////////////
+ ////////////// Configure Database Map ////////////////////
- ClusterConfig[] clusterConfigs = multiClusterClientConfig.getClusterConfigs();
+ DatabaseConfig[] databaseConfigs = multiDbConfig.getDatabaseConfigs();
- // Now add clusters - health checks will start but events will be queued
- for (ClusterConfig config : clusterConfigs) {
- addClusterInternal(multiClusterClientConfig, config);
+ // Now add databases - health checks will start but events will be queued
+ for (DatabaseConfig config : databaseConfigs) {
+ addClusterInternal(multiDbConfig, config);
}
// Initialize StatusTracker for waiting on health check results
StatusTracker statusTracker = new StatusTracker(healthStatusManager);
// Wait for initial health check results and select active cluster based on weights
- activeCluster = waitForInitialHealthyCluster(statusTracker);
+ activeDatabase = waitForInitialHealthyCluster(statusTracker);
// Mark initialization as complete - handleHealthStatusChange can now process events
initializationComplete = true;
- Cluster temp = activeCluster;
+ Database temp = activeDatabase;
if (!temp.isHealthy()) {
- // Race condition: Direct assignment to 'activeCluster' is not thread safe because
+ // Race condition: Direct assignment to 'activeDatabase' is not thread safe because
// 'onHealthStatusChange' may execute concurrently once 'initializationComplete'
// is set to true.
- // Simple rule is to never assign value of 'activeCluster' outside of
- // 'activeClusterChangeLock' once the 'initializationComplete' is done.
+ // Simple rule is to never assign value of 'activeDatabase' outside of
+ // 'activeDatabaseChangeLock' once the 'initializationComplete' is done.
waitForInitialHealthyCluster(statusTracker);
- switchToHealthyCluster(SwitchReason.HEALTH_CHECK, temp);
+ switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, temp);
}
- this.fallbackExceptionList = multiClusterClientConfig.getFallbackExceptionList();
+ this.fallbackExceptionList = multiDbConfig.getFallbackExceptionList();
// Start periodic failback checker
- if (multiClusterClientConfig.isFailbackSupported()) {
- long failbackInterval = multiClusterClientConfig.getFailbackCheckInterval();
+ if (multiDbConfig.isFailbackSupported()) {
+ long failbackInterval = multiDbConfig.getFailbackCheckInterval();
failbackScheduler.scheduleAtFixedRate(this::periodicFailbackCheck, failbackInterval,
failbackInterval, TimeUnit.MILLISECONDS);
}
@@ -201,25 +196,25 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste
/**
* Adds a new cluster endpoint to the provider.
- * @param clusterConfig the configuration for the new cluster
+ * @param databaseConfig the configuration for the new database
* @throws JedisValidationException if the endpoint already exists
*/
- public void add(ClusterConfig clusterConfig) {
- if (clusterConfig == null) {
- throw new JedisValidationException("ClusterConfig must not be null");
+ public void add(DatabaseConfig databaseConfig) {
+ if (databaseConfig == null) {
+ throw new JedisValidationException("DatabaseConfig must not be null");
}
- Endpoint endpoint = clusterConfig.getHostAndPort();
- if (multiClusterMap.containsKey(endpoint)) {
+ Endpoint endpoint = databaseConfig.getEndpoint();
+ if (databaseMap.containsKey(endpoint)) {
throw new JedisValidationException(
"Endpoint " + endpoint + " already exists in the provider");
}
- activeClusterChangeLock.lock();
+ activeDatabaseChangeLock.lock();
try {
- addClusterInternal(multiClusterClientConfig, clusterConfig);
+ addClusterInternal(multiDbConfig, databaseConfig);
} finally {
- activeClusterChangeLock.unlock();
+ activeDatabaseChangeLock.unlock();
}
}
@@ -234,35 +229,35 @@ public void remove(Endpoint endpoint) {
throw new JedisValidationException("Endpoint must not be null");
}
- if (!multiClusterMap.containsKey(endpoint)) {
+ if (!databaseMap.containsKey(endpoint)) {
throw new JedisValidationException(
"Endpoint " + endpoint + " does not exist in the provider");
}
- if (multiClusterMap.size() < 2) {
+ if (databaseMap.size() < 2) {
throw new JedisValidationException("Cannot remove the last remaining endpoint");
}
log.debug("Removing endpoint {}", endpoint);
- Map.Entry<Endpoint, Cluster> notificationData = null;
- activeClusterChangeLock.lock();
+ Map.Entry<Endpoint, Database> notificationData = null;
+ activeDatabaseChangeLock.lock();
try {
- Cluster clusterToRemove = multiClusterMap.get(endpoint);
- boolean isActiveCluster = (activeCluster == clusterToRemove);
+ Database databaseToRemove = databaseMap.get(endpoint);
+ boolean isActiveDatabase = (activeDatabase == databaseToRemove);
- if (isActiveCluster) {
+ if (isActiveDatabase) {
log.info("Active cluster is being removed. Finding a new active cluster...");
- Map.Entry<Endpoint, Cluster> candidate = findWeightedHealthyClusterToIterate(
- clusterToRemove);
+ Map.Entry<Endpoint, Database> candidate = findWeightedHealthyClusterToIterate(
+ databaseToRemove);
if (candidate != null) {
- Cluster selectedCluster = candidate.getValue();
- if (setActiveCluster(selectedCluster, true)) {
+ Database selectedCluster = candidate.getValue();
+ if (setActiveDatabase(selectedCluster, true)) {
log.info("New active cluster set to {}", candidate.getKey());
notificationData = candidate;
}
} else {
throw new JedisException(
- "Cluster can not be removed due to no healthy cluster available to switch!");
+ "Database can not be removed due to no healthy cluster available to switch!");
}
}
@@ -271,15 +266,15 @@ public void remove(Endpoint endpoint) {
healthStatusManager.remove(endpoint);
// Remove from cluster map
- multiClusterMap.remove(endpoint);
+ databaseMap.remove(endpoint);
// Close the cluster resources
- if (clusterToRemove != null) {
- clusterToRemove.setDisabled(true);
- clusterToRemove.close();
+ if (databaseToRemove != null) {
+ databaseToRemove.setDisabled(true);
+ databaseToRemove.close();
}
} finally {
- activeClusterChangeLock.unlock();
+ activeDatabaseChangeLock.unlock();
}
if (notificationData != null) {
onClusterSwitch(SwitchReason.FORCED, notificationData.getKey(), notificationData.getValue());
@@ -287,17 +282,16 @@ public void remove(Endpoint endpoint) {
}
/**
- * Internal method to add a cluster configuration. This method is not thread-safe and should be
+ * Internal method to add a database configuration. This method is not thread-safe and should be
* called within appropriate locks.
*/
- private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfig,
- ClusterConfig config) {
- if (multiClusterMap.containsKey(config.getHostAndPort())) {
+ private void addClusterInternal(MultiDbConfig multiDbConfig, DatabaseConfig config) {
+ if (databaseMap.containsKey(config.getEndpoint())) {
throw new JedisValidationException(
- "Endpoint " + config.getHostAndPort() + " already exists in the provider");
+ "Endpoint " + config.getEndpoint() + " already exists in the provider");
}
- String clusterId = "cluster:" + config.getHostAndPort();
+ String clusterId = "database:" + config.getEndpoint();
Retry retry = RetryRegistry.of(retryConfig).retry(clusterId);
@@ -315,25 +309,35 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi
circuitBreakerEventPublisher.onSlowCallRateExceeded(event -> log.error(String.valueOf(event)));
TrackingConnectionPool pool = TrackingConnectionPool.builder()
- .hostAndPort(config.getHostAndPort()).clientConfig(config.getJedisClientConfig())
+ .hostAndPort(hostPort(config.getEndpoint())).clientConfig(config.getJedisClientConfig())
.poolConfig(config.getConnectionPoolConfig()).build();
- Cluster cluster;
+ Database database;
StrategySupplier strategySupplier = config.getHealthCheckStrategySupplier();
if (strategySupplier != null) {
- HealthCheckStrategy hcs = strategySupplier.get(config.getHostAndPort(),
+ HealthCheckStrategy hcs = strategySupplier.get(hostPort(config.getEndpoint()),
config.getJedisClientConfig());
// Register listeners BEFORE adding clusters to avoid missing events
- healthStatusManager.registerListener(config.getHostAndPort(), this::onHealthStatusChange);
- HealthCheck hc = healthStatusManager.add(config.getHostAndPort(), hcs);
- cluster = new Cluster(pool, retry, hc, circuitBreaker, config.getWeight(),
- multiClusterClientConfig);
+ healthStatusManager.registerListener(config.getEndpoint(), this::onHealthStatusChange);
+ HealthCheck hc = healthStatusManager.add(config.getEndpoint(), hcs);
+ database = new Database(config.getEndpoint(), pool, retry, hc, circuitBreaker,
+ config.getWeight(), multiDbConfig);
} else {
- cluster = new Cluster(pool, retry, circuitBreaker, config.getWeight(),
- multiClusterClientConfig);
+ database = new Database(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(),
+ multiDbConfig);
}
- multiClusterMap.put(config.getHostAndPort(), cluster);
+ databaseMap.put(config.getEndpoint(), database);
+
+ // this is the place where we listen tracked errors and check if
+ // thresholds are exceeded for the database
+ circuitBreakerEventPublisher.onError(event -> {
+ database.evaluateThresholds(false);
+ });
+ }
+
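+ /**
+ * Adapts a generic {@link Endpoint} to the concrete {@link HostAndPort} required by the
+ * connection pool and health check strategies.
+ */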
+ private HostAndPort hostPort(Endpoint endpoint) {
+ return new HostAndPort(endpoint.getHost(), endpoint.getPort());
}
/**
@@ -346,14 +350,14 @@ void onHealthStatusChange(HealthStatusChangeEvent eventArgs) {
HealthStatus newStatus = eventArgs.getNewStatus();
log.debug("Health status changed for {} from {} to {}", endpoint, eventArgs.getOldStatus(),
newStatus);
- Cluster clusterWithHealthChange = multiClusterMap.get(endpoint);
+ Database clusterWithHealthChange = databaseMap.get(endpoint);
if (clusterWithHealthChange == null) return;
if (initializationComplete) {
- if (!newStatus.isHealthy() && clusterWithHealthChange == activeCluster) {
+ if (!newStatus.isHealthy() && clusterWithHealthChange == activeDatabase) {
clusterWithHealthChange.setGracePeriod();
- switchToHealthyCluster(SwitchReason.HEALTH_CHECK, clusterWithHealthChange);
+ switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, clusterWithHealthChange);
}
}
}
@@ -366,46 +370,46 @@ void onHealthStatusChange(HealthStatusChangeEvent eventArgs) {
* @return the first healthy cluster found, ordered by weight (highest first)
* @throws JedisConnectionException if all clusters are unhealthy
*/
- private Cluster waitForInitialHealthyCluster(StatusTracker statusTracker) {
+ private Database waitForInitialHealthyCluster(StatusTracker statusTracker) {
// Sort clusters by weight in descending order
- List<Map.Entry<Endpoint, Cluster>> sortedClusters = multiClusterMap.entrySet().stream()
- .sorted(Map.Entry.<Endpoint, Cluster> comparingByValue(
- Comparator.comparing(Cluster::getWeight).reversed()))
+ List<Map.Entry<Endpoint, Database>> sortedClusters = databaseMap.entrySet().stream()
+ .sorted(Map.Entry.<Endpoint, Database> comparingByValue(
+ Comparator.comparing(Database::getWeight).reversed()))
.collect(Collectors.toList());
log.info("Selecting initial cluster from {} configured clusters", sortedClusters.size());
// Select cluster in weight order
- for (Map.Entry<Endpoint, Cluster> entry : sortedClusters) {
+ for (Map.Entry<Endpoint, Database> entry : sortedClusters) {
Endpoint endpoint = entry.getKey();
- Cluster cluster = entry.getValue();
+ Database database = entry.getValue();
- log.info("Evaluating cluster {} (weight: {})", endpoint, cluster.getWeight());
+ log.info("Evaluating database {} (weight: {})", endpoint, database.getWeight());
HealthStatus status;
// Check if health checks are enabled for this endpoint
if (healthStatusManager.hasHealthCheck(endpoint)) {
log.info("Health checks enabled for {}, waiting for result", endpoint);
- // Wait for this cluster's health status to be determined
+ // Wait for this database's health status to be determined
status = statusTracker.waitForHealthStatus(endpoint);
} else {
// No health check configured - assume healthy
- log.info("No health check configured for cluster {}, defaulting to HEALTHY", endpoint);
+ log.info("No health check configured for database {}, defaulting to HEALTHY", endpoint);
status = HealthStatus.HEALTHY;
}
if (status.isHealthy()) {
- log.info("Found healthy cluster: {} (weight: {})", endpoint, cluster.getWeight());
- return cluster;
+ log.info("Found healthy database: {} (weight: {})", endpoint, database.getWeight());
+ return database;
} else {
- log.info("Cluster {} is unhealthy, trying next cluster", endpoint);
+ log.info("Database {} is unhealthy, trying next database", endpoint);
}
}
// All clusters are unhealthy
throw new JedisConnectionException(
- "All configured clusters are unhealthy. Cannot initialize MultiClusterPooledConnectionProvider.");
+ "All configured clusters are unhealthy. Cannot initialize MultiDbConnectionProvider.");
}
/**
@@ -415,36 +419,36 @@ private Cluster waitForInitialHealthyCluster(StatusTracker statusTracker) {
void periodicFailbackCheck() {
try {
// Find the best candidate cluster for failback
- Map.Entry<Endpoint, Cluster> bestCandidate = null;
- float bestWeight = activeCluster.getWeight();
+ Map.Entry<Endpoint, Database> bestCandidate = null;
+ float bestWeight = activeDatabase.getWeight();
- for (Map.Entry<Endpoint, Cluster> entry : multiClusterMap.entrySet()) {
- Cluster cluster = entry.getValue();
+ for (Map.Entry<Endpoint, Database> entry : databaseMap.entrySet()) {
+ Database database = entry.getValue();
- // Skip if this is already the active cluster
- if (cluster == activeCluster) {
+ // Skip if this is already the active database
+ if (database == activeDatabase) {
continue;
}
- // Skip if cluster is not healthy
- if (!cluster.isHealthy()) {
+ // Skip if database is not healthy
+ if (!database.isHealthy()) {
continue;
}
- // This cluster is a valid candidate
- if (cluster.getWeight() > bestWeight) {
+ // This database is a valid candidate
+ if (database.getWeight() > bestWeight) {
bestCandidate = entry;
- bestWeight = cluster.getWeight();
+ bestWeight = database.getWeight();
}
}
// Perform failback if we found a better candidate
if (bestCandidate != null) {
- Cluster selectedCluster = bestCandidate.getValue();
+ Database selectedCluster = bestCandidate.getValue();
log.info("Performing failback from {} to {} (higher weight cluster available)",
- activeCluster.getCircuitBreaker().getName(),
+ activeDatabase.getCircuitBreaker().getName(),
selectedCluster.getCircuitBreaker().getName());
- if (setActiveCluster(selectedCluster, true)) {
+ if (setActiveDatabase(selectedCluster, true)) {
onClusterSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedCluster);
}
}
@@ -453,24 +457,24 @@ void periodicFailbackCheck() {
}
}
- Endpoint switchToHealthyCluster(SwitchReason reason, Cluster iterateFrom) {
- Map.Entry<Endpoint, Cluster> clusterToIterate = findWeightedHealthyClusterToIterate(
+ Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) {
+ Map.Entry<Endpoint, Database> databaseToIterate = findWeightedHealthyClusterToIterate(
iterateFrom);
- if (clusterToIterate == null) {
+ if (databaseToIterate == null) {
// throws exception anyway since not able to iterate
handleNoHealthyCluster();
}
- Cluster cluster = clusterToIterate.getValue();
- boolean changed = setActiveCluster(cluster, false);
+ Database database = databaseToIterate.getValue();
+ boolean changed = setActiveDatabase(database, false);
if (!changed) return null;
failoverAttemptCount.set(0);
- onClusterSwitch(reason, clusterToIterate.getKey(), cluster);
- return clusterToIterate.getKey();
+ onClusterSwitch(reason, databaseToIterate.getKey(), database);
+ return databaseToIterate.getKey();
}
private void handleNoHealthyCluster() {
- int max = multiClusterClientConfig.getMaxNumFailoverAttempts();
+ int max = multiDbConfig.getMaxNumFailoverAttempts();
log.error("No healthy cluster available to switch to");
if (failoverAttemptCount.get() > max) {
throw new JedisPermanentlyNotAvailableException();
@@ -489,7 +493,7 @@ private boolean markAsFreeze() {
long until = failoverFreezeUntil.get();
long now = System.currentTimeMillis();
if (until <= now) {
- long nextUntil = now + multiClusterClientConfig.getDelayInBetweenFailoverAttempts();
+ long nextUntil = now + multiDbConfig.getDelayInBetweenFailoverAttempts();
if (failoverFreezeUntil.compareAndSet(until, nextUntil)) {
return true;
}
@@ -508,20 +512,20 @@ private boolean markAsFreeze() {
*/
@VisibleForTesting
public void assertOperability() {
- Cluster current = activeCluster;
+ Database current = activeDatabase;
if (!current.isHealthy() && !this.canIterateFrom(current)) {
handleNoHealthyCluster();
}
}
- private static Comparator<Map.Entry<Endpoint, Cluster>> maxByWeight = Map.Entry
- .<Endpoint, Cluster> comparingByValue(Comparator.comparing(Cluster::getWeight));
+ private static Comparator<Map.Entry<Endpoint, Database>> maxByWeight = Map.Entry
+ .<Endpoint, Database> comparingByValue(Comparator.comparing(Database::getWeight));
- private static Predicate<Map.Entry<Endpoint, Cluster>> filterByHealth = c -> c.getValue()
+ private static Predicate<Map.Entry<Endpoint, Database>> filterByHealth = c -> c.getValue()
.isHealthy();
- private Map.Entry<Endpoint, Cluster> findWeightedHealthyClusterToIterate(Cluster iterateFrom) {
- return multiClusterMap.entrySet().stream().filter(filterByHealth)
+ private Map.Entry<Endpoint, Database> findWeightedHealthyClusterToIterate(Database iterateFrom) {
+ return databaseMap.entrySet().stream().filter(filterByHealth)
.filter(entry -> entry.getValue() != iterateFrom).max(maxByWeight).orElse(null);
}
@@ -532,12 +536,12 @@ private Map.Entry findWeightedHealthyClusterToIterate(Cluster
* from the target connection.
*/
public void validateTargetConnection(Endpoint endpoint) {
- Cluster cluster = multiClusterMap.get(endpoint);
- validateTargetConnection(cluster);
+ Database database = databaseMap.get(endpoint);
+ validateTargetConnection(database);
}
- private void validateTargetConnection(Cluster cluster) {
- CircuitBreaker circuitBreaker = cluster.getCircuitBreaker();
+ private void validateTargetConnection(Database database) {
+ CircuitBreaker circuitBreaker = database.getCircuitBreaker();
State originalState = circuitBreaker.getState();
try {
@@ -548,7 +552,7 @@ private void validateTargetConnection(Cluster cluster) {
// yet
circuitBreaker.transitionToClosedState();
- try (Connection targetConnection = cluster.getConnection()) {
+ try (Connection targetConnection = database.getConnection()) {
targetConnection.ping();
}
} catch (Exception e) {
@@ -563,68 +567,82 @@ private void validateTargetConnection(Cluster cluster) {
}
}
- public void setActiveCluster(Endpoint endpoint) {
+ /**
+ * Returns the set of all configured endpoints.
+ * @return the set of configured endpoints
+ */
+ public Set<Endpoint> getEndpoints() {
+ return new HashSet<>(databaseMap.keySet());
+ }
+
+ public void setActiveDatabase(Endpoint endpoint) {
if (endpoint == null) {
throw new JedisValidationException(
"Provided endpoint is null. Please use one from the configuration");
}
- Cluster cluster = multiClusterMap.get(endpoint);
- if (cluster == null) {
+ Database database = databaseMap.get(endpoint);
+ if (database == null) {
throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within "
+ "the configured endpoints. Please use one from the configuration");
}
- if (setActiveCluster(cluster, true)) {
- onClusterSwitch(SwitchReason.FORCED, endpoint, cluster);
+ if (setActiveDatabase(database, true)) {
+ onClusterSwitch(SwitchReason.FORCED, endpoint, database);
}
}
- public void forceActiveCluster(Endpoint endpoint, long forcedActiveDuration) {
- Cluster cluster = multiClusterMap.get(endpoint);
- cluster.clearGracePeriod();
- if (!cluster.isHealthy()) {
+ public void forceActiveDatabase(Endpoint endpoint, long forcedActiveDuration) {
+ Database database = databaseMap.get(endpoint);
+
+ if (database == null) {
+ throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within "
+ + "the configured endpoints. Please use one from the configuration");
+ }
+
+ database.clearGracePeriod();
+ if (!database.isHealthy()) {
throw new JedisValidationException("Provided endpoint: " + endpoint
+ " is not healthy. Please consider a healthy endpoint from the configuration");
}
- multiClusterMap.entrySet().stream().forEach(entry -> {
+ databaseMap.entrySet().stream().forEach(entry -> {
if (entry.getKey() != endpoint) {
entry.getValue().setGracePeriod(forcedActiveDuration);
}
});
- setActiveCluster(endpoint);
+ setActiveDatabase(endpoint);
}
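For API consumers, `forceActiveDatabase` is the manual override: it clears the target's grace period, validates its health, and puts every other database into a grace period so the periodic failback check cannot immediately bounce back. A hedged usage sketch (host names and weights are placeholders; the call throws `JedisValidationException` if the target is unknown or unhealthy):

```java
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;

public class ForceActiveSketch {
  public static void main(String[] args) {
    // Hypothetical endpoints; substitute reachable servers.
    HostAndPort primary = new HostAndPort("redis-primary", 6379);
    HostAndPort dr = new HostAndPort("redis-dr", 6379);

    MultiDbConfig config = MultiDbConfig.builder()
        .endpoint(primary, 100.0f, DefaultJedisClientConfig.builder().build())
        .endpoint(dr, 50.0f, DefaultJedisClientConfig.builder().build())
        .build();

    try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
      // Pin the lower-weight DR database for 30 seconds; every other database
      // gets a 30s grace period, so failback will not switch straight back.
      provider.forceActiveDatabase(dr, 30_000);
    }
  }
}
```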
- private boolean setActiveCluster(Cluster cluster, boolean validateConnection) {
- // Cluster cluster = clusterEntry.getValue();
+ private boolean setActiveDatabase(Database database, boolean validateConnection) {
+ // Database database = clusterEntry.getValue();
// Field-level synchronization is used to avoid the edge case in which
// incrementActiveMultiClusterIndex() is called at the same time
- activeClusterChangeLock.lock();
- Cluster oldCluster;
+ activeDatabaseChangeLock.lock();
+ Database oldCluster;
try {
- // Allows an attempt to reset the current cluster from a FORCED_OPEN to CLOSED state in the
+ // Allows an attempt to reset the current database from a FORCED_OPEN to CLOSED state in the
// event that no failover is possible
- if (activeCluster == cluster && !cluster.isCBForcedOpen()) return false;
+ if (activeDatabase == database && !database.isCBForcedOpen()) return false;
- if (validateConnection) validateTargetConnection(cluster);
+ if (validateConnection) validateTargetConnection(database);
- String originalClusterName = getClusterCircuitBreaker().getName();
+ String originalClusterName = getDatabaseCircuitBreaker().getName();
- if (activeCluster == cluster)
- log.warn("Cluster/database endpoint '{}' successfully closed its circuit breaker",
+ if (activeDatabase == database)
+ log.warn("Database/database endpoint '{}' successfully closed its circuit breaker",
originalClusterName);
- else log.warn("Cluster/database endpoint successfully updated from '{}' to '{}'",
- originalClusterName, cluster.circuitBreaker.getName());
- oldCluster = activeCluster;
- activeCluster = cluster;
+ else log.warn("Database/database endpoint successfully updated from '{}' to '{}'",
+ originalClusterName, database.circuitBreaker.getName());
+ oldCluster = activeDatabase;
+ activeDatabase = database;
} finally {
- activeClusterChangeLock.unlock();
+ activeDatabaseChangeLock.unlock();
}
- boolean switched = oldCluster != cluster;
- if (switched && this.multiClusterClientConfig.isFastFailover()) {
- log.info("Forcing disconnect of all active connections in old cluster: {}",
+ boolean switched = oldCluster != database;
+ if (switched && this.multiDbConfig.isFastFailover()) {
+ log.info("Forcing disconnect of all active connections in old database: {}",
oldCluster.circuitBreaker.getName());
oldCluster.forceDisconnect();
- log.info("Disconnected all active connections in old cluster: {}",
+ log.info("Disconnected all active connections in old database: {}",
oldCluster.circuitBreaker.getName());
}
@@ -650,109 +668,139 @@ public void close() {
}
// Close all cluster connection pools
- for (Cluster cluster : multiClusterMap.values()) {
- cluster.close();
+ for (Database database : databaseMap.values()) {
+ database.close();
}
}
@Override
public Connection getConnection() {
- return activeCluster.getConnection();
+ return activeDatabase.getConnection();
}
public Connection getConnection(Endpoint endpoint) {
- return multiClusterMap.get(endpoint).getConnection();
+ return databaseMap.get(endpoint).getConnection();
}
@Override
public Connection getConnection(CommandArguments args) {
- return activeCluster.getConnection();
+ return activeDatabase.getConnection();
}
@Override
public Map<?, Pool<Connection>> getConnectionMap() {
- ConnectionPool connectionPool = activeCluster.connectionPool;
+ ConnectionPool connectionPool = activeDatabase.connectionPool;
return Collections.singletonMap(connectionPool.getFactory(), connectionPool);
}
- public Cluster getCluster() {
- return activeCluster;
+ public Database getDatabase() {
+ return activeDatabase;
}
@VisibleForTesting
- public Cluster getCluster(Endpoint endpoint) {
- return multiClusterMap.get(endpoint);
+ public Database getDatabase(Endpoint endpoint) {
+ return databaseMap.get(endpoint);
}
- public CircuitBreaker getClusterCircuitBreaker() {
- return activeCluster.getCircuitBreaker();
+ /**
+ * Returns the active endpoint.
+ *
+ * The active endpoint is the one currently used for all operations. It can change at any time
+ * due to health checks, failover, failback, etc.
+ * @return the active database endpoint
+ */
+ public Endpoint getActiveEndpoint() {
+ return activeDatabase.getEndpoint();
+ }
+
+ /**
+ * Returns the health state of the given endpoint.
+ * @param endpoint the endpoint to check
+ * @return {@code true} if the endpoint is healthy, {@code false} otherwise
+ */
+ public boolean isHealthy(Endpoint endpoint) {
+ Database database = getDatabase(endpoint);
+ if (database == null) {
+ throw new JedisValidationException(
+ "Endpoint " + endpoint + " does not exist in the provider");
+ }
+ return database.isHealthy();
+ }
+
+ public CircuitBreaker getDatabaseCircuitBreaker() {
+ return activeDatabase.getCircuitBreaker();
}
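Taken together, `getEndpoints()`, `isHealthy(Endpoint)` and `getActiveEndpoint()` are enough for a basic health dashboard. A small sketch, assuming a configured provider is already available:

```java
import redis.clients.jedis.Endpoint;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;

public class HealthDashboardSketch {
  // Prints one line per configured endpoint with its health and active flags.
  static void printHealth(MultiDbConnectionProvider provider) {
    for (Endpoint ep : provider.getEndpoints()) {
      System.out.printf("%s healthy=%b active=%b%n", ep, provider.isHealthy(ep),
          ep.equals(provider.getActiveEndpoint()));
    }
  }
}
```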
/**
* Indicates the final cluster/database endpoint (connection pool), according to the
- * pre-configured list provided at startup via the MultiClusterClientConfig, is unavailable and
- * therefore no further failover is possible. Users can manually failback to an available cluster
+ * pre-configured list provided at startup via the MultiDbConfig, is unavailable and therefore no
+ * further failover is possible. Users can manually failback to an available database.
*/
- public boolean canIterateFrom(Cluster iterateFrom) {
- Map.Entry<Endpoint, Cluster> e = findWeightedHealthyClusterToIterate(iterateFrom);
+ public boolean canIterateFrom(Database iterateFrom) {
+ Map.Entry<Endpoint, Database> e = findWeightedHealthyClusterToIterate(iterateFrom);
return e != null;
}
- public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Cluster cluster) {
- if (clusterSwitchListener != null) {
- ClusterSwitchEventArgs eventArgs = new ClusterSwitchEventArgs(reason, endpoint, cluster);
- clusterSwitchListener.accept(eventArgs);
+ public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Database database) {
+ if (databaseSwitchListener != null) {
+ DatabaseSwitchEvent eventArgs = new DatabaseSwitchEvent(reason, endpoint, database);
+ databaseSwitchListener.accept(eventArgs);
}
}
- public void setClusterSwitchListener(Consumer<ClusterSwitchEventArgs> clusterSwitchListener) {
- this.clusterSwitchListener = clusterSwitchListener;
+ public void setDatabaseSwitchListener(Consumer<DatabaseSwitchEvent> databaseSwitchListener) {
+ this.databaseSwitchListener = databaseSwitchListener;
}
public List<Class<? extends Throwable>> getFallbackExceptionList() {
return fallbackExceptionList;
}
- public static class Cluster {
+ public static class Database {
private TrackingConnectionPool connectionPool;
private final Retry retry;
private final CircuitBreaker circuitBreaker;
private final float weight;
private final HealthCheck healthCheck;
- private final MultiClusterClientConfig multiClusterClientConfig;
+ private final MultiDbConfig multiDbConfig;
private boolean disabled = false;
+ private final Endpoint endpoint;
// Grace period tracking
private volatile long gracePeriodEndsAt = 0;
private final Logger log = LoggerFactory.getLogger(getClass());
- private Cluster(TrackingConnectionPool connectionPool, Retry retry,
- CircuitBreaker circuitBreaker, float weight,
- MultiClusterClientConfig multiClusterClientConfig) {
+ private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
+ CircuitBreaker circuitBreaker, float weight, MultiDbConfig multiDbConfig) {
+ this.endpoint = endpoint;
this.connectionPool = connectionPool;
this.retry = retry;
this.circuitBreaker = circuitBreaker;
this.weight = weight;
- this.multiClusterClientConfig = multiClusterClientConfig;
+ this.multiDbConfig = multiDbConfig;
this.healthCheck = null;
}
- private Cluster(TrackingConnectionPool connectionPool, Retry retry, HealthCheck hc,
- CircuitBreaker circuitBreaker, float weight,
- MultiClusterClientConfig multiClusterClientConfig) {
+ private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
+ HealthCheck hc, CircuitBreaker circuitBreaker, float weight, MultiDbConfig multiDbConfig) {
+ this.endpoint = endpoint;
this.connectionPool = connectionPool;
this.retry = retry;
this.circuitBreaker = circuitBreaker;
this.weight = weight;
- this.multiClusterClientConfig = multiClusterClientConfig;
+ this.multiDbConfig = multiDbConfig;
this.healthCheck = hc;
}
+ public Endpoint getEndpoint() {
+ return endpoint;
+ }
+
public Connection getConnection() {
- if (!isHealthy()) throw new JedisConnectionException("Cluster is not healthy");
+ if (!isHealthy()) throw new JedisConnectionException("Database is not healthy");
if (connectionPool.isClosed()) {
connectionPool = TrackingConnectionPool.from(connectionPool);
}
@@ -797,7 +845,15 @@ public boolean isHealthy() {
}
public boolean retryOnFailover() {
- return multiClusterClientConfig.isRetryOnFailover();
+ return multiDbConfig.isRetryOnFailover();
+ }
+
+ public int getCircuitBreakerMinNumOfFailures() {
+ return multiDbConfig.getCircuitBreakerMinNumOfFailures();
+ }
+
+ public float getCircuitBreakerFailureRateThreshold() {
+ return multiDbConfig.getCircuitBreakerFailureRateThreshold();
}
public boolean isDisabled() {
@@ -819,7 +875,7 @@ public boolean isInGracePeriod() {
* Sets the grace period for this cluster
*/
public void setGracePeriod() {
- setGracePeriod(multiClusterClientConfig.getGracePeriod());
+ setGracePeriod(multiDbConfig.getGracePeriod());
}
public void setGracePeriod(long gracePeriod) {
@@ -836,7 +892,7 @@ public void clearGracePeriod() {
* Whether failback is supported by client
*/
public boolean isFailbackSupported() {
- return multiClusterClientConfig.isFailbackSupported();
+ return multiDbConfig.isFailbackSupported();
}
public void forceDisconnect() {
@@ -847,11 +903,36 @@ public void close() {
connectionPool.close();
}
+ void evaluateThresholds(boolean lastFailRecorded) {
+ if (getCircuitBreaker().getState() == State.CLOSED
+ && isThresholdsExceeded(this, lastFailRecorded)) {
+ getCircuitBreaker().transitionToOpenState();
+ }
+ }
+
+ private static boolean isThresholdsExceeded(Database database, boolean lastFailRecorded) {
+ Metrics metrics = database.getCircuitBreaker().getMetrics();
+ // ATTENTION: this accounts for the current failing call when it has not yet been recorded
+ // in the metrics. DO NOT remove the increment; doing so changes the behaviour when the
+ // initial requests to a database fail.
+ int fails = metrics.getNumberOfFailedCalls() + (lastFailRecorded ? 0 : 1);
+ int succ = metrics.getNumberOfSuccessfulCalls();
+ if (fails >= database.getCircuitBreakerMinNumOfFailures()) {
+ float ratePercentThreshold = database.getCircuitBreakerFailureRateThreshold();// 0..100
+ int total = fails + succ;
+ if (total == 0) return false;
+ float failureRatePercent = (fails * 100.0f) / total;
+ return failureRatePercent >= ratePercentThreshold;
+ }
+ return false;
+ }
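A quick worked example of the two-part gate in `isThresholdsExceeded` (numbers are illustrative, using `minNumOfFailures = 1000` and a 50% rate threshold):

```java
public class ThresholdMathSketch {
  // Same decision as isThresholdsExceeded, minus the metrics plumbing: the
  // breaker opens only when BOTH the minimum failure count and the rate are met.
  static boolean exceeded(int fails, int succ, int minFails, float ratePercent) {
    if (fails < minFails) return false;
    int total = fails + succ;
    if (total == 0) return false;
    return (fails * 100.0f) / total >= ratePercent;
  }

  public static void main(String[] args) {
    System.out.println(exceeded(999, 0, 1000, 50f));     // false: 100% rate, but under the minimum count
    System.out.println(exceeded(1000, 5000, 1000, 50f)); // false: count met, but rate is only ~16.7%
    System.out.println(exceeded(1000, 900, 1000, 50f));  // true: count met and rate is ~52.6%
  }
}
```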
+
@Override
public String toString() {
return circuitBreaker.getName() + "{" + "connectionPool=" + connectionPool + ", retry="
+ retry + ", circuitBreaker=" + circuitBreaker + ", weight=" + weight + ", healthStatus="
- + getHealthStatus() + ", multiClusterClientConfig=" + multiClusterClientConfig + '}';
+ + getHealthStatus() + ", multiClusterClientConfig=" + multiDbConfig + '}';
}
+
}
}
\ No newline at end of file
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
similarity index 64%
rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
index 24e41022b4..9bd1f35440 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java
@@ -1,28 +1,27 @@
package redis.clients.jedis.mcf;
-import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+import io.github.resilience4j.circuitbreaker.CircuitBreaker.State;
import io.github.resilience4j.decorators.Decorators;
import io.github.resilience4j.decorators.Decorators.DecorateSupplier;
import redis.clients.jedis.Connection;
import redis.clients.jedis.annots.Experimental;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
/**
* ConnectionProvider with built-in retry, circuit-breaker, and failover to another cluster/database
* endpoint. With this executor users can seamlessly failover to Disaster Recovery (DR), Backup, and
- * Active-Active cluster(s) by using simple configuration which is passed through from Resilience4j
- * - https://resilience4j.readme.io/docs
+ * Active-Active cluster(s) by using simple configuration.
*/
@Experimental
-public class CircuitBreakerFailoverConnectionProvider extends CircuitBreakerFailoverBase {
+public class MultiDbConnectionSupplier extends MultiDbFailoverBase {
- public CircuitBreakerFailoverConnectionProvider(MultiClusterPooledConnectionProvider provider) {
+ public MultiDbConnectionSupplier(MultiDbConnectionProvider provider) {
super(provider);
}
public Connection getConnection() {
- Cluster cluster = provider.getCluster(); // Pass this by reference for thread safety
+ Database cluster = provider.getDatabase(); // Capture a local reference for thread safety
DecorateSupplier<Connection> supplier = Decorators
.ofSupplier(() -> this.handleGetConnection(cluster));
@@ -32,13 +31,20 @@ public Connection getConnection() {
supplier.withFallback(provider.getFallbackExceptionList(),
e -> this.handleClusterFailover(cluster));
- return supplier.decorate().get();
+ try {
+ return supplier.decorate().get();
+ } catch (Exception e) {
+ if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(cluster)) {
+ clusterFailover(cluster);
+ }
+ throw e;
+ }
}
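For readers unfamiliar with Resilience4j's `Decorators`, each `with*` call wraps everything composed so far, so call order controls nesting. A self-contained sketch of the same pattern (the supplier, exception list, and fallback value are placeholders, not the provider's actual wiring):

```java
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
import io.github.resilience4j.decorators.Decorators;
import io.github.resilience4j.retry.Retry;
import java.util.Collections;
import java.util.function.Supplier;

public class DecoratorOrderSketch {
  public static void main(String[] args) {
    CircuitBreaker cb = CircuitBreaker.ofDefaults("db");
    Retry retry = Retry.ofDefaults("db");

    Supplier<String> decorated = Decorators
        .ofSupplier(() -> "pong")       // the "happy path" call
        .withRetry(retry)               // wraps the raw call
        .withCircuitBreaker(cb)         // records the outcome of the composed call
        .withFallback(Collections.singletonList(RuntimeException.class),
          e -> "fallback")              // stand-in for handleClusterFailover
        .decorate();

    System.out.println(decorated.get()); // "pong"
  }
}
```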
/**
* Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios
*/
- private Connection handleGetConnection(Cluster cluster) {
+ private Connection handleGetConnection(Database cluster) {
Connection connection = cluster.getConnection();
connection.ping();
return connection;
@@ -48,7 +54,7 @@ private Connection handleGetConnection(Cluster cluster) {
* Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
* failure scenarios
*/
- private Connection handleClusterFailover(Cluster cluster) {
+ private Connection handleClusterFailover(Database cluster) {
clusterFailover(cluster);
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java
similarity index 58%
rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java
index 462a5ff427..3e9d5f2d39 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java
@@ -1,10 +1,12 @@
package redis.clients.jedis.mcf;
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
+
import redis.clients.jedis.annots.Experimental;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
import redis.clients.jedis.util.IOUtils;
/**
@@ -18,12 +20,12 @@
*
*/
@Experimental
-public class CircuitBreakerFailoverBase implements AutoCloseable {
+public class MultiDbFailoverBase implements AutoCloseable {
private final Lock lock = new ReentrantLock(true);
- protected final MultiClusterPooledConnectionProvider provider;
+ protected final MultiDbConnectionProvider provider;
- public CircuitBreakerFailoverBase(MultiClusterPooledConnectionProvider provider) {
+ public MultiDbFailoverBase(MultiDbConnectionProvider provider) {
this.provider = provider;
}
@@ -36,10 +38,10 @@ public void close() {
* Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
* failure scenarios
*/
- protected void clusterFailover(Cluster cluster) {
+ protected void clusterFailover(Database database) {
lock.lock();
- CircuitBreaker circuitBreaker = cluster.getCircuitBreaker();
+ CircuitBreaker circuitBreaker = database.getCircuitBreaker();
try {
// Check state to handle race conditions since iterateActiveCluster() is
// non-idempotent
@@ -49,29 +51,29 @@ protected void clusterFailover(Cluster cluster) {
// event publishing.
// To recover/transition from this forced state the user will need to manually failback
- Cluster activeCluster = provider.getCluster();
- // This should be possible only if active cluster is switched from by other reasons than
+ Database activeDatabase = provider.getDatabase();
+ // This should be possible only if active database is switched from by other reasons than
// circuit breaker, just before circuit breaker triggers
- if (activeCluster != cluster) {
+ if (activeDatabase != database) {
return;
}
- cluster.setGracePeriod();
+ database.setGracePeriod();
circuitBreaker.transitionToForcedOpenState();
- // Iterating the active cluster will allow subsequent calls to the executeCommand() to use
+ // Iterating the active database will allow subsequent calls to the executeCommand() to use
// the next
- // cluster's connection pool - according to the configuration's prioritization/order/weight
- provider.switchToHealthyCluster(SwitchReason.CIRCUIT_BREAKER, cluster);
+ // database's connection pool - according to the configuration's prioritization/order/weight
+ provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database);
}
// this check relies on the fact that many failover attempts can hit with the same CB,
// only the first one will trigger a failover, and make the CB FORCED_OPEN.
- // when the rest reaches here, the active cluster is already the next one, and should be
+ // when the rest reaches here, the active database is already the next one, and should be
// different than
// active CB. If its the same one and there are no more clusters to failover to, then throw an
// exception
- else if (cluster == provider.getCluster()) {
- provider.switchToHealthyCluster(SwitchReason.CIRCUIT_BREAKER, cluster);
+ else if (database == provider.getDatabase()) {
+ provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database);
}
// Ignore exceptions since we are already in a failure state
} finally {
@@ -79,4 +81,13 @@ else if (cluster == provider.getCluster()) {
}
}
+ boolean isActiveDatabase(Database database) {
+ Database activeDatabase = provider.getDatabase();
+ return activeDatabase != null && activeDatabase.equals(database);
+ }
+
+ static boolean isCircuitBreakerTrackedException(Exception e, Database database) {
+ return database.getCircuitBreaker().getCircuitBreakerConfig().getRecordExceptionPredicate()
+ .test(e);
+ }
}
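`isCircuitBreakerTrackedException` simply defers to the breaker's configured record predicate. A sketch of that predicate in isolation (the exception types here are arbitrary examples):

```java
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;
import java.io.IOException;

public class RecordPredicateSketch {
  public static void main(String[] args) {
    CircuitBreakerConfig config = CircuitBreakerConfig.custom()
        .recordExceptions(IOException.class) // only I/O failures count as recorded
        .build();
    CircuitBreaker cb = CircuitBreaker.of("db", config);

    // Mirrors isCircuitBreakerTrackedException(e, database):
    System.out.println(cb.getCircuitBreakerConfig().getRecordExceptionPredicate()
        .test(new IOException("socket reset")));        // true
    System.out.println(cb.getCircuitBreakerConfig().getRecordExceptionPredicate()
        .test(new IllegalStateException("unrelated"))); // false
  }
}
```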
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
similarity index 77%
rename from src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
index c227b27e99..bc0d950a6a 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java
@@ -11,20 +11,19 @@
/**
* This is a highly memory-dependent solution, as all appended commands are held in memory until
- * {@link MultiClusterPipeline#sync() SYNC} (or {@link MultiClusterPipeline#close() CLOSE}) gets
- * called.
+ * {@link MultiDbPipeline#sync() SYNC} (or {@link MultiDbPipeline#close() CLOSE}) gets called.
*/
@Experimental
-public class MultiClusterPipeline extends PipelineBase implements Closeable {
+public class MultiDbPipeline extends PipelineBase implements Closeable {
- private final CircuitBreakerFailoverConnectionProvider failoverProvider;
+ private final MultiDbConnectionSupplier failoverProvider;
private final Queue<KeyValue<CommandArguments, Response<?>>> commands = new LinkedList<>();
@Deprecated
- public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider) {
+ public MultiDbPipeline(MultiDbConnectionProvider pooledProvider) {
super(new CommandObjects());
- this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
+ this.failoverProvider = new MultiDbConnectionSupplier(pooledProvider);
try (Connection connection = failoverProvider.getConnection()) {
RedisProtocol proto = connection.getRedisProtocol();
@@ -32,10 +31,9 @@ public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider)
}
}
- public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider,
- CommandObjects commandObjects) {
+ public MultiDbPipeline(MultiDbConnectionProvider pooledProvider, CommandObjects commandObjects) {
super(commandObjects);
- this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider);
+ this.failoverProvider = new MultiDbConnectionSupplier(pooledProvider);
}
@Override
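Because results only materialize on `sync()`, typical usage buffers commands and reads the `Response` objects afterwards. A hypothetical end-to-end sketch, assuming `MultiDbClient#pipelined()` is backed by this pipeline type and using placeholder endpoints:

```java
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.MultiDbClient;
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.Response;

public class PipelineSketch {
  public static void main(String[] args) {
    MultiDbConfig config = MultiDbConfig.builder()
        .endpoint(new HostAndPort("redis-primary", 6379), 100.0f,
          DefaultJedisClientConfig.builder().build())
        .build();

    try (MultiDbClient client = MultiDbClient.builder().multiDbConfig(config).build()) {
      var pipeline = client.pipelined(); // assumed to be failover-aware under the hood
      Response<String> setReply = pipeline.set("k", "v");
      Response<String> getReply = pipeline.get("k");
      pipeline.sync(); // commands are buffered in memory until this point
      System.out.println(getReply.get()); // "v"
    }
  }
}
```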
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
similarity index 90%
rename from src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
index 2de927826c..1688a2c635 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java
@@ -20,13 +20,13 @@
* This is a highly memory-dependent solution, as all appended commands are held in memory.
*/
@Experimental
-public class MultiClusterTransaction extends TransactionBase {
+public class MultiDbTransaction extends TransactionBase {
private static final Builder<?> NO_OP_BUILDER = BuilderFactory.RAW_OBJECT;
private static final String GRAPH_COMMANDS_NOT_SUPPORTED_MESSAGE = "Graph commands are not supported.";
- private final CircuitBreakerFailoverConnectionProvider failoverProvider;
+ private final MultiDbConnectionSupplier failoverProvider;
private final AtomicInteger extraCommandCount = new AtomicInteger();
private final Queue<KeyValue<CommandArguments, Response<?>>> commands = new LinkedList<>();
@@ -39,7 +39,7 @@ public class MultiClusterTransaction extends TransactionBase {
* @param provider
*/
@Deprecated
- public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider) {
+ public MultiDbTransaction(MultiDbConnectionProvider provider) {
this(provider, true);
}
@@ -50,8 +50,8 @@ public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider) {
* @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
*/
@Deprecated
- public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, boolean doMulti) {
- this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
+ public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti) {
+ this.failoverProvider = new MultiDbConnectionSupplier(provider);
try (Connection connection = failoverProvider.getConnection()) {
RedisProtocol proto = connection.getRedisProtocol();
@@ -68,10 +68,10 @@ public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, bo
* @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI
* @param commandObjects command objects
*/
- public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, boolean doMulti,
+ public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti,
CommandObjects commandObjects) {
super(commandObjects);
- this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider);
+ this.failoverProvider = new MultiDbConnectionSupplier(provider);
if (doMulti) multi();
}
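With `doMulti = false` the caller drives WATCH/MULTI by hand, and `exec()` returns null when a watched key was modified. A hedged sketch against an already-configured provider (the deprecated constructor is used here only for brevity):

```java
import java.util.List;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.mcf.MultiDbTransaction;

public class TransactionSketch {
  // Optimistic locking over a failover-aware connection; the provider is
  // assumed to be built elsewhere, as shown in this change's tests.
  static List<Object> credit(MultiDbConnectionProvider provider) {
    MultiDbTransaction tx = new MultiDbTransaction(provider, false);
    tx.watch("balance");        // manual WATCH before MULTI
    tx.multi();
    tx.incrBy("balance", 100);
    return tx.exec();           // null if "balance" changed since WATCH
  }
}
```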
diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
new file mode 100644
index 0000000000..43673da1ed
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java
@@ -0,0 +1,206 @@
+package redis.clients.jedis;
+
+import eu.rekawek.toxiproxy.Proxy;
+import eu.rekawek.toxiproxy.ToxiproxyClient;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.AfterEach;
+
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItems;
+import static org.hamcrest.Matchers.not;
+import static org.junit.jupiter.api.Assertions.*;
+
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
+import redis.clients.jedis.exceptions.JedisValidationException;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
+import redis.clients.jedis.mcf.SwitchReason;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;
+
+/**
+ * Basic tests for MultiDbClient functionality.
+ */
+@Tag("integration")
+public class MultiDbClientTest {
+
+ private MultiDbClient client;
+ private static final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("redis-failover-1");
+ private static final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("redis-failover-2");
+
+ private static final ToxiproxyClient tp = new ToxiproxyClient("localhost", 8474);
+ private static Proxy redisProxy1;
+ private static Proxy redisProxy2;
+
+ @BeforeAll
+ public static void setupAdminClients() throws IOException {
+ if (tp.getProxyOrNull("redis-1") != null) {
+ tp.getProxy("redis-1").delete();
+ }
+ if (tp.getProxyOrNull("redis-2") != null) {
+ tp.getProxy("redis-2").delete();
+ }
+
+ redisProxy1 = tp.createProxy("redis-1", "0.0.0.0:29379", "redis-failover-1:9379");
+ redisProxy2 = tp.createProxy("redis-2", "0.0.0.0:29380", "redis-failover-2:9380");
+ }
+
+ @BeforeEach
+ void setUp() {
+ // Create a simple resilient client with mock endpoints for testing
+ MultiDbConfig clientConfig = MultiDbConfig.builder()
+ .endpoint(endpoint1.getHostAndPort(), 100.0f, endpoint1.getClientConfigBuilder().build())
+ .endpoint(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build())
+ .build();
+
+ client = MultiDbClient.builder().multiDbConfig(clientConfig).build();
+ }
+
+ @AfterEach
+ void tearDown() {
+ if (client != null) {
+ client.close();
+ }
+ }
+
+ @Test
+ void testAddRemoveEndpointWithEndpointInterface() {
+ Endpoint newEndpoint = new HostAndPort("unavailable", 6381);
+
+ assertDoesNotThrow(
+ () -> client.addEndpoint(newEndpoint, 25.0f, DefaultJedisClientConfig.builder().build()));
+
+ assertThat(client.getEndpoints(), hasItems(newEndpoint));
+
+ assertDoesNotThrow(() -> client.removeEndpoint(newEndpoint));
+
+ assertThat(client.getEndpoints(), not(hasItems(newEndpoint)));
+ }
+
+ @Test
+ void testAddRemoveEndpointWithDatabaseConfig() {
+ // TODO(@ggivo): Replace HostAndPort with Endpoint
+ HostAndPort newEndpoint = new HostAndPort("unavailable", 6381);
+
+ DatabaseConfig newConfig = DatabaseConfig
+ .builder(newEndpoint, DefaultJedisClientConfig.builder().build()).weight(25.0f).build();
+
+ assertDoesNotThrow(() -> client.addEndpoint(newConfig));
+
+ assertThat(client.getEndpoints(), hasItems(newEndpoint));
+
+ assertDoesNotThrow(() -> client.removeEndpoint(newEndpoint));
+
+ assertThat(client.getEndpoints(), not(hasItems(newEndpoint)));
+ }
+
+ @Test
+ void testSetActiveDatabase() {
+ Endpoint endpoint = client.getActiveEndpoint();
+
+ awaitIsHealthy(endpoint1.getHostAndPort());
+ awaitIsHealthy(endpoint2.getHostAndPort());
+ // Ensure we have a healthy endpoint to switch to
+ Endpoint newEndpoint = client.getEndpoints().stream()
+ .filter(e -> !e.equals(endpoint) && client.isHealthy(e)).findFirst().orElse(null);
+ assertNotNull(newEndpoint);
+
+ // Switch to the new endpoint
+ client.setActiveDatabase(newEndpoint);
+
+ assertEquals(newEndpoint, client.getActiveEndpoint());
+ }
+
+ @Test
+ void testBuilderWithMultipleEndpointTypes() {
+ MultiDbConfig clientConfig = MultiDbConfig.builder()
+ .endpoint(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build())
+ .endpoint(DatabaseConfig
+ .builder(endpoint2.getHostAndPort(), DefaultJedisClientConfig.builder().build())
+ .weight(50.0f).build())
+ .build();
+
+ try (MultiDbClient testClient = MultiDbClient.builder().multiDbConfig(clientConfig).build()) {
+ assertThat(testClient.getEndpoints().size(), equalTo(2));
+ assertThat(testClient.getEndpoints(),
+ hasItems(endpoint1.getHostAndPort(), endpoint2.getHostAndPort()));
+ }
+ }
+
+ @Test
+ public void testForceActiveEndpoint() {
+ Endpoint endpoint = client.getActiveEndpoint();
+
+ // Ensure we have a healthy endpoint to switch to
+ awaitIsHealthy(endpoint1.getHostAndPort());
+ awaitIsHealthy(endpoint2.getHostAndPort());
+ Endpoint newEndpoint = client.getEndpoints().stream()
+ .filter(e -> !e.equals(endpoint) && client.isHealthy(e)).findFirst().orElse(null);
+ assertNotNull(newEndpoint);
+
+ // Force switch to the new endpoint for 10 seconds
+ client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis());
+
+ // Verify the active endpoint has changed
+ assertEquals(newEndpoint, client.getActiveEndpoint());
+ }
+
+ @Test
+ public void testForceActiveEndpointWithNonHealthyEndpoint() {
+ Endpoint newEndpoint = new HostAndPort("unavailable", 6381);
+ client.addEndpoint(newEndpoint, 25.0f, DefaultJedisClientConfig.builder().build());
+
+ assertThrows(JedisValidationException.class,
+ () -> client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis()));
+ }
+
+ @Test
+ public void testForceActiveEndpointWithNonExistingEndpoint() {
+ Endpoint newEndpoint = new HostAndPort("unavailable", 6381);
+ assertThrows(JedisValidationException.class,
+ () -> client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis()));
+ }
+
+ @Test
+ public void testWithDatabaseSwitchListener() {
+
+ MultiDbConfig endpointsConfig = MultiDbConfig.builder()
+ .endpoint(DatabaseConfig
+ .builder(endpoint1.getHostAndPort(), endpoint1.getClientConfigBuilder().build())
+ .weight(100.0f).build())
+ .endpoint(DatabaseConfig
+ .builder(endpoint2.getHostAndPort(), endpoint2.getClientConfigBuilder().build())
+ .weight(50.0f).build())
+ .build();
+
+ Consumer<DatabaseSwitchEvent> eventConsumer;
+ List<DatabaseSwitchEvent> events = new ArrayList<>();
+ eventConsumer = events::add;
+
+ try (MultiDbClient testClient = MultiDbClient.builder().databaseSwitchListener(eventConsumer)
+ .multiDbConfig(endpointsConfig).build()) {
+
+ assertThat(events.size(), equalTo(0));
+
+ awaitIsHealthy(endpoint2.getHostAndPort());
+ testClient.setActiveDatabase(endpoint2.getHostAndPort());
+
+ assertThat(events.size(), equalTo(1));
+ assertThat(events.get(0).getEndpoint(), equalTo(endpoint2.getHostAndPort()));
+ assertThat(events.get(0).getReason(), equalTo(SwitchReason.FORCED));
+ }
+ }
+
+ private void awaitIsHealthy(HostAndPort hostAndPort) {
+ await().atMost(Duration.ofSeconds(1)).until(() -> client.isHealthy(hostAndPort));
+ }
+
+}
diff --git a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
index f2b2f56e14..cd2ca8e4c5 100644
--- a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
+++ b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java
@@ -184,8 +184,7 @@ private static boolean clusterConstructorThatShouldBeDeprecatedAndRemoved(Constr
private static boolean multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient(
Constructor> ctor) {
Class<?>[] types = ctor.getParameterTypes();
- return types.length == 1
- && types[0].getSimpleName().equals("MultiClusterPooledConnectionProvider");
+ return types.length == 1 && types[0].getSimpleName().equals("MultiDbConnectionProvider");
}
private static String prettySignature(Constructor> ctor) {
diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
index 902ed73aa5..b3c19fdda5 100644
--- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java
@@ -16,10 +16,10 @@
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.scenario.RecommendedSettings;
import java.io.IOException;
@@ -30,12 +30,12 @@
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
-import static io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType.COUNT_BASED;
import static org.awaitility.Awaitility.await;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.instanceOf;
@@ -57,7 +57,7 @@ public class FailoverIntegrationTest {
private static UnifiedJedis jedis2;
private static String JEDIS1_ID = "";
private static String JEDIS2_ID = "";
- private MultiClusterPooledConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
private UnifiedJedis failoverClient;
@BeforeAll
@@ -138,7 +138,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID));
await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy());
+ .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy());
// Disable redisProxy1
redisProxy1.disable();
@@ -149,8 +149,8 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception
// 3. Subsequent calls should be routed to Endpoint 2
assertThrows(JedisConnectionException.class, () -> failoverClient.info("server"));
- assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
- equalTo(CircuitBreaker.State.OPEN));
+ assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ equalTo(CircuitBreaker.State.FORCED_OPEN));
// Check that the failoverClient is now using Endpoint 2
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID));
@@ -160,8 +160,8 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception
// Endpoint1 and Endpoint2 are NOT available,
assertThrows(JedisConnectionException.class, () -> failoverClient.info("server"));
- assertThat(provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker().getState(),
- equalTo(CircuitBreaker.State.OPEN));
+ assertThat(provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker().getState(),
+ equalTo(CircuitBreaker.State.FORCED_OPEN));
// and since no other nodes are available, it should propagate the errors to the caller
// subsequent calls
@@ -173,18 +173,21 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID));
await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy());
+ .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy());
- provider.setActiveCluster(endpoint2.getHostAndPort());
+ provider.setActiveDatabase(endpoint2.getHostAndPort());
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID));
}
- private List<MultiClusterClientConfig.ClusterConfig> getClusterConfigs(
- JedisClientConfig clientConfig, EndpointConfig... endpoints) {
+ private List<MultiDbConfig.DatabaseConfig> getDatabaseConfigs(JedisClientConfig clientConfig,
+ EndpointConfig... endpoints) {
- return Arrays.stream(endpoints).map(
- e -> MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig).build())
+ // Assign strictly decreasing weights (1/n, 1/(n+1), ...) so earlier endpoints rank higher
+ AtomicInteger weightCounter = new AtomicInteger(endpoints.length);
+ return Arrays.stream(endpoints)
+ .map(e -> MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)
+ .weight(1.0f / weightCounter.getAndIncrement()).healthCheckEnabled(false).build())
.collect(Collectors.toList());
}
@@ -194,21 +197,21 @@ public void testManualFailoverInflightCommandsCompleteGracefully()
throws ExecutionException, InterruptedException {
await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy());
+ .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy());
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID));
// We will trigger failover while this command is in-flight
Future> blpop = executor.submit(() -> failoverClient.blpop(1000, "test-list"));
- provider.setActiveCluster(endpoint2.getHostAndPort());
+ provider.setActiveDatabase(endpoint2.getHostAndPort());
// After the manual failover, commands should be executed against Endpoint 2
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID));
// Failover was manually triggered, and there were no errors
// previous endpoint CB should still be in CLOSED state
- assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.CLOSED));
jedis1.rpush("test-list", "somevalue");
@@ -225,12 +228,12 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws
assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID));
await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy());
+ .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy());
Future> blpop = executor.submit(() -> failoverClient.blpop(10000, "test-list-1"));
// trigger failover manually
- provider.setActiveCluster(endpoint2.getHostAndPort());
+ provider.setActiveDatabase(endpoint2.getHostAndPort());
Future infoCmd = executor.submit(() -> failoverClient.info("server"));
// After the manual failover, commands should be executed against Endpoint 2
@@ -244,7 +247,7 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws
assertThat(exception.getCause(), instanceOf(JedisConnectionException.class));
// Check that the circuit breaker for Endpoint 1 is open after the error
- assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.OPEN));
// Ensure that the active cluster is still Endpoint 2
@@ -258,19 +261,15 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws
*/
@Test
public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOException {
- MultiClusterClientConfig failoverConfig = new MultiClusterClientConfig.Builder(
- getClusterConfigs(
- DefaultJedisClientConfig.builder()
- .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
- .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(),
- endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1)
- .circuitBreakerSlidingWindowType(COUNT_BASED).circuitBreakerSlidingWindowSize(3)
- .circuitBreakerFailureRateThreshold(50) // 50% failure
- // rate threshold
- .circuitBreakerSlidingWindowMinCalls(3).build();
-
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- failoverConfig);
+ MultiDbConfig failoverConfig = new MultiDbConfig.Builder(getDatabaseConfigs(
+ DefaultJedisClientConfig.builder().socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
+ .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(),
+ endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1)
+ .circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2)
+ .circuitBreakerFailureRateThreshold(50f) // 50% failure rate
+ .build();
+
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(failoverConfig);
try (UnifiedJedis client = new UnifiedJedis(provider)) {
// Verify initial connection to first endpoint
assertThat(getNodeId(client.info("server")), equalTo(JEDIS1_ID));
@@ -296,8 +295,8 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc
assertThrows(JedisConnectionException.class, () -> client.info("server"));
// Circuit breaker should be open after just one command with retries
- assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
- equalTo(CircuitBreaker.State.OPEN));
+ assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ equalTo(CircuitBreaker.State.FORCED_OPEN));
// Next command should be routed to the second endpoint
// Command 2
@@ -316,7 +315,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc
@Test
public void testInflightCommandsAreRetriedAfterFailover() throws Exception {
- MultiClusterPooledConnectionProvider customProvider = createProvider(
+ MultiDbConnectionProvider customProvider = createProvider(
builder -> builder.retryOnFailover(true));
// Create a custom client with retryOnFailover enabled for this specific test
@@ -340,7 +339,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception {
assertThat(getNodeId(customClient.info("server")), equalTo(JEDIS2_ID));
// Check that the circuit breaker for Endpoint 1 is open
assertThat(
- customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ customProvider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
equalTo(CircuitBreaker.State.FORCED_OPEN));
// Disable redisProxy1 to enforce connection drop for the in-flight (blpop) command
@@ -358,7 +357,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception {
@Test
public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception {
// Create a custom provider and client with retry disabled for this specific test
- MultiClusterPooledConnectionProvider customProvider = createProvider(
+ MultiDbConnectionProvider customProvider = createProvider(
builder -> builder.retryOnFailover(false));
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
@@ -374,8 +373,8 @@ public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception {
// Check that the circuit breaker for Endpoint 1 is open
assertThat(
- customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
- equalTo(CircuitBreaker.State.OPEN));
+ customProvider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(),
+ equalTo(CircuitBreaker.State.FORCED_OPEN));
// Disable redisProxy1 to enforce the current blpop command failure
redisProxy1.disable();
@@ -415,43 +414,41 @@ private static String generateTestValue(int byteSize) {
}
/**
- * Creates a MultiClusterPooledConnectionProvider with standard configuration
+ * Creates a MultiDbConnectionProvider with standard configuration
* @return A configured provider
*/
- private MultiClusterPooledConnectionProvider createProvider() {
+ private MultiDbConnectionProvider createProvider() {
JedisClientConfig clientConfig = DefaultJedisClientConfig.builder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
- MultiClusterClientConfig failoverConfig = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
- .retryWaitDuration(1).circuitBreakerSlidingWindowType(COUNT_BASED)
- .circuitBreakerSlidingWindowSize(1).circuitBreakerFailureRateThreshold(100)
- .circuitBreakerSlidingWindowMinCalls(1).build();
+ MultiDbConfig failoverConfig = new MultiDbConfig.Builder(
+ getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
+ .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
+ .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build();
- return new MultiClusterPooledConnectionProvider(failoverConfig);
+ return new MultiDbConnectionProvider(failoverConfig);
}
/**
- * Creates a MultiClusterPooledConnectionProvider with standard configuration
+ * Creates a MultiDbConnectionProvider with standard configuration
* @return A configured provider
*/
- private MultiClusterPooledConnectionProvider createProvider(
- Function<MultiClusterClientConfig.Builder, MultiClusterClientConfig.Builder> configCustomizer) {
+ private MultiDbConnectionProvider createProvider(
+ Function<MultiDbConfig.Builder, MultiDbConfig.Builder> configCustomizer) {
JedisClientConfig clientConfig = DefaultJedisClientConfig.builder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
- .retryWaitDuration(1).circuitBreakerSlidingWindowType(COUNT_BASED)
- .circuitBreakerSlidingWindowSize(1).circuitBreakerFailureRateThreshold(100)
- .circuitBreakerSlidingWindowMinCalls(1);
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
+ getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1)
+ .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3)
+ .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f);
if (configCustomizer != null) {
builder = configCustomizer.apply(builder);
}
- return new MultiClusterPooledConnectionProvider(builder.build());
+ return new MultiDbConnectionProvider(builder.build());
}
}
diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
index f2134f0ea3..a5aae5e9bf 100644
--- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java
@@ -1,7 +1,5 @@
package redis.clients.jedis.mcf;
-import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;
-
import io.github.resilience4j.ratelimiter.RateLimiterConfig;
import org.hamcrest.Matchers;
import org.junit.jupiter.api.AfterAll;
@@ -18,7 +16,7 @@
import eu.rekawek.toxiproxy.ToxiproxyClient;
import eu.rekawek.toxiproxy.model.Toxic;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.scenario.ActiveActiveFailoverTest;
import redis.clients.jedis.scenario.MultiThreadedFakeApp;
import redis.clients.jedis.scenario.RecommendedSettings;
@@ -96,22 +94,20 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio
"TESTING WITH PARAMETERS: fastFailover: {} numberOfThreads: {} minFailoverCompletionDuration: {} maxFailoverCompletionDuration: {] ",
fastFailover, numberOfThreads, minFailoverCompletionDuration, maxFailoverCompletionDuration);
- MultiClusterClientConfig.ClusterConfig[] clusterConfig = new MultiClusterClientConfig.ClusterConfig[2];
+ MultiDbConfig.DatabaseConfig[] clusterConfig = new MultiDbConfig.DatabaseConfig[2];
JedisClientConfig config = endpoint1.getClientConfigBuilder()
.socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS)
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build();
- clusterConfig[0] = ClusterConfig.builder(endpoint1.getHostAndPort(), config)
+ clusterConfig[0] = DatabaseConfig.builder(endpoint1.getHostAndPort(), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build();
- clusterConfig[1] = ClusterConfig.builder(endpoint2.getHostAndPort(), config)
+ clusterConfig[1] = DatabaseConfig.builder(endpoint2.getHostAndPort(), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfig);
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(clusterConfig);
- builder.circuitBreakerSlidingWindowType(CircuitBreakerConfig.SlidingWindowType.TIME_BASED);
builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS
- builder.circuitBreakerSlidingWindowMinCalls(1);
builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit
// breaker
@@ -126,7 +122,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio
// Use the parameterized fastFailover setting
builder.fastFailover(fastFailover);
- class FailoverReporter implements Consumer<ClusterSwitchEventArgs> {
+ class FailoverReporter implements Consumer<DatabaseSwitchEvent> {
String currentClusterName = "not set";
@@ -143,10 +139,10 @@ public String getCurrentClusterName() {
}
@Override
- public void accept(ClusterSwitchEventArgs e) {
- this.currentClusterName = e.getClusterName();
+ public void accept(DatabaseSwitchEvent e) {
+ this.currentClusterName = e.getDatabaseName();
log.info("\n\n===={}=== \nJedis switching to cluster: {}\n====End of log===\n",
- e.getReason(), e.getClusterName());
+ e.getReason(), e.getDatabaseName());
if ((e.getReason() == SwitchReason.CIRCUIT_BREAKER
|| e.getReason() == SwitchReason.HEALTH_CHECK)) {
failoverHappened = true;
@@ -166,11 +162,10 @@ public void accept(ClusterSwitchEventArgs e) {
ensureEndpointAvailability(endpoint2.getHostAndPort(), config);
// Create the connection provider
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- builder.build());
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(builder.build());
FailoverReporter reporter = new FailoverReporter();
- provider.setClusterSwitchListener(reporter);
- provider.setActiveCluster(endpoint1.getHostAndPort());
+ provider.setDatabaseSwitchListener(reporter);
+ provider.setActiveDatabase(endpoint1.getHostAndPort());
UnifiedJedis client = new UnifiedJedis(provider);
@@ -182,7 +177,7 @@ public void accept(ClusterSwitchEventArgs e) {
AtomicBoolean unexpectedErrors = new AtomicBoolean(false);
AtomicReference<Exception> lastException = new AtomicReference<>();
AtomicLong stopRunningAt = new AtomicLong();
- String cluster2Id = provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker()
+ String cluster2Id = provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker()
.getName();
// Start thread that imitates an application that uses the client
@@ -200,7 +195,7 @@ public void accept(ClusterSwitchEventArgs e) {
while (true) {
try {
if (System.currentTimeMillis() > stopRunningAt.get()) break;
- currentClusterId = provider.getCluster().getCircuitBreaker().getName();
+ currentClusterId = provider.getDatabase().getCircuitBreaker().getName();
Map<String, String> executionInfo = new HashMap<String, String>() {
{
put("threadId", String.valueOf(threadId));
@@ -289,7 +284,7 @@ public boolean isCompleted(Duration checkInterval, Duration delayAfter, Duration
}
log.info("Fake app completed");
- ConnectionPool pool = provider.getCluster(endpoint1.getHostAndPort()).getConnectionPool();
+ ConnectionPool pool = provider.getDatabase(endpoint1.getHostAndPort()).getConnectionPool();
log.info("First connection pool state: active: {}, idle: {}", pool.getNumActive(),
pool.getNumIdle());
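
For quick reference, the migrated configuration from the hunks above as one contiguous sketch. The sliding-window type and min-calls settings are gone; circuitBreakerMinNumOfFailures (used later in this diff) pairs with the failure-rate threshold instead. Endpoints and client config are placeholders:

    JedisClientConfig config = DefaultJedisClientConfig.builder().build();
    MultiDbConfig.DatabaseConfig[] databases = new MultiDbConfig.DatabaseConfig[] {
        MultiDbConfig.DatabaseConfig.builder(new HostAndPort("host-a", 6379), config)
            .weight(1.0f).build(),
        MultiDbConfig.DatabaseConfig.builder(new HostAndPort("host-b", 6379), config)
            .weight(0.5f).build() };

    MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databases);
    builder.circuitBreakerSlidingWindowSize(1);        // sliding window size in seconds
    builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures
    builder.circuitBreakerMinNumOfFailures(3);         // both thresholds must be exceeded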
diff --git a/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java
new file mode 100644
index 0000000000..2892005cb4
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java
@@ -0,0 +1,183 @@
+package redis.clients.jedis.mcf;
+
+import static org.junit.jupiter.api.Assertions.*;
+import static org.mockito.ArgumentMatchers.*;
+import static org.mockito.Mockito.*;
+
+import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import redis.clients.jedis.DefaultJedisClientConfig;
+import redis.clients.jedis.HostAndPort;
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
+
+/**
+ * Tests for circuit breaker thresholds: both the failure-rate threshold and the minimum number of
+ * failures must be exceeded to trigger failover. Mocks the {@link CircuitBreaker}, its metrics,
+ * and the provider and {@link Database} wiring to avoid network I/O.
+ */
+public class DatabaseEvaluateThresholdsTest {
+
+ private MultiDbConnectionProvider provider;
+ private Database database;
+ private CircuitBreaker circuitBreaker;
+ private CircuitBreaker.Metrics metrics;
+
+ @BeforeEach
+ public void setup() {
+ provider = mock(MultiDbConnectionProvider.class);
+ database = mock(Database.class);
+
+ circuitBreaker = mock(CircuitBreaker.class);
+ metrics = mock(CircuitBreaker.Metrics.class);
+
+ when(database.getCircuitBreaker()).thenReturn(circuitBreaker);
+ when(circuitBreaker.getMetrics()).thenReturn(metrics);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ // Configure the mock to call the real evaluateThresholds method
+ doCallRealMethod().when(database).evaluateThresholds(anyBoolean());
+
+ }
+
+ /**
+ * Below minimum failures; even if all calls are failures, failover should NOT trigger. Note: The
+ * isThresholdsExceeded method adds +1 to account for the current failing call, so we set
+ * failures=1 which becomes 2 with +1, still below minFailures=3.
+ */
+ @Test
+ public void belowMinFailures_doesNotFailover() {
+ when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(metrics.getNumberOfFailedCalls()).thenReturn(1); // +1 becomes 2, still < 3
+ when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0);
+ when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ database.evaluateThresholds(false);
+ verify(circuitBreaker, never()).transitionToOpenState();
+ verify(provider, never()).switchToHealthyDatabase(any(), any());
+ }
+
+ /**
+ * Reaching minFailures and exceeding failure rate threshold should trigger circuit breaker to
+ * OPEN state. Note: The isThresholdsExceeded method adds +1 to account for the current failing
+ * call, so we set failures=2 which becomes 3 with +1, reaching minFailures=3.
+ */
+ @Test
+ public void minFailuresAndRateExceeded_triggersOpenState() {
+ when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, reaching minFailures
+ when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0);
+ when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ database.evaluateThresholds(false);
+ verify(circuitBreaker, times(1)).transitionToOpenState();
+ }
+
+ /**
+ * Even after reaching minFailures, if failure rate is below threshold, do not failover. Note: The
+ * isThresholdsExceeded method adds +1 to account for the current failing call, so we set
+ * failures=2 which becomes 3 with +1, reaching minFailures=3. Rate calculation: (3 failures) / (3
+ * failures + 3 successes) = 50% < 80% threshold.
+ */
+ @Test
+ public void rateBelowThreshold_doesNotFailover() {
+ when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3);
+ when(metrics.getNumberOfSuccessfulCalls()).thenReturn(3);
+ when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, rate = 3/(3+3) = 50%
+ when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(80.0f);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ database.evaluateThresholds(false);
+
+ verify(circuitBreaker, never()).transitionToOpenState();
+ verify(provider, never()).switchToHealthyDatabase(any(), any());
+ }
+
+ @Test
+ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig
+ .builder(java.util.Arrays.asList(MultiDbConfig.DatabaseConfig
+ .builder(new HostAndPort("localhost", 6379), DefaultJedisClientConfig.builder().build())
+ .healthCheckEnabled(false).build()));
+ cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3)
+ .circuitBreakerSlidingWindowSize(10);
+ MultiDbConfig mcc = cfgBuilder.build();
+
+ CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc);
+
+ assertEquals(100.0f, adapter.getFailureRateThreshold(), 0.0001f);
+ assertEquals(Integer.MAX_VALUE, adapter.getMinimumNumberOfCalls());
+ }
+
+ @ParameterizedTest
+ @CsvSource({
+ // Format: "minFails, rate%, success, fails, lastFailRecorded, expected"
+
+ // === Basic threshold crossing cases ===
+ "0, 1.0, 0, 1, false, true", // +1 = 2 fails, rate=100% >= 1%, min=0 -> trigger
+ "0, 1.0, 0, 1, true, true", // +0 = 1 fails, rate=100% >= 1%, min=0 -> trigger
+
+ "1, 1.0, 0, 0, false, true", // +1 = 1 fails, rate=100% >= 1%, min=1 -> trigger
+ "1, 1.0, 0, 0, true, false", // +0 = 0 fails, 0 < 1 min -> no trigger
+
+ "3, 50.0, 0, 2, false, true", // +1 = 3 fails, rate=100% >= 50%, min=3 -> trigger
+ "3, 50.0, 0, 2, true, false", // +0 = 2 fails, 2 < 3 min -> no trigger
+
+ // === Rate threshold boundary cases ===
+ "1, 100.0, 0, 0, false, true", // +1 = 1 fails, rate=100% >= 100%, min=1 -> trigger
+ "1, 100.0, 0, 0, true, false", // +0 = 0 fails, 0 < 1 min -> no trigger
+
+ "0, 100.0, 99, 1, false, false", // +1 = 2 fails, rate=1.98% < 100% -> no trigger
+ "0, 100.0, 99, 1, true, false", // +0 = 1 fails, rate=1.0% < 100% -> no trigger
+
+ "0, 1.0, 99, 1, false, true", // +1 = 2 fails, rate=1.98% >= 1%, min=0 -> trigger
+ "0, 1.0, 99, 1, true, true", // +0 = 1 fails, rate=1.0% >= 1%, min=0 -> trigger
+
+ // === Zero rate threshold (always trigger if min failures met) ===
+ "1, 0.0, 0, 0, false, true", // +1 = 1 fails, rate=100% >= 0%, min=1 -> trigger
+ "1, 0.0, 0, 0, true, false", // +0 = 0 fails, 0 < 1 min -> no trigger
+ "1, 0.0, 100, 0, false, true", // +1 = 1 fails, rate=0.99% >= 0%, min=1 -> trigger
+ "1, 0.0, 100, 0, true, false", // +0 = 0 fails, 0 < 1 min -> no trigger
+
+ // === High minimum failures cases ===
+ "3, 50.0, 3, 1, false, false", // +1 = 2 fails, 2 < 3 min -> no trigger
+ "3, 50.0, 3, 1, true, false", // +0 = 1 fails, 1 < 3 min -> no trigger
+ "1000, 1.0, 198, 2, false, false", // +1 = 3 fails, 3 < 1000 min -> no trigger
+ "1000, 1.0, 198, 2, true, false", // +0 = 2 fails, 2 < 1000 min -> no trigger
+
+ // === Corner cases ===
+ "0, 50.0, 0, 0, false, true", // +1 = 1 fails, rate=100% >= 50%, min=0 -> trigger
+ "0, 50.0, 0, 0, true, false", // +0 = 0 fails, no calls -> no trigger
+ "1, 50.0, 1, 1, false, true", // +1 = 2 fails, rate=66.7% >= 50%, min=1 -> trigger
+ "1, 50.0, 1, 1, true, true", // +0 = 1 fails, rate=50% >= 50%, min=1 -> trigger
+ "2, 33.0, 2, 1, false, true", // +1 = 2 fails, rate=50% >= 33%, min=2 -> trigger
+ "2, 33.0, 2, 1, true, false", // +0 = 1 fails, 1 < 2 min -> no trigger
+ "5, 20.0, 20, 4, false, true", // +1 = 5 fails, rate=20% >= 20%, min=5 -> trigger
+ "5, 20.0, 20, 4, true, false", // +0 = 4 fails, 4 < 5 min -> no trigger
+ "3, 75.0, 1, 2, false, true", // +1 = 3 fails, rate=75% >= 75%, min=3 -> trigger
+ "3, 75.0, 1, 2, true, false", // +0 = 2 fails, 2 < 3 min -> no trigger
+ })
+ public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures,
+ boolean lastFailRecorded, boolean expectOpenState) {
+
+ when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(minFailures);
+ when(metrics.getNumberOfSuccessfulCalls()).thenReturn(successes);
+ when(metrics.getNumberOfFailedCalls()).thenReturn(failures);
+ when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(ratePercent);
+ when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED);
+
+ database.evaluateThresholds(lastFailRecorded);
+
+ if (expectOpenState) {
+ verify(circuitBreaker, times(1)).transitionToOpenState();
+ } else {
+ verify(circuitBreaker, never()).transitionToOpenState();
+ }
+ }
+
+}
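
The decision the CSV matrix encodes fits in a few lines. This is an illustrative re-derivation of the expected column under the stated +1 rule (the current in-flight failure is counted when it has not yet been recorded), not the library's implementation:

    // Illustrative only: mirrors the expected-value column of the matrix above.
    static boolean thresholdsExceeded(int minFailures, float ratePercent, int successes,
        int failures, boolean lastFailRecorded) {
      int effectiveFailures = failures + (lastFailRecorded ? 0 : 1); // +1 for in-flight failure
      int totalCalls = successes + effectiveFailures;
      if (totalCalls == 0 || effectiveFailures < minFailures) return false;
      float failureRate = 100.0f * effectiveFailures / totalCalls;
      return failureRate >= ratePercent;
    }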
diff --git a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
new file mode 100644
index 0000000000..e3e5f3f05e
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java
@@ -0,0 +1,77 @@
+package redis.clients.jedis.mcf;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+import java.time.Duration;
+
+import org.junit.jupiter.api.Test;
+import redis.clients.jedis.DefaultJedisClientConfig;
+import redis.clients.jedis.HostAndPort;
+import redis.clients.jedis.JedisClientConfig;
+import redis.clients.jedis.MultiDbConfig;
+
+public class DefaultValuesTest {
+
+ HostAndPort fakeEndpoint = new HostAndPort("fake", 6379);
+ JedisClientConfig config = DefaultJedisClientConfig.builder().build();
+
+ @Test
+ void testDefaultValuesInConfig() {
+
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
+ .builder(fakeEndpoint, config).build();
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
+
+ // check for grace period
+ assertEquals(60000, multiConfig.getGracePeriod());
+
+ // check for cluster config
+ assertEquals(clusterConfig, multiConfig.getDatabaseConfigs()[0]);
+
+ // check healthchecks enabled
+ assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
+
+ // check default healthcheck strategy is echo
+ assertEquals(EchoStrategy.DEFAULT, clusterConfig.getHealthCheckStrategySupplier());
+
+ // check number of probes
+ assertEquals(3,
+ clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getNumProbes());
+
+ assertEquals(500, clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config)
+ .getDelayInBetweenProbes());
+
+ assertEquals(ProbingPolicy.BuiltIn.ALL_SUCCESS,
+ clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getPolicy());
+
+ // check health check interval
+ assertEquals(5000,
+ clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getInterval());
+
+ // check lag aware tolerance
+ LagAwareStrategy.Config lagAwareConfig = LagAwareStrategy.Config
+ .builder(fakeEndpoint, config.getCredentialsProvider()).build();
+ assertEquals(Duration.ofMillis(5000), lagAwareConfig.getAvailabilityLagTolerance());
+
+ // TODO: check CB number of failures threshold -- 1000
+ // assertEquals(1000, multiConfig.circuitBreakerMinNumOfFailures());
+
+ // check CB failure rate threshold
+ assertEquals(10, multiConfig.getCircuitBreakerFailureRateThreshold());
+
+ // check CB sliding window size
+ assertEquals(2, multiConfig.getCircuitBreakerSlidingWindowSize());
+
+ // check failback check interval
+ assertEquals(120000, multiConfig.getFailbackCheckInterval());
+
+ // check failover max attempts before give up
+ assertEquals(10, multiConfig.getMaxNumFailoverAttempts());
+
+ // check delay between failover attempts
+ assertEquals(12000, multiConfig.getDelayInBetweenFailoverAttempts());
+
+ }
+}
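
Each default asserted above has a corresponding builder setter. A minimal sketch of overriding them, reusing clusterConfig from this test; the last two setter names are assumed to mirror their getters and are not confirmed by this diff:

    MultiDbConfig tuned = new MultiDbConfig.Builder(
        new MultiDbConfig.DatabaseConfig[] { clusterConfig })
            .gracePeriod(30000)                   // default 60000 ms
            .failbackCheckInterval(60000)         // default 120000 ms
            .maxNumFailoverAttempts(5)            // assumed setter; default 10
            .delayInBetweenFailoverAttempts(6000) // assumed setter; default 12000 ms
            .build();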
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
index afedc66f4d..34e521683e 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java
@@ -17,7 +17,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
@ExtendWith(MockitoExtension.class)
class FailbackMechanismIntegrationTest {
@@ -49,40 +49,38 @@ private MockedConstruction mockPool() {
void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with different weights
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f) // Higher weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(false) // Disabled
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
.failbackCheckInterval(100) // Short interval for testing
.build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1 (only healthy option)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster2 healthy again (higher weight - would normally trigger failback)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait longer than failback interval
// Should still be on cluster1 since failback is disabled
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint1) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase());
}
}
}
@@ -91,39 +89,38 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException {
void testFailbackToHigherWeightCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create clusters with different weights
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f) // Higher weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f) // Lower weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(100) // Short interval for testing
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(100) // Short interval for testing
.gracePeriod(100).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (lower weight, but only healthy option)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check interval + some buffer
// Should have failed back to cluster1 (higher weight)
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint1) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase());
}
}
}
@@ -132,43 +129,42 @@ void testFailbackToHigherWeightCluster() throws InterruptedException {
void testNoFailbackToLowerWeightCluster() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
// Create three clusters with different weights to properly test no failback to lower weight
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f) // Lowest weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f) // Medium weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 })
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(100).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint3), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
// Make cluster3 unhealthy to force failover to cluster2 (medium weight)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (highest weight among healthy clusters)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 (lowest weight) healthy - this should NOT trigger failback
// since we don't failback to lower weight clusters
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check interval
// Should still be on cluster2 (no failback to lower weight cluster1)
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase());
}
}
}
@@ -176,39 +172,38 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException {
@Test
void testFailbackToHigherWeightClusterImmediately() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(100).gracePeriod(50).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(100).gracePeriod(50).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (only healthy option)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback check
// Should have failed back to cluster1 immediately (higher weight, no stability period
// required)
await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint1) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase());
}
}
}
@@ -216,45 +211,44 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException
@Test
void testUnhealthyClusterCancelsFailback() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(200).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(200).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster1 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster1 unhealthy to force failover to cluster2
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (only healthy option)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster1 healthy again (should trigger failback attempt)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait a bit
Thread.sleep(100);
// Make cluster1 unhealthy again before failback completes
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Wait past the original failback interval
// Should still be on cluster2 (failback was cancelled due to cluster1 becoming unhealthy)
await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase());
}
}
}
@@ -262,42 +256,41 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException {
@Test
void testMultipleClusterFailbackPriority() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lowest
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Medium
// weight
- MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 })
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint3), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
// Make cluster3 unhealthy to force failover to cluster2 (next highest weight)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (highest weight among healthy clusters)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster3 healthy again
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Wait for failback
// Should fail back to cluster3 (highest weight)
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint3) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint3) == provider.getDatabase());
}
}
}
@@ -305,34 +298,33 @@ void testMultipleClusterFailbackPriority() throws InterruptedException {
@Test
void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(100).gracePeriod(200) // 200ms grace
- // period
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(100).gracePeriod(200) // 200ms grace
+ // period
.build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Now make cluster2 unhealthy - it should be disabled for grace period
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should failover to cluster1
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Cluster2 should be in grace period
- assertTrue(provider.getCluster(endpoint2).isInGracePeriod());
+ assertTrue(provider.getDatabase(endpoint2).isInGracePeriod());
}
}
}
@@ -340,51 +332,50 @@ void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException {
@Test
void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException {
try (MockedConstruction mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower
// weight
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher
// weight
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(50) // Short interval for testing
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(50) // Short interval for testing
.gracePeriod(100) // Short grace period for testing
.build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to start grace period and force failover
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should failover to cluster1
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Cluster2 should be in grace period
- assertTrue(provider.getCluster(endpoint2).isInGracePeriod());
+ assertTrue(provider.getDatabase(endpoint2).isInGracePeriod());
// Make cluster2 healthy again while it's still in grace period
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Should still be on cluster1 because cluster2 is in grace period
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Wait for grace period to expire
// Cluster2 should no longer be in grace period
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> !provider.getCluster(endpoint2).isInGracePeriod());
+ .until(() -> !provider.getDatabase(endpoint2).isInGracePeriod());
// Wait for failback check to run
// Should now failback to cluster2 (higher weight) since grace period has expired
await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS)
- .until(() -> provider.getCluster(endpoint2) == provider.getCluster());
+ .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase());
}
}
}
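
All of these integration tests share one setup shape; condensed into a standalone sketch (host names are placeholders, assertions as in the tests above):

    HostAndPort primary = new HostAndPort("primary", 6379);
    HostAndPort standby = new HostAndPort("standby", 6379);
    JedisClientConfig clientConfig = DefaultJedisClientConfig.builder().build();

    MultiDbConfig.DatabaseConfig high = MultiDbConfig.DatabaseConfig
        .builder(primary, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
    MultiDbConfig.DatabaseConfig low = MultiDbConfig.DatabaseConfig
        .builder(standby, clientConfig).weight(1.0f).healthCheckEnabled(false).build();

    MultiDbConfig config = new MultiDbConfig.Builder(
        new MultiDbConfig.DatabaseConfig[] { high, low })
            .failbackSupported(true)    // allow returning to the higher-weight database
            .failbackCheckInterval(100) // ms; short for tests, default is 120000
            .gracePeriod(100)           // ms; unhealthy database stays disabled this long
            .build();

    try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
      // The highest-weight healthy database is active first.
      assertEquals(provider.getDatabase(primary), provider.getDatabase());
    }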
diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
index 4e57b3c466..ad251975c2 100644
--- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
@ExtendWith(MockitoExtension.class)
class FailbackMechanismUnitTest {
@@ -26,69 +26,65 @@ void setUp() {
@Test
void testFailbackCheckIntervalConfiguration() {
// Test default value
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
- MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ MultiDbConfig defaultConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
- assertEquals(5000, defaultConfig.getFailbackCheckInterval());
+ assertEquals(120000, defaultConfig.getFailbackCheckInterval());
// Test custom value
- MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(3000)
- .build();
+ MultiDbConfig customConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000).build();
assertEquals(3000, customConfig.getFailbackCheckInterval());
}
@Test
void testFailbackSupportedConfiguration() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test default (should be true)
- MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ MultiDbConfig defaultConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
assertTrue(defaultConfig.isFailbackSupported());
// Test disabled
- MultiClusterClientConfig disabledConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(false)
- .build();
+ MultiDbConfig disabledConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false).build();
assertFalse(disabledConfig.isFailbackSupported());
}
@Test
void testFailbackCheckIntervalValidation() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test zero interval (should be allowed)
- MultiClusterClientConfig zeroConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(0)
- .build();
+ MultiDbConfig zeroConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0).build();
assertEquals(0, zeroConfig.getFailbackCheckInterval());
// Test negative interval (should be allowed - implementation decision)
- MultiClusterClientConfig negativeConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(-1000)
- .build();
+ MultiDbConfig negativeConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000).build();
assertEquals(-1000, negativeConfig.getFailbackCheckInterval());
}
@Test
void testBuilderChaining() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test that builder methods can be chained
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true)
.failbackCheckInterval(2000).retryOnFailover(true).build();
assertTrue(config.isFailbackSupported());
@@ -99,47 +95,47 @@ void testBuilderChaining() {
@Test
void testGracePeriodConfiguration() {
// Test default value
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
- MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ MultiDbConfig defaultConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
- assertEquals(10000, defaultConfig.getGracePeriod()); // Default is 10 seconds
+ assertEquals(60000, defaultConfig.getGracePeriod());
// Test custom value
- MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(5000).build();
+ MultiDbConfig customConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build();
assertEquals(5000, customConfig.getGracePeriod());
}
@Test
void testGracePeriodValidation() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test zero grace period (should be allowed)
- MultiClusterClientConfig zeroConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(0).build();
+ MultiDbConfig zeroConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build();
assertEquals(0, zeroConfig.getGracePeriod());
// Test negative grace period (should be allowed - implementation decision)
- MultiClusterClientConfig negativeConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(-1000).build();
+ MultiDbConfig negativeConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(-1000).build();
assertEquals(-1000, negativeConfig.getGracePeriod());
}
@Test
void testGracePeriodBuilderChaining() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).healthCheckEnabled(false).build();
// Test that builder methods can be chained
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(true)
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true)
.failbackCheckInterval(2000).gracePeriod(8000).retryOnFailover(true).build();
assertTrue(config.isFailbackSupported());
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
index 835a655df3..ce12cde8a7 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java
@@ -12,14 +12,13 @@
import org.junit.jupiter.api.Test;
-import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.UnifiedJedis;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
-import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
+import redis.clients.jedis.MultiDbConfig.StrategySupplier;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
import redis.clients.jedis.scenario.RecommendedSettings;
@@ -33,7 +32,7 @@ public class HealthCheckIntegrationTest {
@Test
public void testDisableHealthCheck() {
// No health check strategy supplier means health check is disabled
- MultiClusterPooledConnectionProvider customProvider = getMCCF(null);
+ MultiDbConnectionProvider customProvider = getMCCF(null);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -44,11 +43,10 @@ public void testDisableHealthCheck() {
@Test
public void testDefaultStrategySupplier() {
// Create a default strategy supplier that creates EchoStrategy instances
- MultiClusterClientConfig.StrategySupplier defaultSupplier = (hostAndPort,
- jedisClientConfig) -> {
+ MultiDbConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> {
return new EchoStrategy(hostAndPort, jedisClientConfig);
};
- MultiClusterPooledConnectionProvider customProvider = getMCCF(defaultSupplier);
+ MultiDbConnectionProvider customProvider = getMCCF(defaultSupplier);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -59,8 +57,7 @@ public void testDefaultStrategySupplier() {
@Test
public void testCustomStrategySupplier() {
// Create a StrategySupplier that uses the JedisClientConfig when available
- MultiClusterClientConfig.StrategySupplier strategySupplier = (hostAndPort,
- jedisClientConfig) -> {
+ MultiDbConfig.StrategySupplier strategySupplier = (hostAndPort, jedisClientConfig) -> {
return new TestHealthCheckStrategy(HealthCheckStrategy.Config.builder().interval(500)
.timeout(500).numProbes(1).policy(BuiltIn.ANY_SUCCESS).build(), (endpoint) -> {
// Create connection per health check to avoid resource leak
@@ -73,7 +70,7 @@ public void testCustomStrategySupplier() {
});
};
- MultiClusterPooledConnectionProvider customProvider = getMCCF(strategySupplier);
+ MultiDbConnectionProvider customProvider = getMCCF(strategySupplier);
try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) {
// Verify that the client can connect and execute commands
String result = customClient.ping();
@@ -81,25 +78,21 @@ public void testCustomStrategySupplier() {
}
}
- private MultiClusterPooledConnectionProvider getMCCF(
- MultiClusterClientConfig.StrategySupplier strategySupplier) {
- Function<ClusterConfig.Builder, ClusterConfig.Builder> modifier = builder -> strategySupplier == null
+ private MultiDbConnectionProvider getMCCF(MultiDbConfig.StrategySupplier strategySupplier) {
+ Function<DatabaseConfig.Builder, DatabaseConfig.Builder> modifier = builder -> strategySupplier == null
? builder.healthCheckEnabled(false)
: builder.healthCheckStrategySupplier(strategySupplier);
- List<ClusterConfig> clusterConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 })
+ List<DatabaseConfig> databaseConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 })
.map(e -> modifier
- .apply(MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig))
- .build())
+ .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)).build())
.collect(Collectors.toList());
- MultiClusterClientConfig mccf = new MultiClusterClientConfig.Builder(clusterConfigs)
- .retryMaxAttempts(1).retryWaitDuration(1)
- .circuitBreakerSlidingWindowType(SlidingWindowType.COUNT_BASED)
- .circuitBreakerSlidingWindowSize(1).circuitBreakerFailureRateThreshold(100)
- .circuitBreakerSlidingWindowMinCalls(1).build();
+ MultiDbConfig mccf = new MultiDbConfig.Builder(databaseConfigs).retryMaxAttempts(1)
+ .retryWaitDuration(1).circuitBreakerSlidingWindowSize(1)
+ .circuitBreakerFailureRateThreshold(100).build();
- return new MultiClusterPooledConnectionProvider(mccf);
+ return new MultiDbConnectionProvider(mccf);
}
// ========== Probe Logic Integration Tests ==========
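
The supplier wiring exercised above, as a freestanding sketch; hostAndPort and clientConfig stand in for the test's endpoint fields:

    // Builds one health-check strategy per endpoint from its client config.
    MultiDbConfig.StrategySupplier supplier = (hp, cfg) ->
        new EchoStrategy(hp, cfg, HealthCheckStrategy.Config.builder()
            .interval(500) // ms between health checks
            .timeout(250)  // ms per probe
            .numProbes(1).build());

    MultiDbConfig.DatabaseConfig db = MultiDbConfig.DatabaseConfig
        .builder(hostAndPort, clientConfig).healthCheckStrategySupplier(supplier).build();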
diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
index d81e012c1e..b83ecb8981 100644
--- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java
@@ -9,7 +9,7 @@
import redis.clients.jedis.Endpoint;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
@@ -280,8 +280,11 @@ void testHealthStatusManagerLifecycle() throws InterruptedException {
// Register listener before adding health check to capture the initial event
manager.registerListener(testEndpoint, listener);
+ HealthCheckStrategy delayedStrategy = new TestHealthCheckStrategy(2000, 1000, 3,
+ BuiltIn.ALL_SUCCESS, 100, e -> HealthStatus.HEALTHY);
+
// Add health check - this will start async health checking
- manager.add(testEndpoint, alwaysHealthyStrategy);
+ manager.add(testEndpoint, delayedStrategy);
// Initially should still be UNKNOWN until first check completes
assertEquals(HealthStatus.UNKNOWN, manager.getHealthStatus(testEndpoint));
@@ -335,7 +338,7 @@ void testEchoStrategyCustomIntervalTimeout() {
@Test
void testEchoStrategyDefaultSupplier() {
- MultiClusterClientConfig.StrategySupplier supplier = EchoStrategy.DEFAULT;
+ MultiDbConfig.StrategySupplier supplier = EchoStrategy.DEFAULT;
HealthCheckStrategy strategy = supplier.get(testEndpoint, testConfig);
assertInstanceOf(EchoStrategy.class, strategy);
@@ -345,12 +348,12 @@ void testEchoStrategyDefaultSupplier() {
@Test
void testNewFieldLocations() {
- // Test new field locations in ClusterConfig and MultiClusterClientConfig
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ // Test new field locations in DatabaseConfig and MultiDbConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).weight(2.5f).build();
- MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).retryOnFailover(true)
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true)
.failbackSupported(false).build();
assertEquals(2.5f, clusterConfig.getWeight());
@@ -360,8 +363,8 @@ void testNewFieldLocations() {
@Test
void testDefaultValues() {
- // Test default values in ClusterConfig
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ // Test default values in DatabaseConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).build();
assertEquals(1.0f, clusterConfig.getWeight()); // Default weight
@@ -371,22 +374,21 @@ void testDefaultValues() {
// health
// check)
- // Test default values in MultiClusterClientConfig
- MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build();
+ // Test default values in MultiDbConfig
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
assertFalse(multiConfig.isRetryOnFailover()); // Default is false
assertTrue(multiConfig.isFailbackSupported()); // Default is true
}
@Test
- void testClusterConfigWithHealthCheckStrategy() {
+ void testDatabaseConfigWithHealthCheckStrategy() {
HealthCheckStrategy customStrategy = mock(HealthCheckStrategy.class);
- MultiClusterClientConfig.StrategySupplier supplier = (hostAndPort,
- jedisClientConfig) -> customStrategy;
+ MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> customStrategy;
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(supplier).build();
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -396,35 +398,34 @@ void testClusterConfigWithHealthCheckStrategy() {
}
@Test
- void testClusterConfigWithStrategySupplier() {
- MultiClusterClientConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> {
+ void testDatabaseConfigWithStrategySupplier() {
+ MultiDbConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> {
return mock(HealthCheckStrategy.class);
};
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(customSupplier).build();
assertEquals(customSupplier, clusterConfig.getHealthCheckStrategySupplier());
}
@Test
- void testClusterConfigWithEchoStrategy() {
- MultiClusterClientConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> {
+ void testDatabaseConfigWithEchoStrategy() {
+ MultiDbConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> {
return new EchoStrategy(hostAndPort, jedisClientConfig);
};
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(echoSupplier).build();
- MultiClusterClientConfig.StrategySupplier supplier = clusterConfig
- .getHealthCheckStrategySupplier();
+ MultiDbConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier();
assertNotNull(supplier);
assertInstanceOf(EchoStrategy.class, supplier.get(testEndpoint, testConfig));
}
@Test
- void testClusterConfigWithDefaultHealthCheck() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ void testDatabaseConfigWithDefaultHealthCheck() {
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).build(); // Should use default EchoStrategy
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -432,16 +433,16 @@ void testClusterConfigWithDefaultHealthCheck() {
}
@Test
- void testClusterConfigWithDisabledHealthCheck() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ void testDatabaseConfigWithDisabledHealthCheck() {
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckEnabled(false).build();
assertNull(clusterConfig.getHealthCheckStrategySupplier());
}
@Test
- void testClusterConfigHealthCheckEnabledExplicitly() {
- MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig
+ void testDatabaseConfigHealthCheckEnabledExplicitly() {
+ MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckEnabled(true).build();
assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
@@ -513,7 +514,7 @@ void testHealthCheckIntegration() throws InterruptedException {
@Test
void testStrategySupplierPolymorphism() {
// Test that the polymorphic design works correctly
- MultiClusterClientConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> {
+ MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> {
if (jedisClientConfig != null) {
return new EchoStrategy(hostAndPort, jedisClientConfig,
HealthCheckStrategy.Config.builder().interval(500).timeout(250).numProbes(1).build());
@@ -531,7 +532,7 @@ void testStrategySupplierPolymorphism() {
// Test without config
HealthCheckStrategy strategyWithoutConfig = supplier.get(testEndpoint, null);
assertNotNull(strategyWithoutConfig);
- assertEquals(1000, strategyWithoutConfig.getInterval()); // Default values
+ assertEquals(5000, strategyWithoutConfig.getInterval()); // Default values
assertEquals(1000, strategyWithoutConfig.getTimeout());
}
@@ -785,7 +786,7 @@ void testPolicy_Majority_EarlyFailStopsAtTwo() throws Exception {
CountDownLatch unhealthyLatch = new CountDownLatch(1);
TestHealthCheckStrategy strategy = new TestHealthCheckStrategy(
- HealthCheckStrategy.Config.builder().interval(5).timeout(200).numProbes(4)
+ HealthCheckStrategy.Config.builder().interval(5000).timeout(200).numProbes(4)
.policy(BuiltIn.MAJORITY_SUCCESS).delayInBetweenProbes(5).build(),
e -> {
int c = callCount.incrementAndGet();
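
Note on the timing changes in the hunks above: the default health-check interval moves from 1000 ms to 5000 ms, which is why these assertions and probe configs were updated. As a minimal sketch (assuming only the builder methods already exercised in this patch), a test can pin probe timing explicitly instead of relying on the defaults:

    HealthCheckStrategy.Config probeConfig = HealthCheckStrategy.Config.builder()
        .interval(5000)                     // ms between health checks (the new default)
        .timeout(1000)                      // ms before a single probe is considered failed
        .numProbes(3)                       // probes evaluated per check
        .policy(BuiltIn.MAJORITY_SUCCESS)   // a majority of probes must succeed
        .delayInBetweenProbes(5)            // ms between consecutive probes
        .build();
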
diff --git a/src/test/java/redis/clients/jedis/mcf/LagAwareStrategyUnitTest.java b/src/test/java/redis/clients/jedis/mcf/LagAwareStrategyUnitTest.java
index 459b3be8c3..c985c0d06d 100644
--- a/src/test/java/redis/clients/jedis/mcf/LagAwareStrategyUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/LagAwareStrategyUnitTest.java
@@ -50,7 +50,7 @@ void healthy_when_bdb_available_and_cached_uid_used_on_next_check() throws Excep
try (MockedConstruction<RedisRestAPI> mockedConstructor = mockConstruction(RedisRestAPI.class,
(mock, context) -> {
when(mock.getBdbs()).thenReturn(Arrays.asList(bdbInfo));
- when(mock.checkBdbAvailability("1", true, 100L)).thenReturn(true);
+ when(mock.checkBdbAvailability("1", true, 5000L)).thenReturn(true);
reference[0] = mock;
})) {
Config lagCheckConfig = Config.builder(endpoint, creds).interval(500).timeout(250)
@@ -61,7 +61,7 @@ void healthy_when_bdb_available_and_cached_uid_used_on_next_check() throws Excep
assertEquals(HealthStatus.HEALTHY, strategy.doHealthCheck(endpoint));
verify(api, times(1)).getBdbs(); // Should not call getBdbs again when cached
- verify(api, times(2)).checkBdbAvailability("1", true, 100L);
+ verify(api, times(2)).checkBdbAvailability("1", true, 5000L);
}
}
}
@@ -97,7 +97,7 @@ void exception_and_cache_reset_on_exception_then_recovers_next_time() throws Exc
// First call throws exception, second call returns bdbInfo
when(mock.getBdbs()).thenThrow(new RuntimeException("boom"))
.thenReturn(Arrays.asList(bdbInfo));
- when(mock.checkBdbAvailability("42", true, 100L)).thenReturn(true);
+ when(mock.checkBdbAvailability("42", true, 5000L)).thenReturn(true);
reference[0] = mock;
})) {
@@ -115,7 +115,7 @@ void exception_and_cache_reset_on_exception_then_recovers_next_time() throws Exc
// Verify getBdbs was called twice (once failed, once succeeded)
verify(api, times(2)).getBdbs();
// Verify availability check was called only once (on the successful attempt)
- verify(api, times(1)).checkBdbAvailability("42", true, 100L);
+ verify(api, times(1)).checkBdbAvailability("42", true, 5000L);
}
}
}
@@ -173,10 +173,10 @@ void exception_when_no_matching_host_found() throws Exception {
void config_builder_creates_config_with_default_values() {
Config config = Config.builder(endpoint, creds).build();
- assertEquals(1000, config.interval);
+ assertEquals(5000, config.interval);
assertEquals(1000, config.timeout);
assertEquals(3, config.numProbes);
- assertEquals(Duration.ofMillis(100), config.getAvailabilityLagTolerance());
+ assertEquals(Duration.ofMillis(5000), config.getAvailabilityLagTolerance());
assertEquals(endpoint, config.getRestEndpoint());
assertEquals(creds, config.getCredentialsSupplier());
}
@@ -288,7 +288,7 @@ void base_config_builder_factory_method_works() {
void base_config_create_factory_method_uses_defaults() {
HealthCheckStrategy.Config config = HealthCheckStrategy.Config.create();
- assertEquals(1000, config.getInterval());
+ assertEquals(5000, config.getInterval());
assertEquals(1000, config.getTimeout());
assertEquals(3, config.getNumProbes());
}
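
The same default shift applies to LagAwareStrategy: its Config now defaults to a 5000 ms check interval and a 5000 ms availability-lag tolerance, as asserted above. A minimal sketch of overriding those defaults, using only the builder calls this test already makes ('endpoint' being the Redis Enterprise REST endpoint and 'creds' the credentials supplier, both as in the test fixture):

    Config lagCheckConfig = Config.builder(endpoint, creds)
        .interval(500)   // check every 500 ms instead of the 5000 ms default
        .timeout(250)    // fail a single REST probe after 250 ms
        .build();
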
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
deleted file mode 100644
index 7b580042dc..0000000000
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java
+++ /dev/null
@@ -1,162 +0,0 @@
-package redis.clients.jedis.mcf;
-
-import static org.junit.jupiter.api.Assertions.*;
-import static org.mockito.Mockito.*;
-
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.MockedConstruction;
-import org.mockito.junit.jupiter.MockitoExtension;
-
-import redis.clients.jedis.Connection;
-import redis.clients.jedis.ConnectionPool;
-import redis.clients.jedis.DefaultJedisClientConfig;
-import redis.clients.jedis.HostAndPort;
-import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
-import redis.clients.jedis.exceptions.JedisValidationException;
-
-/**
- * Tests for MultiClusterPooledConnectionProvider initialization edge cases
- */
-@ExtendWith(MockitoExtension.class)
-public class MultiClusterInitializationTest {
-
- private HostAndPort endpoint1;
- private HostAndPort endpoint2;
- private HostAndPort endpoint3;
- private JedisClientConfig clientConfig;
-
- @BeforeEach
- void setUp() {
- endpoint1 = new HostAndPort("localhost", 6379);
- endpoint2 = new HostAndPort("localhost", 6380);
- endpoint3 = new HostAndPort("localhost", 6381);
- clientConfig = DefaultJedisClientConfig.builder().build();
- }
-
- private MockedConstruction<ConnectionPool> mockPool() {
- Connection mockConnection = mock(Connection.class);
- lenient().when(mockConnection.ping()).thenReturn(true);
- return mockConstruction(ConnectionPool.class, (mock, context) -> {
- when(mock.getResource()).thenReturn(mockConnection);
- doNothing().when(mock).close();
- });
- }
-
- @Test
- void testInitializationWithMixedHealthCheckConfiguration() {
- try (MockedConstruction<ConnectionPool> mockedPool = mockPool()) {
- // Create clusters with mixed health check configuration
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
- .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false) // No health
- // check
- .build();
-
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
- .builder(endpoint2, clientConfig).weight(2.0f)
- .healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With
- // health
- // check
- .build();
-
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
-
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
- // Should initialize successfully
- assertNotNull(provider.getCluster());
-
- // Should select cluster1 (no health check, assumed healthy) or cluster2 based on weight
- // Since cluster2 has higher weight and health checks, it should be selected if healthy
- assertTrue(provider.getCluster() == provider.getCluster(endpoint1)
- || provider.getCluster() == provider.getCluster(endpoint2));
- }
- }
- }
-
- @Test
- void testInitializationWithAllHealthChecksDisabled() {
- try (MockedConstruction<ConnectionPool> mockedPool = mockPool()) {
- // Create clusters with no health checks
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
- .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
- .builder(endpoint2, clientConfig).weight(3.0f) // Higher weight
- .healthCheckEnabled(false).build();
-
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
-
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
- // Should select cluster2 (highest weight, no health checks)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
- }
- }
- }
-
- @Test
- void testInitializationWithSingleCluster() {
- try (MockedConstruction<ConnectionPool> mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster = MultiClusterClientConfig.ClusterConfig
- .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster }).build();
-
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
- // Should select the only available cluster
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
- }
- }
- }
-
- @Test
- void testErrorHandlingWithNullConfiguration() {
- assertThrows(JedisValidationException.class, () -> {
- new MultiClusterPooledConnectionProvider(null);
- });
- }
-
- @Test
- void testErrorHandlingWithEmptyClusterArray() {
- assertThrows(JedisValidationException.class, () -> {
- new MultiClusterClientConfig.Builder(new MultiClusterClientConfig.ClusterConfig[0]).build();
- });
- }
-
- @Test
- void testErrorHandlingWithNullClusterConfig() {
- assertThrows(IllegalArgumentException.class, () -> {
- new MultiClusterClientConfig.Builder(new MultiClusterClientConfig.ClusterConfig[] { null })
- .build();
- });
- }
-
- @Test
- void testInitializationWithZeroWeights() {
- try (MockedConstruction<ConnectionPool> mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
- .builder(endpoint1, clientConfig).weight(0.0f) // Zero weight
- .healthCheckEnabled(false).build();
-
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
- .builder(endpoint2, clientConfig).weight(0.0f) // Zero weight
- .healthCheckEnabled(false).build();
-
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
-
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
- // Should still initialize and select one of the clusters
- assertNotNull(provider.getCluster());
- }
- }
- }
-}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java
deleted file mode 100644
index f5076694c8..0000000000
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package redis.clients.jedis.mcf;
-
-import redis.clients.jedis.Endpoint;
-
-public class MultiClusterPooledConnectionProviderHelper {
-
- public static void onHealthStatusChange(MultiClusterPooledConnectionProvider provider,
- Endpoint endpoint, HealthStatus oldStatus, HealthStatus newStatus) {
- provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus));
- }
-
- public static void periodicFailbackCheck(MultiClusterPooledConnectionProvider provider) {
- provider.periodicFailbackCheck();
- }
-
- public static Endpoint switchToHealthyCluster(MultiClusterPooledConnectionProvider provider,
- SwitchReason reason, MultiClusterPooledConnectionProvider.Cluster iterateFrom) {
- return provider.switchToHealthyCluster(reason, iterateFrom);
- }
-}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java
new file mode 100644
index 0000000000..7a0f4319c6
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java
@@ -0,0 +1,244 @@
+package redis.clients.jedis.mcf;
+
+import static org.junit.jupiter.api.Assertions.*;
+import static org.mockito.ArgumentMatchers.*;
+import static org.mockito.Mockito.*;
+
+import io.github.resilience4j.circuitbreaker.CircuitBreaker;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import redis.clients.jedis.BuilderFactory;
+import redis.clients.jedis.CommandArguments;
+import redis.clients.jedis.CommandObject;
+import redis.clients.jedis.Connection;
+import redis.clients.jedis.DefaultJedisClientConfig;
+import redis.clients.jedis.HostAndPort;
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
+import redis.clients.jedis.Protocol;
+import redis.clients.jedis.exceptions.JedisConnectionException;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
+import redis.clients.jedis.util.ReflectionTestUtil;
+
+/**
+ * Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures
+ * must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the
+ * provider and cluster wiring to avoid network I/O.
+ */
+public class MultiDbCircuitBreakerThresholdsTest {
+
+ private MultiDbConnectionProvider realProvider;
+ private MultiDbConnectionProvider spyProvider;
+ private Database cluster;
+ private MultiDbCommandExecutor executor;
+ private CommandObject<String> dummyCommand;
+ private TrackingConnectionPool poolMock;
+ private HostAndPort fakeEndpoint = new HostAndPort("fake", 6379);
+ private HostAndPort fakeEndpoint2 = new HostAndPort("fake2", 6379);
+ private DatabaseConfig[] fakeDatabaseConfigs;
+
+ @BeforeEach
+ public void setup() throws Exception {
+
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[] {
+ DatabaseConfig.builder(fakeEndpoint, DefaultJedisClientConfig.builder().build())
+ .healthCheckEnabled(false).weight(1.0f).build(),
+ DatabaseConfig.builder(fakeEndpoint2, DefaultJedisClientConfig.builder().build())
+ .healthCheckEnabled(false).weight(0.5f).build() };
+ fakeDatabaseConfigs = databaseConfigs;
+
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(databaseConfigs)
+ .circuitBreakerFailureRateThreshold(50.0f).circuitBreakerMinNumOfFailures(3)
+ .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false);
+
+ MultiDbConfig mcc = cfgBuilder.build();
+
+ realProvider = new MultiDbConnectionProvider(mcc);
+ spyProvider = spy(realProvider);
+
+ cluster = spyProvider.getDatabase();
+
+ executor = new MultiDbCommandExecutor(spyProvider);
+
+ dummyCommand = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
+ BuilderFactory.STRING);
+
+ // Replace the cluster's pool with a mock to avoid real network I/O
+ poolMock = mock(TrackingConnectionPool.class);
+ ReflectionTestUtil.setField(cluster, "connectionPool", poolMock);
+ }
+
+ /**
+ * Below minimum failures; even if all calls are failures, failover should NOT trigger.
+ */
+ @Test
+ public void belowMinFailures_doesNotFailover() {
+ // Always failing connections
+ Connection failing = mock(Connection.class);
+ when(failing.executeCommand(org.mockito.Mockito.<CommandObject<String>> any()))
+ .thenThrow(new JedisConnectionException("fail"));
+ doNothing().when(failing).close();
+ when(poolMock.getResource()).thenReturn(failing);
+
+ for (int i = 0; i < 2; i++) {
+ assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand));
+ }
+
+ // Below min failures; CB remains CLOSED
+ assertEquals(CircuitBreaker.State.CLOSED, spyProvider.getDatabaseCircuitBreaker().getState());
+ }
+
+ /**
+ * Reaching minFailures and exceeding failure rate threshold should trigger failover.
+ */
+ @Test
+ public void minFailuresAndRateExceeded_triggersFailover() {
+ // Always failing connections
+ Connection failing = mock(Connection.class);
+ when(failing.executeCommand(org.mockito.Mockito.<CommandObject<String>> any()))
+ .thenThrow(new JedisConnectionException("fail"));
+ doNothing().when(failing).close();
+ when(poolMock.getResource()).thenReturn(failing);
+
+ // Reach min failures and exceed rate threshold
+ for (int i = 0; i < 3; i++) {
+ assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand));
+ }
+
+ // Next call should hit open CB (CallNotPermitted) and trigger failover
+ assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand));
+
+ verify(spyProvider, atLeastOnce()).switchToHealthyDatabase(eq(SwitchReason.CIRCUIT_BREAKER),
+ any());
+ assertEquals(CircuitBreaker.State.FORCED_OPEN,
+ spyProvider.getDatabase(fakeEndpoint).getCircuitBreaker().getState());
+ }
+
+ /**
+ * Even after reaching minFailures, if failure rate is below threshold, do not failover.
+ */
+ @Test
+ public void rateBelowThreshold_doesNotFailover() throws Exception {
+ // Use local provider with higher threshold (80%) and no retries
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs)
+ .circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3)
+ .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false);
+ MultiDbConnectionProvider rp = new MultiDbConnectionProvider(cfgBuilder.build());
+ MultiDbConnectionProvider sp = spy(rp);
+ Database c = sp.getDatabase();
+ try (MultiDbCommandExecutor ex = new MultiDbCommandExecutor(sp)) {
+ CommandObject<String> cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
+ BuilderFactory.STRING);
+
+ TrackingConnectionPool pool = mock(TrackingConnectionPool.class);
+ ReflectionTestUtil.setField(c, "connectionPool", pool);
+
+ // 3 successes
+ Connection success = mock(Connection.class);
+ when(success.executeCommand(org.mockito.Mockito.<CommandObject<String>> any()))
+ .thenReturn("PONG");
+ doNothing().when(success).close();
+ when(pool.getResource()).thenReturn(success);
+ for (int i = 0; i < 3; i++) {
+ assertEquals("PONG", ex.executeCommand(cmd));
+ }
+
+ // 3 failures -> total 6 calls, 50% failure rate; threshold 80% means stay CLOSED
+ Connection failing = mock(Connection.class);
+ when(failing.executeCommand(org.mockito.Mockito.<CommandObject<String>> any()))
+ .thenThrow(new JedisConnectionException("fail"));
+ doNothing().when(failing).close();
+ when(pool.getResource()).thenReturn(failing);
+ for (int i = 0; i < 3; i++) {
+ assertThrows(JedisConnectionException.class, () -> ex.executeCommand(cmd));
+ }
+
+ assertEquals(CircuitBreaker.State.CLOSED, sp.getDatabaseCircuitBreaker().getState());
+ }
+ }
+
+ @Test
+ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() {
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs);
+ cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3)
+ .circuitBreakerSlidingWindowSize(10);
+ MultiDbConfig mcc = cfgBuilder.build();
+
+ CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc);
+
+ assertEquals(100.0f, adapter.getFailureRateThreshold(), 0.0001f);
+ assertEquals(Integer.MAX_VALUE, adapter.getMinimumNumberOfCalls());
+ }
+
+ @ParameterizedTest
+ @CsvSource({
+ // minFailures, ratePercent, successes, failures, expectFailoverOnNext
+ "0, 1.0, 0, 1, true", //
+ "1, 1.0, 0, 1, true", //
+ "3, 50.0, 0, 3, true", //
+ "1, 100.0, 0, 1, true", //
+ "0, 100.0, 99, 1, false", //
+ "0, 1.0, 99, 1, true", //
+ // additional edge cases
+ "1, 0.0, 0, 1, true", //
+ "3, 50.0, 3, 2, false", //
+ "1000, 1.0, 198, 2, false", })
+ public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures,
+ boolean expectFailoverOnNext) throws Exception {
+
+ MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs)
+ .circuitBreakerFailureRateThreshold(ratePercent).circuitBreakerMinNumOfFailures(minFailures)
+ .circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1)
+ .retryOnFailover(false);
+
+ MultiDbConnectionProvider real = new MultiDbConnectionProvider(cfgBuilder.build());
+ MultiDbConnectionProvider spy = spy(real);
+ Database c = spy.getDatabase();
+ try (MultiDbCommandExecutor ex = new MultiDbCommandExecutor(spy)) {
+
+ CommandObject<String> cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING),
+ BuilderFactory.STRING);
+
+ TrackingConnectionPool pool = mock(TrackingConnectionPool.class);
+ ReflectionTestUtil.setField(c, "connectionPool", pool);
+
+ if (successes > 0) {
+ Connection ok = mock(Connection.class);
+ when(ok.executeCommand(org.mockito.Mockito.<CommandObject<String>> any()))
+ .thenReturn("PONG");
+ doNothing().when(ok).close();
+ when(pool.getResource()).thenReturn(ok);
+ for (int i = 0; i < successes; i++) {
+ ex.executeCommand(cmd);
+ }
+ }
+
+ if (failures > 0) {
+ Connection bad = mock(Connection.class);
+ when(bad.executeCommand(org.mockito.Mockito.<CommandObject<String>> any()))
+ .thenThrow(new JedisConnectionException("fail"));
+ doNothing().when(bad).close();
+ when(pool.getResource()).thenReturn(bad);
+ for (int i = 0; i < failures; i++) {
+ try {
+ ex.executeCommand(cmd);
+ } catch (Exception ignore) {
+ }
+ }
+ }
+
+ if (expectFailoverOnNext) {
+ assertThrows(Exception.class, () -> ex.executeCommand(cmd));
+ verify(spy, atLeastOnce()).switchToHealthyDatabase(eq(SwitchReason.CIRCUIT_BREAKER), any());
+ assertEquals(CircuitBreaker.State.FORCED_OPEN, c.getCircuitBreaker().getState());
+ } else {
+ CircuitBreaker.State st = c.getCircuitBreaker().getState();
+ assertTrue(st == CircuitBreaker.State.CLOSED || st == CircuitBreaker.State.HALF_OPEN);
+ }
+ }
+ }
+
+}
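
The class above pins down the new dual-threshold semantics: the circuit breaker opens only when both the minimum number of failures and the failure-rate threshold are exceeded within the sliding window. A condensed sketch of that wiring, with 'databaseConfigs' as built in setup():

    MultiDbConfig config = MultiDbConfig.builder(databaseConfigs)
        .circuitBreakerMinNumOfFailures(3)           // at least 3 recorded failures...
        .circuitBreakerFailureRateThreshold(50.0f)   // ...and a failure rate of 50% or more
        .circuitBreakerSlidingWindowSize(10)         // evaluated over the last 10 calls
        .build();

As providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls shows, a rate of 0.0f is special-cased by CircuitBreakerThresholdsAdapter into a 100% threshold with Integer.MAX_VALUE minimum calls, effectively disabling rate-based tripping.
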
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java
similarity index 52%
rename from src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java
index 90ff443794..ceb5cc021c 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java
@@ -5,14 +5,13 @@
import org.mockito.MockedConstruction;
import redis.clients.jedis.Connection;
-import redis.clients.jedis.ConnectionPool;
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.EndpointConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.HostAndPorts;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisValidationException;
import static org.junit.jupiter.api.Assertions.*;
@@ -21,9 +20,9 @@
import static org.mockito.Mockito.mockConstruction;
import static org.mockito.Mockito.when;
-public class MultiClusterDynamicEndpointUnitTest {
+public class MultiDbConnectionProviderDynamicEndpointUnitTest {
- private MultiClusterPooledConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
private JedisClientConfig clientConfig;
private final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("standalone0");
private final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("standalone1");
@@ -33,42 +32,42 @@ void setUp() {
clientConfig = DefaultJedisClientConfig.builder().build();
// Create initial provider with endpoint1
- ClusterConfig initialConfig = createClusterConfig(endpoint1.getHostAndPort(), 1.0f);
+ DatabaseConfig initialConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(
- new ClusterConfig[] { initialConfig }).build();
+ MultiDbConfig multiConfig = new MultiDbConfig.Builder(new DatabaseConfig[] { initialConfig })
+ .build();
- provider = new MultiClusterPooledConnectionProvider(multiConfig);
+ provider = new MultiDbConnectionProvider(multiConfig);
}
- // Helper method to create cluster configurations
- private ClusterConfig createClusterConfig(HostAndPort hostAndPort, float weight) {
+ // Helper method to create database configurations
+ private DatabaseConfig createDatabaseConfig(HostAndPort hostAndPort, float weight) {
// Disable health check for unit tests to avoid real connections
- return ClusterConfig.builder(hostAndPort, clientConfig).weight(weight).healthCheckEnabled(false)
- .build();
+ return DatabaseConfig.builder(hostAndPort, clientConfig).weight(weight)
+ .healthCheckEnabled(false).build();
}
@Test
- void testAddNewCluster() {
- ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f);
+ void testAddNewDatabase() {
+ DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
// Should not throw exception
assertDoesNotThrow(() -> provider.add(newConfig));
- // Verify the cluster was added by checking it can be retrieved
- assertNotNull(provider.getCluster(endpoint2.getHostAndPort()));
+ // Verify the database was added by checking it can be retrieved
+ assertNotNull(provider.getDatabase(endpoint2.getHostAndPort()));
}
@Test
- void testAddDuplicateCluster() {
- ClusterConfig duplicateConfig = createClusterConfig(endpoint1.getHostAndPort(), 2.0f);
+ void testAddDuplicateDatabase() {
+ DatabaseConfig duplicateConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 2.0f);
// Should throw validation exception for duplicate endpoint
assertThrows(JedisValidationException.class, () -> provider.add(duplicateConfig));
}
@Test
- void testAddNullClusterConfig() {
+ void testAddNullDatabaseConfig() {
// Should throw validation exception for null config
assertThrows(JedisValidationException.class, () -> provider.add(null));
}
@@ -80,26 +79,24 @@ void testRemoveExistingCluster() {
try (MockedConstruction mockedPool = mockPool(mockConnection)) {
// Create initial provider with endpoint1
- ClusterConfig clusterConfig1 = createClusterConfig(endpoint1.getHostAndPort(), 1.0f);
+ DatabaseConfig dbConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiClusterClientConfig multiConfig = MultiClusterClientConfig
- .builder(new ClusterConfig[] { clusterConfig1 }).build();
+ MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { dbConfig1 }).build();
- try (
- MultiClusterPooledConnectionProvider providerWithMockedPool = new MultiClusterPooledConnectionProvider(
- multiConfig)) {
+ try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider(
+ multiConfig)) {
- // Add endpoint2 as second cluster
- ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f);
+ // Add endpoint2 as second database
+ DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
providerWithMockedPool.add(newConfig);
- // Now remove endpoint1 (original cluster)
+ // Now remove endpoint1 (original database)
assertDoesNotThrow(() -> providerWithMockedPool.remove(endpoint1.getHostAndPort()));
// Verify endpoint1 was removed
- assertNull(providerWithMockedPool.getCluster(endpoint1.getHostAndPort()));
+ assertNull(providerWithMockedPool.getDatabase(endpoint1.getHostAndPort()));
// Verify endpoint2 still exists
- assertNotNull(providerWithMockedPool.getCluster(endpoint2.getHostAndPort()));
+ assertNotNull(providerWithMockedPool.getDatabase(endpoint2.getHostAndPort()));
}
}
}
@@ -120,8 +117,8 @@ void testRemoveNonExistentCluster() {
}
@Test
- void testRemoveLastRemainingCluster() {
- // Should throw validation exception when trying to remove the last cluster
+ void testRemoveLastRemainingDatabase() {
+ // Should throw validation exception when trying to remove the last database
assertThrows(JedisValidationException.class, () -> provider.remove(endpoint1.getHostAndPort()));
}
@@ -133,41 +130,41 @@ void testRemoveNullEndpoint() {
@Test
void testAddAndRemoveMultipleClusters() {
- // Add endpoint2 as second cluster
- ClusterConfig config2 = createClusterConfig(endpoint2.getHostAndPort(), 2.0f);
+ // Add endpoint2 as second database
+ DatabaseConfig config2 = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
// Create a third endpoint for this test
HostAndPort endpoint3 = new HostAndPort("localhost", 6381);
- ClusterConfig config3 = createClusterConfig(endpoint3, 3.0f);
+ DatabaseConfig config3 = createDatabaseConfig(endpoint3, 3.0f);
provider.add(config2);
provider.add(config3);
- // Verify all clusters exist
- assertNotNull(provider.getCluster(endpoint1.getHostAndPort()));
- assertNotNull(provider.getCluster(endpoint2.getHostAndPort()));
- assertNotNull(provider.getCluster(endpoint3));
+ // Verify all databases exist
+ assertNotNull(provider.getDatabase(endpoint1.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint2.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint3));
// Remove endpoint2
provider.remove(endpoint2.getHostAndPort());
- // Verify correct cluster was removed
- assertNull(provider.getCluster(endpoint2.getHostAndPort()));
- assertNotNull(provider.getCluster(endpoint1.getHostAndPort()));
- assertNotNull(provider.getCluster(endpoint3));
+ // Verify correct database was removed
+ assertNull(provider.getDatabase(endpoint2.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint1.getHostAndPort()));
+ assertNotNull(provider.getDatabase(endpoint3));
}
@Test
void testActiveClusterHandlingOnAdd() {
- // The initial cluster should be active
- assertNotNull(provider.getCluster());
+ // The initial database should be active
+ assertNotNull(provider.getDatabase());
// Add endpoint2 with higher weight
- ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 5.0f);
+ DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 5.0f);
provider.add(newConfig);
- // Active cluster should still be valid (implementation may or may not switch)
- assertNotNull(provider.getCluster());
+ // Active database should still be valid (implementation may or may not switch)
+ assertNotNull(provider.getDatabase());
}
@Test
@@ -177,28 +174,26 @@ void testActiveClusterHandlingOnRemove() {
try (MockedConstruction mockedPool = mockPool(mockConnection)) {
// Create initial provider with endpoint1
- ClusterConfig clusterConfig1 = createClusterConfig(endpoint1.getHostAndPort(), 1.0f);
+ DatabaseConfig dbConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f);
- MultiClusterClientConfig multiConfig = MultiClusterClientConfig
- .builder(new ClusterConfig[] { clusterConfig1 }).build();
+ MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { dbConfig1 }).build();
- try (
- MultiClusterPooledConnectionProvider providerWithMockedPool = new MultiClusterPooledConnectionProvider(
- multiConfig)) {
+ try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider(
+ multiConfig)) {
- // Add endpoint2 as second cluster
- ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f);
+ // Add endpoint2 as second database
+ DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f);
providerWithMockedPool.add(newConfig);
- // Get current active cluster
- Object initialActiveCluster = providerWithMockedPool.getCluster();
+ // Get current active database
+ Object initialActiveCluster = providerWithMockedPool.getDatabase();
assertNotNull(initialActiveCluster);
- // Remove endpoint1 (original cluster, might be active)
+ // Remove endpoint1 (original database, might be active)
providerWithMockedPool.remove(endpoint1.getHostAndPort());
- // Should still have an active cluster
- assertNotNull(providerWithMockedPool.getCluster());
+ // Should still have an active database
+ assertNotNull(providerWithMockedPool.getDatabase());
}
}
}
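
For reference, the dynamic-endpoint API this test exercises reduces to three calls on the provider. A sketch with 'provider' and 'clientConfig' as created in setUp() (the extra endpoint is illustrative only):

    HostAndPort extraEndpoint = new HostAndPort("localhost", 6382);
    DatabaseConfig extra = DatabaseConfig.builder(extraEndpoint, clientConfig)
        .weight(2.0f).healthCheckEnabled(false).build();

    provider.add(extra);                             // register a new database
    assertNotNull(provider.getDatabase(extraEndpoint));
    provider.remove(extraEndpoint);                  // and remove it again
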
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java
similarity index 55%
rename from src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java
index 3d0f114c18..0b062e4298 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java
@@ -8,11 +8,12 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException;
import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException;
-import java.lang.reflect.Field;
+import redis.clients.jedis.util.ReflectionTestUtil;
+
import java.time.Duration;
import java.util.concurrent.atomic.AtomicInteger;
@@ -21,34 +22,34 @@
/**
* Tests for how getMaxNumFailoverAttempts and getDelayInBetweenFailoverAttempts impact
- * MultiClusterPooledConnectionProvider behaviour when no healthy clusters are available.
+ * MultiDbConnectionProvider behaviour when no healthy databases are available.
*/
-public class MultiClusterFailoverAttemptsConfigTest {
+public class MultiDbConnectionProviderFailoverAttemptsConfigTest {
private HostAndPort endpoint0 = new HostAndPort("purposefully-incorrect", 0000);
private HostAndPort endpoint1 = new HostAndPort("purposefully-incorrect", 0001);
- private MultiClusterPooledConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
@BeforeEach
void setUp() throws Exception {
JedisClientConfig clientCfg = DefaultJedisClientConfig.builder().build();
- ClusterConfig[] clusterConfigs = new ClusterConfig[] {
- ClusterConfig.builder(endpoint0, clientCfg).weight(1.0f).healthCheckEnabled(false).build(),
- ClusterConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false)
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[] {
+ DatabaseConfig.builder(endpoint0, clientCfg).weight(1.0f).healthCheckEnabled(false).build(),
+ DatabaseConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false)
.build() };
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs);
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs);
// Use small values by default for tests unless overridden per-test via reflection
setBuilderFailoverConfig(builder, /* maxAttempts */ 10, /* delayMs */ 12000);
- provider = new MultiClusterPooledConnectionProvider(builder.build());
+ provider = new MultiDbConnectionProvider(builder.build());
- // Disable both clusters to force handleNoHealthyCluster path
- provider.getCluster(endpoint0).setDisabled(true);
- provider.getCluster(endpoint1).setDisabled(true);
+ // Disable both databases to force handleNoHealthyCluster path
+ provider.getDatabase(endpoint0).setDisabled(true);
+ provider.getDatabase(endpoint1).setDisabled(true);
}
@AfterEach
@@ -68,9 +69,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex
// First call: should throw temporary and start the freeze window, incrementing attempt count to
// 1
- assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper
+ .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase()));
int afterFirst = getProviderAttemptCount();
assertEquals(1, afterFirst);
@@ -78,8 +78,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex
// and should NOT increment the attempt count beyond 1
for (int i = 0; i < 50; i++) {
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ () -> MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
+ SwitchReason.HEALTH_CHECK, provider.getDatabase()));
assertEquals(1, getProviderAttemptCount());
}
}
@@ -96,9 +96,8 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr
// First call: should throw temporary and start the freeze window, incrementing attempt count to
// 1
- assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper
+ .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase()));
int afterFirst = getProviderAttemptCount();
assertEquals(1, afterFirst);
@@ -106,14 +105,14 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr
// and should NOT increment the attempt count beyond 1
for (int i = 0; i < 50; i++) {
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase()));
assertEquals(1, getProviderAttemptCount());
}
await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(Duration.ofMillis(10))
.until(() -> {
Exception e = assertThrows(JedisFailoverException.class, () -> provider
- .switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster()));
+ .switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase()));
return e instanceof JedisPermanentlyNotAvailableException;
});
}
@@ -129,15 +128,15 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex
// Expect exactly 'maxAttempts' temporary exceptions, then a permanent one
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt
+ () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt
// 1
assertThrows(JedisTemporarilyNotAvailableException.class,
- () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt
+ () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt
// 2
// Next should exceed max and become permanent
assertThrows(JedisPermanentlyNotAvailableException.class,
- () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt
+ () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt
// 3
// ->
// permanent
@@ -145,55 +144,37 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex
// ======== Test helper methods (reflection) ========
- private static void setBuilderFailoverConfig(MultiClusterClientConfig.Builder builder,
- int maxAttempts, int delayMs) throws Exception {
- Field fMax = builder.getClass().getDeclaredField("maxNumFailoverAttempts");
- fMax.setAccessible(true);
- fMax.setInt(builder, maxAttempts);
+ private static void setBuilderFailoverConfig(MultiDbConfig.Builder builder, int maxAttempts,
+ int delayMs) throws Exception {
+ ReflectionTestUtil.setField(builder, "maxNumFailoverAttempts", maxAttempts);
- Field fDelay = builder.getClass().getDeclaredField("delayInBetweenFailoverAttempts");
- fDelay.setAccessible(true);
- fDelay.setInt(builder, delayMs);
+ ReflectionTestUtil.setField(builder, "delayInBetweenFailoverAttempts", delayMs);
}
private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception {
- // Access the underlying MultiClusterClientConfig inside provider and adjust fields for this
+ // Access the underlying MultiDbConfig inside provider and adjust fields for this
// test
- Field cfgField = provider.getClass().getDeclaredField("multiClusterClientConfig");
- cfgField.setAccessible(true);
- Object cfg = cfgField.get(provider);
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
- Field fMax = cfg.getClass().getDeclaredField("maxNumFailoverAttempts");
- fMax.setAccessible(true);
- fMax.setInt(cfg, maxAttempts);
+ ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts);
- Field fDelay = cfg.getClass().getDeclaredField("delayInBetweenFailoverAttempts");
- fDelay.setAccessible(true);
- fDelay.setInt(cfg, delayMs);
+ ReflectionTestUtil.setField(cfg, "delayInBetweenFailoverAttempts", delayMs);
}
private int getProviderMaxAttempts() throws Exception {
- Field cfgField = provider.getClass().getDeclaredField("multiClusterClientConfig");
- cfgField.setAccessible(true);
- Object cfg = cfgField.get(provider);
- Field fMax = cfg.getClass().getDeclaredField("maxNumFailoverAttempts");
- fMax.setAccessible(true);
- return fMax.getInt(cfg);
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
+
+ return ReflectionTestUtil.getField(cfg, "maxNumFailoverAttempts");
}
private int getProviderDelayMs() throws Exception {
- Field cfgField = provider.getClass().getDeclaredField("multiClusterClientConfig");
- cfgField.setAccessible(true);
- Object cfg = cfgField.get(provider);
- Field fDelay = cfg.getClass().getDeclaredField("delayInBetweenFailoverAttempts");
- fDelay.setAccessible(true);
- return fDelay.getInt(cfg);
+ Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig");
+
+ return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts");
}
private int getProviderAttemptCount() throws Exception {
- Field f = provider.getClass().getDeclaredField("failoverAttemptCount");
- f.setAccessible(true);
- AtomicInteger val = (AtomicInteger) f.get(provider);
+ AtomicInteger val = ReflectionTestUtil.getField(provider, "failoverAttemptCount");
return val.get();
}
}
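
The reflection boilerplate removed above is concentrated in ReflectionTestUtil. A hypothetical sketch that matches the setField/getField calls made in this test (the actual helper in the repository may differ, e.g. by walking superclasses):

    import java.lang.reflect.Field;

    public final class ReflectionTestUtil {

      private ReflectionTestUtil() {
      }

      /** Sets a declared (possibly private) field on the target instance. */
      public static void setField(Object target, String name, Object value) throws Exception {
        Field f = target.getClass().getDeclaredField(name);
        f.setAccessible(true);
        f.set(target, value);
      }

      /** Reads a declared (possibly private) field from the target instance. */
      @SuppressWarnings("unchecked")
      public static <T> T getField(Object target, String name) throws Exception {
        Field f = target.getClass().getDeclaredField(name);
        f.setAccessible(true);
        return (T) f.get(target);
      }
    }
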
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java
new file mode 100644
index 0000000000..4ae061c9f5
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java
@@ -0,0 +1,20 @@
+package redis.clients.jedis.mcf;
+
+import redis.clients.jedis.Endpoint;
+
+public class MultiDbConnectionProviderHelper {
+
+ public static void onHealthStatusChange(MultiDbConnectionProvider provider, Endpoint endpoint,
+ HealthStatus oldStatus, HealthStatus newStatus) {
+ provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus));
+ }
+
+ public static void periodicFailbackCheck(MultiDbConnectionProvider provider) {
+ provider.periodicFailbackCheck();
+ }
+
+ public static Endpoint switchToHealthyCluster(MultiDbConnectionProvider provider,
+ SwitchReason reason, MultiDbConnectionProvider.Database iterateFrom) {
+ return provider.switchToHealthyDatabase(reason, iterateFrom);
+ }
+}
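
This helper exists to expose the provider's non-public hooks (onHealthStatusChange, periodicFailbackCheck, switchToHealthyDatabase) to other test classes via a same-package delegate, avoiding reflection. Example use, with 'provider' and 'endpoint' supplied by the calling test:

    MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint,
        HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);

    Endpoint next = MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
        SwitchReason.HEALTH_CHECK, provider.getDatabase());
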
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java
new file mode 100644
index 0000000000..1935647d46
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java
@@ -0,0 +1,153 @@
+package redis.clients.jedis.mcf;
+
+import static org.junit.jupiter.api.Assertions.*;
+import static org.mockito.Mockito.*;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.MockedConstruction;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+import redis.clients.jedis.Connection;
+import redis.clients.jedis.ConnectionPool;
+import redis.clients.jedis.DefaultJedisClientConfig;
+import redis.clients.jedis.HostAndPort;
+import redis.clients.jedis.JedisClientConfig;
+import redis.clients.jedis.MultiDbConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
+import redis.clients.jedis.exceptions.JedisValidationException;
+
+/**
+ * Tests for MultiDbConnectionProvider initialization edge cases
+ */
+@ExtendWith(MockitoExtension.class)
+public class MultiDbConnectionProviderInitializationTest {
+
+ private HostAndPort endpoint1;
+ private HostAndPort endpoint2;
+ private HostAndPort endpoint3;
+ private JedisClientConfig clientConfig;
+
+ @BeforeEach
+ void setUp() {
+ endpoint1 = new HostAndPort("localhost", 6379);
+ endpoint2 = new HostAndPort("localhost", 6380);
+ endpoint3 = new HostAndPort("localhost", 6381);
+ clientConfig = DefaultJedisClientConfig.builder().build();
+ }
+
+ private MockedConstruction<ConnectionPool> mockPool() {
+ Connection mockConnection = mock(Connection.class);
+ lenient().when(mockConnection.ping()).thenReturn(true);
+ return mockConstruction(ConnectionPool.class, (mock, context) -> {
+ when(mock.getResource()).thenReturn(mockConnection);
+ doNothing().when(mock).close();
+ });
+ }
+
+ @Test
+ void testInitializationWithMixedHealthCheckConfiguration() {
+ try (MockedConstruction<ConnectionPool> mockedPool = mockPool()) {
+ // Create databases with mixed health check configuration
+ DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f)
+ .healthCheckEnabled(false) // No health
+ // check
+ .build();
+
+ DatabaseConfig db2 = DatabaseConfig.builder(endpoint2, clientConfig).weight(2.0f)
+ .healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With
+ // health
+ // check
+ .build();
+
+ MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db2 }).build();
+
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
+ // Should initialize successfully
+ assertNotNull(provider.getDatabase());
+
+ // Should select db1 (no health check, assumed healthy) or db2 based on weight
+ // Since db2 has higher weight and health checks, it should be selected if healthy
+ assertTrue(provider.getDatabase() == provider.getDatabase(endpoint1)
+ || provider.getDatabase() == provider.getDatabase(endpoint2));
+ }
+ }
+ }
+
+ @Test
+ void testInitializationWithAllHealthChecksDisabled() {
+ try (MockedConstruction<ConnectionPool> mockedPool = mockPool()) {
+ // Create databases with no health checks
+ DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f)
+ .healthCheckEnabled(false).build();
+
+ DatabaseConfig db22 = DatabaseConfig.builder(endpoint2, clientConfig).weight(3.0f) // Higher
+ // weight
+ .healthCheckEnabled(false).build();
+
+ MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db22 }).build();
+
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
+ // Should select db22 (highest weight, no health checks)
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
+ }
+ }
+ }
+
+ @Test
+ void testInitializationWithSingleCluster() {
+ try (MockedConstruction<ConnectionPool> mockedPool = mockPool()) {
+ DatabaseConfig db = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f)
+ .healthCheckEnabled(false).build();
+
+ MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db }).build();
+
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
+ // Should select the only available db
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
+ }
+ }
+ }
+
+ @Test
+ void testErrorHandlingWithNullConfiguration() {
+ assertThrows(JedisValidationException.class, () -> {
+ new MultiDbConnectionProvider(null);
+ });
+ }
+
+ @Test
+ void testErrorHandlingWithEmptyClusterArray() {
+ assertThrows(JedisValidationException.class, () -> {
+ new MultiDbConfig.Builder(new DatabaseConfig[0]).build();
+ });
+ }
+
+ @Test
+ void testErrorHandlingWithNullDatabaseConfig() {
+ assertThrows(IllegalArgumentException.class, () -> {
+ new MultiDbConfig.Builder(new DatabaseConfig[] { null }).build();
+ });
+ }
+
+ @Test
+ void testInitializationWithZeroWeights() {
+ try (MockedConstruction<ConnectionPool> mockedPool = mockPool()) {
+ DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(0.0f) // Zero
+ // weight
+ .healthCheckEnabled(false).build();
+
+ DatabaseConfig db2 = DatabaseConfig.builder(endpoint2, clientConfig).weight(0.0f) // Zero
+ // weight
+ .healthCheckEnabled(false).build();
+
+ MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db2 }).build();
+
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
+ // Should still initialize and select one of the databases
+ assertNotNull(provider.getDatabase());
+ }
+ }
+ }
+}
diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java
similarity index 68%
rename from src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java
rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java
index 3b429a1436..9af53002e0 100644
--- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java
@@ -1,16 +1,14 @@
package redis.clients.jedis.mcf;
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
-import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
-
import org.awaitility.Awaitility;
import org.awaitility.Durations;
import org.junit.jupiter.api.*;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.exceptions.JedisValidationException;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn;
import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException;
import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException;
@@ -25,27 +23,26 @@
import static org.junit.jupiter.api.Assertions.*;
/**
- * @see MultiClusterPooledConnectionProvider
+ * @see MultiDbConnectionProvider
*/
@Tag("integration")
-public class MultiClusterPooledConnectionProviderTest {
+public class MultiDbConnectionProviderTest {
private final EndpointConfig endpointStandalone0 = HostAndPorts.getRedisEndpoint("standalone0");
private final EndpointConfig endpointStandalone1 = HostAndPorts.getRedisEndpoint("standalone1");
- private MultiClusterPooledConnectionProvider provider;
+ private MultiDbConnectionProvider provider;
@BeforeEach
public void setUp() {
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = ClusterConfig.builder(endpointStandalone0.getHostAndPort(),
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = DatabaseConfig.builder(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build()).weight(0.5f).build();
- clusterConfigs[1] = ClusterConfig.builder(endpointStandalone1.getHostAndPort(),
+ databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(),
endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build();
- provider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(clusterConfigs).build());
+ provider = new MultiDbConnectionProvider(new MultiDbConfig.Builder(databaseConfigs).build());
}
@AfterEach
@@ -57,7 +54,7 @@ public void destroy() {
@Test
public void testCircuitBreakerForcedTransitions() {
- CircuitBreaker circuitBreaker = provider.getClusterCircuitBreaker();
+ CircuitBreaker circuitBreaker = provider.getDatabaseCircuitBreaker();
circuitBreaker.getState();
if (CircuitBreaker.State.FORCED_OPEN.equals(circuitBreaker.getState()))
@@ -72,55 +69,55 @@ public void testCircuitBreakerForcedTransitions() {
@Test
public void testIterateActiveCluster() throws InterruptedException {
- waitForClustersToGetHealthy(provider.getCluster(endpointStandalone0.getHostAndPort()),
- provider.getCluster(endpointStandalone1.getHostAndPort()));
+ waitForClustersToGetHealthy(provider.getDatabase(endpointStandalone0.getHostAndPort()),
+ provider.getDatabase(endpointStandalone1.getHostAndPort()));
- Endpoint e2 = provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster());
+ Endpoint e2 = provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK,
+ provider.getDatabase());
assertEquals(endpointStandalone1.getHostAndPort(), e2);
}
@Test
public void testCanIterateOnceMore() {
Endpoint endpoint0 = endpointStandalone0.getHostAndPort();
- waitForClustersToGetHealthy(provider.getCluster(endpoint0),
- provider.getCluster(endpointStandalone1.getHostAndPort()));
+ waitForClustersToGetHealthy(provider.getDatabase(endpoint0),
+ provider.getDatabase(endpointStandalone1.getHostAndPort()));
- provider.setActiveCluster(endpoint0);
- provider.getCluster().setDisabled(true);
- provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster(endpoint0));
+ provider.setActiveDatabase(endpoint0);
+ provider.getDatabase().setDisabled(true);
+ provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase(endpoint0));
- assertFalse(provider.canIterateFrom(provider.getCluster()));
+ assertFalse(provider.canIterateFrom(provider.getDatabase()));
}
- private void waitForClustersToGetHealthy(Cluster... clusters) {
+ private void waitForClustersToGetHealthy(Database... clusters) {
Awaitility.await().pollInterval(Durations.ONE_HUNDRED_MILLISECONDS)
.atMost(Durations.TWO_SECONDS)
- .until(() -> Arrays.stream(clusters).allMatch(Cluster::isHealthy));
+ .until(() -> Arrays.stream(clusters).allMatch(Database::isHealthy));
}
@Test
public void testRunClusterFailoverPostProcessor() {
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = ClusterConfig
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = DatabaseConfig
.builder(new HostAndPort("purposefully-incorrect", 0000),
DefaultJedisClientConfig.builder().build())
.weight(0.5f).healthCheckEnabled(false).build();
- clusterConfigs[1] = ClusterConfig
+ databaseConfigs[1] = DatabaseConfig
.builder(new HostAndPort("purposefully-incorrect", 0001),
DefaultJedisClientConfig.builder().build())
.weight(0.4f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs);
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs);
// Configures a single failed command to trigger an open circuit on the next subsequent failure
- builder.circuitBreakerSlidingWindowSize(1);
- builder.circuitBreakerSlidingWindowMinCalls(1);
+ builder.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(1)
+ .circuitBreakerFailureRateThreshold(0);
AtomicBoolean isValidTest = new AtomicBoolean(false);
- MultiClusterPooledConnectionProvider localProvider = new MultiClusterPooledConnectionProvider(
- builder.build());
- localProvider.setClusterSwitchListener(a -> {
+ MultiDbConnectionProvider localProvider = new MultiDbConnectionProvider(builder.build());
+ localProvider.setDatabaseSwitchListener(a -> {
isValidTest.set(true);
});
@@ -140,21 +137,23 @@ public void testRunClusterFailoverPostProcessor() {
@Test
public void testSetActiveMultiClusterIndexEqualsZero() {
- assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(null)); // Should
- // throw an
- // exception
+ assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should
+ // throw
+ // an
+ // exception
}
@Test
public void testSetActiveMultiClusterIndexLessThanZero() {
- assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(null)); // Should
- // throw an
- // exception
+ assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should
+ // throw
+ // an
+ // exception
}
@Test
public void testSetActiveMultiClusterIndexOutOfRange() {
- assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(new Endpoint() {
+ assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(new Endpoint() {
@Override
public String getHost() {
return "purposefully-incorrect";
@@ -173,15 +172,14 @@ public void testConnectionPoolConfigApplied() {
poolConfig.setMaxTotal(8);
poolConfig.setMaxIdle(4);
poolConfig.setMinIdle(1);
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = new ClusterConfig(endpointStandalone0.getHostAndPort(),
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = new DatabaseConfig(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build(), poolConfig);
- clusterConfigs[1] = new ClusterConfig(endpointStandalone1.getHostAndPort(),
+ databaseConfigs[1] = new DatabaseConfig(endpointStandalone1.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build(), poolConfig);
- try (
- MultiClusterPooledConnectionProvider customProvider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(clusterConfigs).build())) {
- MultiClusterPooledConnectionProvider.Cluster activeCluster = customProvider.getCluster();
+ try (MultiDbConnectionProvider customProvider = new MultiDbConnectionProvider(
+ new MultiDbConfig.Builder(databaseConfigs).build())) {
+ MultiDbConnectionProvider.Database activeCluster = customProvider.getDatabase();
ConnectionPool connectionPool = activeCluster.getConnectionPool();
assertEquals(8, connectionPool.getMaxTotal());
assertEquals(4, connectionPool.getMaxIdle());
@@ -204,13 +202,13 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException {
});
// Create new provider with health check strategy (don't use the setUp() provider)
- ClusterConfig config = ClusterConfig
+ DatabaseConfig config = DatabaseConfig
.builder(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build())
.healthCheckStrategy(countingStrategy).build();
- MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(Collections.singletonList(config)).build());
+ MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider(
+ new MultiDbConfig.Builder(Collections.singletonList(config)).build());
try {
// Wait for some health checks to occur
@@ -238,22 +236,22 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException {
@Test
public void userCommand_firstTemporary_thenPermanent_inOrder() {
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = ClusterConfig.builder(endpointStandalone0.getHostAndPort(),
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = DatabaseConfig.builder(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build()).weight(0.5f).build();
- clusterConfigs[1] = ClusterConfig.builder(endpointStandalone1.getHostAndPort(),
+ databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(),
endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build();
- MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100)
+ MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider(
+ new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
.maxNumFailoverAttempts(2).retryMaxAttempts(1).build());
try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) {
jedis.get("foo");
// Disable both clusters so any attempt to switch results in 'no healthy cluster' path
- testProvider.getCluster(endpointStandalone0.getHostAndPort()).setDisabled(true);
- testProvider.getCluster(endpointStandalone1.getHostAndPort()).setDisabled(true);
+ testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true);
+ testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true);
// Simulate user running a command that fails and triggers failover iteration
assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo"));
@@ -268,12 +266,12 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() {
@Test
public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent_inOrder() {
- ClusterConfig[] clusterConfigs = new ClusterConfig[2];
- clusterConfigs[0] = ClusterConfig
+ DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+ databaseConfigs[0] = DatabaseConfig
.builder(endpointStandalone0.getHostAndPort(),
endpointStandalone0.getClientConfigBuilder().build())
.weight(0.5f).healthCheckEnabled(false).build();
- clusterConfigs[1] = ClusterConfig
+ databaseConfigs[1] = DatabaseConfig
.builder(endpointStandalone1.getHostAndPort(),
endpointStandalone1.getClientConfigBuilder().build())
.weight(0.3f).healthCheckEnabled(false).build();
@@ -281,11 +279,9 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
// ATTENTION: these configuration settings are not random; they are
// tuned to produce exact numbers of failures with exact exception types,
// and they are sensitive to other defaulted values within the components in use.
- MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100)
- .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowMinCalls(3)
- .circuitBreakerSlidingWindowSize(5)
- .circuitBreakerSlidingWindowType(SlidingWindowType.TIME_BASED)
+ MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider(
+ new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
+ .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5)
.circuitBreakerFailureRateThreshold(60).build()) {
};
@@ -293,7 +289,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
jedis.get("foo");
// disable most weighted cluster so that it will fail on initial requests
- testProvider.getCluster(endpointStandalone0.getHostAndPort()).setDisabled(true);
+ testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true);
Exception e = assertThrows(JedisConnectionException.class, () -> jedis.get("foo"));
assertEquals(JedisConnectionException.class, e.getClass());
@@ -302,7 +298,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
assertEquals(JedisConnectionException.class, e.getClass());
// then disable the second ones
- testProvider.getCluster(endpointStandalone1.getHostAndPort()).setDisabled(true);
+ testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true);
assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo"));
assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo"));
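The hunks above migrate these tests from the removed circuitBreakerSlidingWindowMinCalls knob to the new pair of thresholds. A minimal sketch of the migrated configuration, assuming only the renamed MultiDbConfig API this patch introduces (hosts are placeholders):

import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.MultiDbConfig.DatabaseConfig;

public class CircuitBreakerThresholdSketch {
  public static MultiDbConfig build() {
    DatabaseConfig primary = DatabaseConfig
        .builder(new HostAndPort("redis-a.example", 6379),
          DefaultJedisClientConfig.builder().build())
        .weight(1.0f).build();
    DatabaseConfig secondary = DatabaseConfig
        .builder(new HostAndPort("redis-b.example", 6379),
          DefaultJedisClientConfig.builder().build())
        .weight(0.5f).build();
    // The breaker now opens only when BOTH thresholds are exceeded within
    // the sliding window: the minimum failure count and the failure rate.
    return new MultiDbConfig.Builder(new DatabaseConfig[] { primary, secondary })
        .circuitBreakerSlidingWindowSize(5)      // evaluate the last 5 calls
        .circuitBreakerMinNumOfFailures(1)       // at least 1 failure, and
        .circuitBreakerFailureRateThreshold(60)  // a 60% failure rate
        .build();
  }
}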
diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
index d657e75829..f58df34e0c 100644
--- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
@@ -2,7 +2,7 @@
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.*;
-import static redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper.onHealthStatusChange;
+import static redis.clients.jedis.mcf.MultiDbConnectionProviderHelper.onHealthStatusChange;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -14,7 +14,7 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
@ExtendWith(MockitoExtension.class)
class PeriodicFailbackTest {
@@ -42,33 +42,32 @@ private MockedConstruction mockPool() {
@Test
void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException {
try (MockedConstruction<?> mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(100).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(100).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Start grace period for cluster2 manually
- provider.getCluster(endpoint2).setGracePeriod();
- provider.getCluster(endpoint2).setDisabled(true);
+ provider.getDatabase(endpoint2).setGracePeriod();
+ provider.getDatabase(endpoint2).setDisabled(true);
// Force failover to cluster1 since cluster2 is disabled
- provider.switchToHealthyCluster(SwitchReason.FORCED, provider.getCluster(endpoint2));
+ provider.switchToHealthyDatabase(SwitchReason.FORCED, provider.getDatabase(endpoint2));
// Manually trigger periodic check
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
// Should still be on cluster1 (cluster2 is in grace period)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
}
}
}
@@ -76,47 +75,46 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException
@Test
void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
try (MockedConstruction<?> mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add
- // grace
- // period
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+ .failbackCheckInterval(50).gracePeriod(100).build(); // Add
+ // grace
+ // period
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1 (cluster2 is in grace period)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Verify cluster2 is in grace period
- assertTrue(provider.getCluster(endpoint2).isInGracePeriod());
+ assertTrue(provider.getDatabase(endpoint2).isInGracePeriod());
// Make cluster2 healthy again (but it's still in grace period)
onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Trigger periodic check immediately - should still be on cluster1
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Wait for grace period to expire
Thread.sleep(150);
// Trigger periodic check after grace period expires
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
// Should have failed back to cluster2 (higher weight, grace period expired)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
}
}
}
@@ -124,27 +122,25 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
@Test
void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException {
try (MockedConstruction<?> mockedPool = mockPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 })
- .failbackSupported(false) // Disabled
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
.failbackCheckInterval(50).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster2 healthy again
onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
@@ -153,10 +149,10 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException
Thread.sleep(100);
// Trigger periodic check
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
// Should still be on cluster1 (failback disabled)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
}
}
}
@@ -166,38 +162,37 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx
try (MockedConstruction<?> mockedPool = mockPool()) {
HostAndPort endpoint3 = new HostAndPort("localhost", 6381);
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
.healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 })
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
.failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add
// grace
// period
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- config)) {
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
// Initially, cluster3 should be active (highest weight: 3.0f vs 2.0f vs 1.0f)
- assertEquals(provider.getCluster(endpoint3), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
// Make cluster3 unhealthy to force failover to cluster2 (next highest weight)
onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster2 (weight 2.0f, higher than cluster1's 1.0f)
- assertEquals(provider.getCluster(endpoint2), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
// Make cluster2 unhealthy to force failover to cluster1
onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Should now be on cluster1 (only healthy cluster left)
- assertEquals(provider.getCluster(endpoint1), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
// Make cluster2 and cluster3 healthy again
onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
@@ -207,10 +202,10 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx
Thread.sleep(150);
// Trigger periodic check
- MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider);
+ MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
// Should have failed back to cluster3 (highest weight, grace period expired)
- assertEquals(provider.getCluster(endpoint3), provider.getCluster());
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
}
}
}
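Every failback test above exercises the same weighted-recovery pattern; a condensed sketch of it under the renamed API (weights and intervals are illustrative, mirroring the tests):

import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.MultiDbConfig;

public class FailbackConfigSketch {
  public static MultiDbConfig build() {
    JedisClientConfig clientConfig = DefaultJedisClientConfig.builder().build();
    // The heavier database (weight 2.0) is preferred whenever it is healthy.
    MultiDbConfig.DatabaseConfig db1 = MultiDbConfig.DatabaseConfig
        .builder(new HostAndPort("localhost", 6379), clientConfig)
        .weight(1.0f).healthCheckEnabled(false).build();
    MultiDbConfig.DatabaseConfig db2 = MultiDbConfig.DatabaseConfig
        .builder(new HostAndPort("localhost", 6380), clientConfig)
        .weight(2.0f).healthCheckEnabled(false).build();
    return new MultiDbConfig.Builder(
        new MultiDbConfig.DatabaseConfig[] { db1, db2 })
            .failbackSupported(true)    // allow returning to a recovered database
            .failbackCheckInterval(50)  // poll for recovery every 50 ms
            .gracePeriod(100)           // quarantine an unhealthy database for 100 ms
            .build();
  }
}

Once db2 recovers and its grace period expires, the periodic check fails back to it, which is exactly what testPeriodicFailbackCheckWithHealthyCluster asserts.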
diff --git a/src/test/java/redis/clients/jedis/mcf/StatusTrackerTest.java b/src/test/java/redis/clients/jedis/mcf/StatusTrackerTest.java
index 95cd541631..c17fa2ff51 100644
--- a/src/test/java/redis/clients/jedis/mcf/StatusTrackerTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/StatusTrackerTest.java
@@ -5,6 +5,7 @@
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -184,15 +185,20 @@ void testWaitForHealthStatus_IgnoresOtherEndpoints() throws InterruptedException
void testWaitForHealthStatus_InterruptHandling() {
// Given: Health status is initially UNKNOWN and will stay that way
when(mockHealthStatusManager.getHealthStatus(testEndpoint)).thenReturn(HealthStatus.UNKNOWN);
+ when(mockHealthStatusManager.getMaxWaitFor(any())).thenReturn(3000L);
- // When: Interrupt the waiting thread
+ AtomicReference<String> interruptedThreadName = new AtomicReference<>();
+ AtomicReference<Exception> thrownException = new AtomicReference<>();
+ AtomicReference<Boolean> isInterrupted = new AtomicReference<>();
+ // When: Interrupt the waiting thread
Thread testThread = new Thread(() -> {
try {
statusTracker.waitForHealthStatus(testEndpoint);
fail("Should have thrown JedisConnectionException due to interrupt");
} catch (Exception e) {
- assertTrue(e.getMessage().contains("Interrupted while waiting"));
- assertTrue(Thread.currentThread().isInterrupted());
+ interruptedThreadName.set(Thread.currentThread().getName());
+ thrownException.set(e);
+ isInterrupted.set(Thread.currentThread().isInterrupted());
}
});
@@ -215,6 +221,8 @@ void testWaitForHealthStatus_InterruptHandling() {
}
assertFalse(testThread.isAlive(), "Test thread should have completed");
+ assertTrue(thrownException.get().getMessage().contains("Interrupted while waiting"));
+ assertTrue(isInterrupted.get(), "Thread should be interrupted");
}
@Test
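The interrupt-handling rewrite above exists because a JUnit assertion that fails inside a spawned thread only kills that thread; the test method itself still passes. The new shape captures state in AtomicReference holders and asserts on the test thread. A self-contained sketch of the pattern (the sleeping call stands in for the blocking call under test):

import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

import org.junit.jupiter.api.Test;

class WorkerThreadAssertionSketch {

  @Test
  void interruptIsObservedByWorker() throws InterruptedException {
    AtomicReference<Exception> thrown = new AtomicReference<>();
    AtomicReference<Boolean> interrupted = new AtomicReference<>();
    CountDownLatch started = new CountDownLatch(1);

    Thread worker = new Thread(() -> {
      started.countDown();
      try {
        Thread.sleep(10_000); // stand-in for the blocking call under test
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the flag, as production code should
        thrown.set(e);                      // capture instead of asserting here
        interrupted.set(Thread.currentThread().isInterrupted());
      }
    });
    worker.start();
    started.await();
    worker.interrupt();
    worker.join(1_000);

    // Assert on the test thread, where failures are actually reported.
    assertNotNull(thrown.get(), "worker should have caught the interrupt");
    assertTrue(interrupted.get(), "worker should have restored its interrupt flag");
  }
}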
diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
index b6f34106f2..ac74738226 100644
--- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
@@ -17,9 +17,9 @@
import redis.clients.jedis.*;
import redis.clients.jedis.exceptions.JedisAccessControlException;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper;
import redis.clients.jedis.mcf.SwitchReason;
import redis.clients.jedis.util.IOUtils;
@@ -47,10 +47,10 @@ public class AutomaticFailoverTest {
private Jedis jedis2;
- private List<MultiClusterClientConfig.ClusterConfig> getClusterConfigs(
+ private List<MultiDbConfig.DatabaseConfig> getDatabaseConfigs(
JedisClientConfig clientConfig, HostAndPort... hostPorts) {
return Arrays.stream(hostPorts)
- .map(hp -> new MultiClusterClientConfig.ClusterConfig(hp, clientConfig))
+ .map(hp -> new MultiDbConfig.DatabaseConfig(hp, clientConfig))
.collect(Collectors.toList());
}
@@ -68,17 +68,17 @@ public void cleanUp() {
@Test
public void pipelineWithSwitch() {
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
+ new MultiDbConfig.Builder(
+ getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.build());
try (UnifiedJedis client = new UnifiedJedis(provider)) {
AbstractPipeline pipe = client.pipelined();
pipe.set("pstr", "foobar");
pipe.hset("phash", "foo", "bar");
- MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster());
+ MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
+ SwitchReason.HEALTH_CHECK, provider.getDatabase());
pipe.sync();
}
@@ -88,17 +88,17 @@ public void pipelineWithSwitch() {
@Test
public void transactionWithSwitch() {
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
- new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
+ MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
+ new MultiDbConfig.Builder(
+ getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.build());
try (UnifiedJedis client = new UnifiedJedis(provider)) {
AbstractTransaction tx = client.multi();
tx.set("tstr", "foobar");
tx.hset("thash", "foo", "bar");
- MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider,
- SwitchReason.HEALTH_CHECK, provider.getCluster());
+ MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
+ SwitchReason.HEALTH_CHECK, provider.getDatabase());
assertEquals(Arrays.asList("OK", 1L), tx.exec());
}
@@ -108,20 +108,20 @@ public void transactionWithSwitch() {
@Test
public void commandFailoverUnresolvableHost() {
- int slidingWindowMinCalls = 2;
+ int slidingWindowMinFails = 2;
int slidingWindowSize = 2;
HostAndPort unresolvableHostAndPort = new HostAndPort("unresolvable", 6379);
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort()))
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
+ getDatabaseConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort()))
.retryWaitDuration(1).retryMaxAttempts(1)
- .circuitBreakerSlidingWindowMinCalls(slidingWindowMinCalls)
- .circuitBreakerSlidingWindowSize(slidingWindowSize);
+ .circuitBreakerSlidingWindowSize(slidingWindowSize)
+ .circuitBreakerMinNumOfFailures(slidingWindowMinFails);
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider(
+ MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider(
builder.build());
- connectionProvider.setClusterSwitchListener(failoverReporter);
+ connectionProvider.setDatabaseSwitchListener(failoverReporter);
UnifiedJedis jedis = new UnifiedJedis(connectionProvider);
@@ -129,16 +129,16 @@ public void commandFailoverUnresolvableHost() {
log.info("Starting calls to Redis");
assertFalse(failoverReporter.failedOver);
- for (int attempt = 0; attempt < slidingWindowMinCalls; attempt++) {
+ for (int attempt = 0; attempt < slidingWindowMinFails; attempt++) {
+ assertFalse(failoverReporter.failedOver);
Throwable thrown = assertThrows(JedisConnectionException.class,
() -> jedis.hset(key, "f1", "v1"));
assertThat(thrown.getCause(), instanceOf(UnknownHostException.class));
- assertFalse(failoverReporter.failedOver);
}
- // should failover now
- jedis.hset(key, "f1", "v1");
+ // already failed over now
assertTrue(failoverReporter.failedOver);
+ jedis.hset(key, "f1", "v1");
assertEquals(Collections.singletonMap("f1", "v1"), jedis.hgetAll(key));
jedis.flushAll();
@@ -148,22 +148,23 @@ public void commandFailoverUnresolvableHost() {
@Test
public void commandFailover() {
- int slidingWindowMinCalls = 6;
+ int slidingWindowMinFails = 6;
int slidingWindowSize = 6;
int retryMaxAttempts = 3;
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
+ getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.retryMaxAttempts(retryMaxAttempts) // Default
// is
// 3
- .circuitBreakerSlidingWindowMinCalls(slidingWindowMinCalls)
+ .circuitBreakerFailureRateThreshold(50)
+ .circuitBreakerMinNumOfFailures(slidingWindowMinFails)
.circuitBreakerSlidingWindowSize(slidingWindowSize);
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider(
+ MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider(
builder.build());
- connectionProvider.setClusterSwitchListener(failoverReporter);
+ connectionProvider.setDatabaseSwitchListener(failoverReporter);
UnifiedJedis jedis = new UnifiedJedis(connectionProvider);
@@ -191,19 +192,17 @@ public void commandFailover() {
@Test
public void pipelineFailover() {
- int slidingWindowMinCalls = 10;
int slidingWindowSize = 10;
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
- .circuitBreakerSlidingWindowMinCalls(slidingWindowMinCalls)
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
+ getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
.circuitBreakerSlidingWindowSize(slidingWindowSize)
.fallbackExceptionList(Collections.singletonList(JedisConnectionException.class));
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiClusterPooledConnectionProvider cacheProvider = new MultiClusterPooledConnectionProvider(
+ MultiDbConnectionProvider cacheProvider = new MultiDbConnectionProvider(
builder.build());
- cacheProvider.setClusterSwitchListener(failoverReporter);
+ cacheProvider.setDatabaseSwitchListener(failoverReporter);
UnifiedJedis jedis = new UnifiedJedis(cacheProvider);
@@ -225,20 +224,17 @@ public void pipelineFailover() {
@Test
public void failoverFromAuthError() {
- int slidingWindowMinCalls = 10;
int slidingWindowSize = 10;
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(
- getClusterConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(),
- workingEndpoint.getHostAndPort()))
- .circuitBreakerSlidingWindowMinCalls(slidingWindowMinCalls)
- .circuitBreakerSlidingWindowSize(slidingWindowSize)
+ MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
+ getDatabaseConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(),
+ workingEndpoint.getHostAndPort())).circuitBreakerSlidingWindowSize(slidingWindowSize)
.fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class));
RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
- MultiClusterPooledConnectionProvider cacheProvider = new MultiClusterPooledConnectionProvider(
+ MultiDbConnectionProvider cacheProvider = new MultiDbConnectionProvider(
builder.build());
- cacheProvider.setClusterSwitchListener(failoverReporter);
+ cacheProvider.setDatabaseSwitchListener(failoverReporter);
UnifiedJedis jedis = new UnifiedJedis(cacheProvider);
@@ -254,13 +250,13 @@ public void failoverFromAuthError() {
jedis.close();
}
- static class RedisFailoverReporter implements Consumer<ClusterSwitchEventArgs> {
+ static class RedisFailoverReporter implements Consumer<DatabaseSwitchEvent> {
boolean failedOver = false;
@Override
- public void accept(ClusterSwitchEventArgs e) {
- log.info("Jedis fail over to cluster: " + e.getClusterName());
+ public void accept(DatabaseSwitchEvent e) {
+ log.info("Jedis fail over to cluster: " + e.getDatabaseName());
failedOver = true;
}
}
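RedisFailoverReporter above is the smallest useful switch listener; under the renamed API it is just a Consumer registration. A sketch (the config argument is whatever the surrounding test builds):

import java.util.function.Consumer;

import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.mcf.DatabaseSwitchEvent;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;

public class SwitchListenerSketch {
  public static MultiDbConnectionProvider wire(MultiDbConfig config) {
    MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config);
    // Any Consumer<DatabaseSwitchEvent> works; the tests use one to record failovers.
    Consumer<DatabaseSwitchEvent> listener =
        e -> System.out.println("Switched to database: " + e.getDatabaseName());
    provider.setDatabaseSwitchListener(listener);
    return provider;
  }
}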
diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
index bde6ab7fc6..6d1b14009e 100644
--- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
+++ b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
@@ -14,13 +14,13 @@
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.mcf.HealthStatus;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper;
/**
- * Tests for MultiClusterPooledConnectionProvider event handling behavior during initialization and
+ * Tests for MultiDbConnectionProvider event handling behavior during initialization and
* throughout its lifecycle with HealthStatusChangeEvents.
*/
@ExtendWith(MockitoExtension.class)
@@ -52,30 +52,30 @@ private MockedConstruction mockConnectionPool() {
void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
try (MockedConstruction<?> mockedPool = mockConnectionPool()) {
// Create clusters without health checks
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
- assertFalse(provider.getCluster(endpoint1).isInGracePeriod());
- assertEquals(provider.getCluster(), provider.getCluster(endpoint1));
+ assertFalse(provider.getDatabase(endpoint1).isInGracePeriod());
+ assertEquals(provider.getDatabase(), provider.getDatabase(endpoint1));
// This should process immediately since initialization is complete
assertDoesNotThrow(() -> {
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
}, "Post-initialization events should be processed immediately");
// Verify the cluster has changed according to the UNHEALTHY status
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"UNHEALTHY status on active cluster should cause a grace period");
- assertNotEquals(provider.getCluster(), provider.getCluster(endpoint1),
+ assertNotEquals(provider.getDatabase(), provider.getDatabase(endpoint1),
"UNHEALTHY status on active cluster should cause a failover");
}
}
@@ -84,46 +84,46 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
@Test
void postInit_nonActive_changes_do_not_switch_active() throws Exception {
try (MockedConstruction<?> mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// Verify initial state
- assertEquals(provider.getCluster(endpoint1), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(),
"Should start with endpoint1 active");
// Simulate multiple rapid events for the same endpoint (post-init behavior)
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// After first UNHEALTHY on active cluster: it enters grace period and provider fails over
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Active cluster should enter grace period");
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Should fail over to endpoint2");
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
// Healthy event for non-active cluster should not immediately revert active cluster
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Active cluster should remain endpoint2");
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Grace period should still be in effect");
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
// Further UNHEALTHY for non-active cluster is a no-op
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Active cluster unchanged");
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), "Still in grace period");
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Still in grace period");
}
}
}
@@ -131,26 +131,26 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception {
@Test
void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception {
try (MockedConstruction<?> mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// This test verifies that multiple endpoints are properly initialized
// Verify both clusters are initialized properly
- assertNotNull(provider.getCluster(endpoint1), "Cluster 1 should be available");
- assertNotNull(provider.getCluster(endpoint2), "Cluster 2 should be available");
+ assertNotNull(provider.getDatabase(endpoint1), "Database 1 should be available");
+ assertNotNull(provider.getDatabase(endpoint2), "Database 2 should be available");
// Both should be healthy (no health checks = assumed healthy)
- assertTrue(provider.getCluster(endpoint1).isHealthy(), "Cluster 1 should be healthy");
- assertTrue(provider.getCluster(endpoint2).isHealthy(), "Cluster 2 should be healthy");
+ assertTrue(provider.getDatabase(endpoint1).isHealthy(), "Database 1 should be healthy");
+ assertTrue(provider.getDatabase(endpoint2).isHealthy(), "Database 2 should be healthy");
}
}
}
@@ -158,22 +158,22 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception
@Test
void init_single_cluster_initializes_and_is_healthy() throws Exception {
try (MockedConstruction<?> mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1 }).build();
// This test verifies that the provider initializes correctly and doesn't lose events
// In practice, with health checks disabled, no events should be generated during init
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// Verify successful initialization
- assertNotNull(provider.getCluster(), "Provider should have initialized successfully");
- assertEquals(provider.getCluster(endpoint1), provider.getCluster(),
+ assertNotNull(provider.getDatabase(), "Provider should have initialized successfully");
+ assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(),
"Should have selected the configured cluster");
- assertTrue(provider.getCluster().isHealthy(),
- "Cluster should be healthy (assumed healthy with no health checks)");
+ assertTrue(provider.getDatabase().isHealthy(),
+ "Database should be healthy (assumed healthy with no health checks)");
}
}
}
@@ -183,42 +183,42 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception {
@Test
void postInit_two_hop_failover_chain_respected() throws Exception {
try (MockedConstruction<?> mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
.builder(endpoint3, clientConfig).weight(0.2f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// First event: endpoint1 (active) becomes UNHEALTHY -> failover to endpoint2, endpoint1
// enters grace
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Endpoint1 should be in grace after unhealthy");
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Should have failed over to endpoint2");
// Second event: endpoint2 (now active) becomes UNHEALTHY -> failover to endpoint3
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
- assertTrue(provider.getCluster(endpoint2).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint2).isInGracePeriod(),
"Endpoint2 should be in grace after unhealthy");
- assertEquals(provider.getCluster(endpoint3), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(),
"Should have failed over to endpoint3");
// Third event: endpoint1 becomes HEALTHY again -> no immediate switch due to grace period
// behavior
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
- assertEquals(provider.getCluster(endpoint3), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(),
"Active cluster should remain endpoint3");
}
}
@@ -227,33 +227,33 @@ void postInit_two_hop_failover_chain_respected() throws Exception {
@Test
void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Exception {
try (MockedConstruction<?> mockedPool = mockConnectionPool()) {
- MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+ MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
.builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
- MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
- new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+ MultiDbConfig config = new MultiDbConfig.Builder(
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
+ try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(
config)) {
// Verify initial state
- assertEquals(HealthStatus.HEALTHY, provider.getCluster(endpoint1).getHealthStatus(),
+ assertEquals(HealthStatus.HEALTHY, provider.getDatabase(endpoint1).getHealthStatus(),
"Should start as HEALTHY");
// Send rapid sequence of events post-init
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // triggers failover and grace
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // non-active cluster becomes healthy
- MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+ MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // still non-active and in grace; no change
// Final expectations: endpoint1 is in grace, provider remains on endpoint2
- assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+ assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
"Endpoint1 should be in grace period");
- assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+ assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
"Active cluster should remain endpoint2");
}
}
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index 6531cc5490..e6ebc42b8d 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -9,10 +9,11 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
+import redis.clients.jedis.util.ClientTestUtil;
import java.io.IOException;
import java.time.Duration;
@@ -57,35 +58,31 @@ public static void beforeClass() {
@Test
public void testFailover() {
- MultiClusterClientConfig.ClusterConfig[] clusterConfig = new MultiClusterClientConfig.ClusterConfig[2];
-
JedisClientConfig config = endpoint.getClientConfigBuilder()
.socketTimeoutMillis(SOCKET_TIMEOUT_MS)
.connectionTimeoutMillis(CONNECTION_TIMEOUT_MS).build();
- clusterConfig[0] = ClusterConfig.builder(endpoint.getHostAndPort(0), config)
+ DatabaseConfig primary = DatabaseConfig.builder(endpoint.getHostAndPort(0), config)
.connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build();
- clusterConfig[1] = ClusterConfig.builder(endpoint.getHostAndPort(1), config)
- .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
-
- MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfig);
-
- builder.circuitBreakerSlidingWindowType(CircuitBreakerConfig.SlidingWindowType.TIME_BASED);
- builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS
- builder.circuitBreakerSlidingWindowMinCalls(1);
- builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit breaker
- builder.failbackSupported(true);
- builder.failbackCheckInterval(1000);
- builder.gracePeriod(2000);
-
- builder.retryWaitDuration(10);
- builder.retryMaxAttempts(1);
- builder.retryWaitDurationExponentialBackoffMultiplier(1);
- builder.fastFailover(true);
- builder.retryOnFailover(false);
+ DatabaseConfig secondary = DatabaseConfig.builder(endpoint.getHostAndPort(1), config)
+ .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();
- class FailoverReporter implements Consumer<ClusterSwitchEventArgs> {
+ MultiDbConfig multiConfig = MultiDbConfig.builder()
+ .endpoint(primary)
+ .endpoint(secondary)
+ .circuitBreakerSlidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS
+ .circuitBreakerFailureRateThreshold(10.0f) // percentage of failures to trigger circuit breaker
+ .failbackSupported(true)
+ .failbackCheckInterval(1000)
+ .gracePeriod(2000)
+ .retryWaitDuration(10)
+ .retryMaxAttempts(1)
+ .retryWaitDurationExponentialBackoffMultiplier(1)
+ .fastFailover(true)
+ .retryOnFailover(false)
+ .build();
+ class FailoverReporter implements Consumer<DatabaseSwitchEvent> {
String currentClusterName = "not set";
@@ -102,10 +99,10 @@ public String getCurrentClusterName() {
}
@Override
- public void accept(ClusterSwitchEventArgs e) {
- this.currentClusterName = e.getClusterName();
+ public void accept(DatabaseSwitchEvent e) {
+ this.currentClusterName = e.getDatabaseName();
log.info("\n\n====FailoverEvent=== \nJedis failover to cluster: {}\n====FailoverEvent===\n\n",
- e.getClusterName());
+ e.getDatabaseName());
if (failoverHappened) {
failbackHappened = true;
@@ -117,12 +114,12 @@ public void accept(ClusterSwitchEventArgs e) {
}
}
- MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(builder.build());
FailoverReporter reporter = new FailoverReporter();
- provider.setClusterSwitchListener(reporter);
- provider.setActiveCluster(endpoint.getHostAndPort(0));
- UnifiedJedis client = new UnifiedJedis(provider);
+ MultiDbClient client = MultiDbClient.builder()
+ .multiDbConfig(multiConfig)
+ .databaseSwitchListener(reporter)
+ .build();
AtomicLong executedCommands = new AtomicLong(0);
AtomicLong retryingThreadsCounter = new AtomicLong(0);
@@ -211,9 +208,9 @@ public void accept(ClusterSwitchEventArgs e) {
throw new RuntimeException(e);
}
-
- ConnectionPool pool1 = provider.getCluster(endpoint.getHostAndPort(0)).getConnectionPool();
- ConnectionPool pool2 = provider.getCluster(endpoint.getHostAndPort(1)).getConnectionPool();
+ MultiDbConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
+ ConnectionPool pool1 = provider.getDatabase(endpoint.getHostAndPort(0)).getConnectionPool();
+ ConnectionPool pool2 = provider.getDatabase(endpoint.getHostAndPort(1)).getConnectionPool();
await().atMost(Duration.ofSeconds(1)).until(() -> pool1.getNumActive() == 0);
await().atMost(Duration.ofSeconds(1)).until(() -> pool2.getNumActive() == 0);
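The scenario test now builds a MultiDbClient directly rather than wrapping a hand-built provider in UnifiedJedis. A minimal sketch of that entry point, assuming (as the wildcard import suggests) that MultiDbClient lives in redis.clients.jedis and exposes the usual command surface:

import redis.clients.jedis.MultiDbClient;
import redis.clients.jedis.MultiDbConfig;

public class MultiDbClientSketch {
  public static void run(MultiDbConfig multiConfig) {
    MultiDbClient client = MultiDbClient.builder()
        .multiDbConfig(multiConfig)
        .databaseSwitchListener(
            e -> System.out.println("Now on: " + e.getDatabaseName()))
        .build();
    try {
      client.set("key", "value"); // served by the active (highest-weight healthy) database
    } finally {
      client.close();
    }
  }
}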
diff --git a/src/test/java/redis/clients/jedis/util/ClientTestUtil.java b/src/test/java/redis/clients/jedis/util/ClientTestUtil.java
new file mode 100644
index 0000000000..ecc33d3d9c
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/util/ClientTestUtil.java
@@ -0,0 +1,11 @@
+package redis.clients.jedis.util;
+
+import redis.clients.jedis.UnifiedJedis;
+import redis.clients.jedis.providers.ConnectionProvider;
+
+public class ClientTestUtil {
+
+ public static <T extends ConnectionProvider> T getConnectionProvider(UnifiedJedis jedis) {
+ return ReflectionTestUtil.getField(jedis, "provider");
+ }
+}
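A usage sketch for the helper just added; the inferred type parameter performs the cast, so the caller names the provider type it expects:

import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;
import redis.clients.jedis.util.ClientTestUtil;

public class ClientTestUtilUsageSketch {
  public static void inspect(UnifiedJedis client, HostAndPort endpoint) {
    // Reflectively reads the client's private "provider" field.
    MultiDbConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
    System.out.println(provider.getDatabase(endpoint).getConnectionPool().getMaxTotal());
  }
}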
diff --git a/src/test/java/redis/clients/jedis/util/ReflectionTestUtil.java b/src/test/java/redis/clients/jedis/util/ReflectionTestUtil.java
new file mode 100644
index 0000000000..50b6448429
--- /dev/null
+++ b/src/test/java/redis/clients/jedis/util/ReflectionTestUtil.java
@@ -0,0 +1,91 @@
+package redis.clients.jedis.util;
+
+import java.lang.reflect.Field;
+
+/**
+ * Simple utility for accessing private fields in tests using reflection.
+ *
+ * This utility is intended for testing purposes only to access internal state that is not exposed
+ * through public APIs.
+ *
+ */
+public class ReflectionTestUtil {
+
+ /**
+ * Gets the value of a private field from an object.
+ * @param target the object containing the field
+ * @param fieldName the name of the field to access
+ * @param <T> the expected type of the field value
+ * @return the value of the field
+ * @throws RuntimeException if the field cannot be accessed
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> T getField(Object target, String fieldName) {
+ if (target == null) {
+ throw new IllegalArgumentException("Target object cannot be null");
+ }
+ if (fieldName == null || fieldName.isEmpty()) {
+ throw new IllegalArgumentException("Field name cannot be null or empty");
+ }
+
+ try {
+ Field field = findField(target.getClass(), fieldName);
+ field.setAccessible(true);
+ return (T) field.get(target);
+ } catch (NoSuchFieldException e) {
+ throw new RuntimeException(
+ "Field '" + fieldName + "' not found in class " + target.getClass().getName(), e);
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(
+ "Cannot access field '" + fieldName + "' in class " + target.getClass().getName(), e);
+ }
+ }
+
+ /**
+ * Sets the value of a private field in an object.
+ * @param target the object containing the field
+ * @param fieldName the name of the field to set
+ * @param value the value to set
+ * @throws RuntimeException if the field cannot be accessed
+ */
+ public static void setField(Object target, String fieldName, Object value) {
+ if (target == null) {
+ throw new IllegalArgumentException("Target object cannot be null");
+ }
+ if (fieldName == null || fieldName.isEmpty()) {
+ throw new IllegalArgumentException("Field name cannot be null or empty");
+ }
+
+ try {
+ Field field = findField(target.getClass(), fieldName);
+ field.setAccessible(true);
+ field.set(target, value);
+ } catch (NoSuchFieldException e) {
+ throw new RuntimeException(
+ "Field '" + fieldName + "' not found in class " + target.getClass().getName(), e);
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(
+ "Cannot access field '" + fieldName + "' in class " + target.getClass().getName(), e);
+ }
+ }
+
+ /**
+ * Finds a field in the class hierarchy.
+ * @param clazz the class to search
+ * @param fieldName the name of the field
+ * @return the field
+ * @throws NoSuchFieldException if the field is not found
+ */
+ private static Field findField(Class<?> clazz, String fieldName) throws NoSuchFieldException {
+ Class<?> current = clazz;
+ while (current != null) {
+ try {
+ return current.getDeclaredField(fieldName);
+ } catch (NoSuchFieldException e) {
+ current = current.getSuperclass();
+ }
+ }
+ throw new NoSuchFieldException(
+ "Field '" + fieldName + "' not found in class hierarchy of " + clazz.getName());
+ }
+}
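Finally, a brief usage sketch for ReflectionTestUtil itself; the field name and the replacement value are illustrative:

import redis.clients.jedis.UnifiedJedis;
import redis.clients.jedis.providers.ConnectionProvider;
import redis.clients.jedis.util.ReflectionTestUtil;

public class ReflectionTestUtilUsageSketch {
  public static void swapProvider(UnifiedJedis jedis, ConnectionProvider replacement) {
    // Read a private field declared anywhere in the class hierarchy.
    ConnectionProvider current = ReflectionTestUtil.getField(jedis, "provider");
    System.out.println("current provider: " + current);
    // Overwrite it, e.g. to inject a mock for a test.
    ReflectionTestUtil.setField(jedis, "provider", replacement);
  }
}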