diff --git a/.github/wordlist.txt b/.github/wordlist.txt
index a0ba60937b..8f4a29819d 100644
--- a/.github/wordlist.txt
+++ b/.github/wordlist.txt
@@ -355,4 +355,5 @@ entraid
 EntraID
 ACR
 AMR
-Entra
\ No newline at end of file
+Entra
+DatabaseSwitchEvent
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 13cb801f34..603328df1c 100644
--- a/Makefile
+++ b/Makefile
@@ -522,10 +522,10 @@ stop:
 test: | start mvn-test-local stop
 
 mvn-test-local:
-	@TEST_ENV_PROVIDER=local mvn -Dwith-param-names=true -Dtest=${TEST} clean compile test
+	@TEST_ENV_PROVIDER=local mvn -Dwith-param-names=true -Dtest=${TEST} clean verify
 
 mvn-test:
-	mvn -Dwith-param-names=true -Dtest=${TEST} clean compile test
+	mvn -Dwith-param-names=true -Dtest=${TEST} clean verify
 
 package: | start mvn-package stop
 
diff --git a/docs/failover.md b/docs/failover.md
index 632fba1a82..29e5551306 100644
--- a/docs/failover.md
+++ b/docs/failover.md
@@ -19,7 +19,7 @@ The remainder of this guide describes:
 
 * A basic failover and health check configuration
 * Supported retry and circuit breaker settings
-* Failback and the cluster selection API
+* Failback and the database selection API
 
 We recommend that you read this guide carefully and understand the configuration settings before enabling Jedis failover in production.
 
@@ -38,67 +38,60 @@ If `redis-east` becomes unavailable, you want your application to connect to `re
 
 Let's look at one way of configuring Jedis for this scenario.
 
-First, create an array of `ClusterConfig` objects, one for each Redis database.
+Start by defining the initial configuration for each available Redis database, prioritizing the databases with weights.
 
 ```java
 JedisClientConfig config = DefaultJedisClientConfig.builder().user("cache").password("secret")
     .socketTimeoutMillis(5000).connectionTimeoutMillis(5000).build();
 
+// A custom pool config can be provided per database
 ConnectionPoolConfig poolConfig = new ConnectionPoolConfig();
 poolConfig.setMaxTotal(8);
 poolConfig.setMaxIdle(8);
 poolConfig.setMinIdle(0);
 poolConfig.setBlockWhenExhausted(true);
 poolConfig.setMaxWait(Duration.ofSeconds(1));
 poolConfig.setTestWhileIdle(true);
 poolConfig.setTimeBetweenEvictionRuns(Duration.ofSeconds(1));
 
-MultiClusterClientConfig.ClusterConfig[] clusterConfig = new MultiClusterClientConfig.ClusterConfig[2];
 HostAndPort east = new HostAndPort("redis-east.example.com", 14000);
-clusterConfig[0] = ClusterConfig.builder(east, config).connectionPoolConfig(poolConfig).weight(1.0f).build();
 HostAndPort west = new HostAndPort("redis-west.example.com", 14000);
-clusterConfig[1] = ClusterConfig.builder(west, config).connectionPoolConfig(poolConfig).weight(0.5f).build();
+
+MultiDbConfig.Builder multiDbBuilder = MultiDbConfig.builder()
+    .endpoint(DatabaseConfig.builder(east, config).connectionPoolConfig(poolConfig).weight(1.0f).build())
+    .endpoint(DatabaseConfig.builder(west, config).connectionPoolConfig(poolConfig).weight(0.5f).build());
 ```
 
 The configuration above represents your two Redis deployments: `redis-east`
and `redis-west`.
 
-You'll use this array of configuration objects to create a connection provider that supports failover.
-Use the `MultiClusterClientConfig` builder to set your preferred retry and failover configuration, passing in the client configs you just created.
-Then build a `MultiClusterPooledConnectionProvider`.
+Next, use the `MultiDbConfig.Builder` to set your preferred retry and failover configuration.
+Then build the config and create a `MultiDbClient` from it.
 
 ```java
-MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clientConfigs);
-builder.circuitBreakerSlidingWindowSize(2); // Sliding window size in number of calls
-builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit breaker
+multiDbBuilder.circuitBreakerSlidingWindowSize(2) // Sliding window size in number of calls
+    .circuitBreakerFailureRateThreshold(10.0f) // Percentage of failures that trips the circuit breaker
+    .circuitBreakerMinNumOfFailures(1000) // Minimum number of failures before the circuit breaker can trip
 
-builder.failbackSupported(true); // Enable failback
-builder.failbackCheckInterval(1000); // Check every second the unhealthy cluster to see if it has recovered
-builder.gracePeriod(10000); // Keep cluster disabled for 10 seconds after it becomes unhealthy
+    .failbackSupported(true) // Enable failback
+    .failbackCheckInterval(1000) // Check an unhealthy database every second to see if it has recovered
+    .gracePeriod(10000) // Keep a database disabled for 10 seconds after it becomes unhealthy
 
 // Optional: configure retry settings
-builder.retryMaxAttempts(3); // Maximum number of retry attempts (including the initial call)
-builder.retryWaitDuration(500); // Number of milliseconds to wait between retry attempts
-builder.retryWaitDurationExponentialBackoffMultiplier(2); // Exponential backoff factor multiplied against wait duration between retries
+    .retryMaxAttempts(3) // Maximum number of retry attempts (including the initial call)
+    .retryWaitDuration(500) // Number of milliseconds to wait between retry attempts
+    .retryWaitDurationExponentialBackoffMultiplier(2) // Exponential backoff factor multiplied against the wait duration between retries
 
 // Optional: configure fast failover
-builder.fastFailover(true); // Force closing connections to unhealthy cluster on failover
-builder.retryOnFailover(false); // Do not retry failed commands during failover
+    .fastFailover(true) // Force-close connections to an unhealthy database on failover
+    .retryOnFailover(false); // Do not retry failed commands during failover
 
-MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(builder.build());
+MultiDbClient multiDbClient = MultiDbClient.builder()
+    .multiDbConfig(multiDbBuilder.build())
+    .build();
 ```
 
-Internally, the connection provider uses a [highly configurable circuit breaker and retry implementation](https://resilience4j.readme.io/docs/circuitbreaker) to determine when to fail over. In the configuration here, we've set a sliding window size of 10 and a failure rate threshold of 50%. This means that a failover will be triggered if 5 out of any 10 calls to Redis fail.
-
-Once you've configured and created a `MultiClusterPooledConnectionProvider`, instantiate a `UnifiedJedis` instance for your application, passing in the provider you just created:
-
-```java
-UnifiedJedis jedis = new UnifiedJedis(provider);
-```
-
-You can now use this `UnifiedJedis` instance, and the connection management and failover will be handled transparently.
+You can now use this `MultiDbClient` instance, and the connection management and failover will be handled transparently.
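+
+For example, regular commands issued through the client are routed to the currently active (highest-weight healthy) database. Here is a minimal illustration, using the `multiDbClient` built above and hypothetical keys:
+
+```java
+multiDbClient.set("service:status", "ready");
+String status = multiDbClient.get("service:status"); // served by redis-east while it is healthy
+```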
 
 ## Configuration options
 
@@ -108,7 +101,7 @@ a fault-tolerance library that implements [retry](https://resilience4j.readme.io
 
-Once you configure Jedis for failover using the `MultiClusterPooledConnectionProvider`, each call to Redis is decorated with a resilience4j retry and circuit breaker.
+Once you configure Jedis for failover using the `MultiDbClient`, each call to Redis is decorated with a resilience4j retry and circuit breaker.
 
 By default, any call that throws a `JedisConnectionException` will be retried up to 3 times.
-If the call continues to fail after the maximum number of retry attempts, then the circuit breaker will record a failure.
+If the call fails, the circuit breaker will record a failure.
 
 The circuit breaker maintains a record of failures in a sliding window data structure.
 If the failure rate reaches a configured threshold (e.g., when 50% of the last 10 calls have failed),
@@ -147,16 +140,16 @@ Jedis uses the following circuit breaker settings:
 
 ### Health Check Configuration and Customization
 
-The `MultiClusterPooledConnectionProvider` includes a comprehensive health check system that continuously monitors the availability of Redis clusters to enable automatic failover and failback.
+The `MultiDbClient` includes a comprehensive health check system that continuously monitors the availability of Redis databases to enable automatic failover and failback.
 
 The health check system serves several critical purposes in the failover architecture:
 
-1. **Proactive Monitoring**: Continuously monitors passive clusters that aren't currently receiving traffic
-2. **Failback Detection**: Determines when a previously failed cluster has recovered and is ready to accept traffic
-3. **Circuit Breaker Integration**: Works with the circuit breaker pattern to manage cluster state transitions
+1. **Proactive Monitoring**: Continuously monitors passive databases that aren't currently receiving traffic
+2. **Failback Detection**: Determines when a previously failed database has recovered and is ready to accept traffic
+3. **Circuit Breaker Integration**: Works with the circuit breaker pattern to manage database state transitions
 4. **Customizable Strategies**: Supports pluggable health check implementations for different deployment scenarios
 
-The health check system operates independently of your application traffic, running background checks at configurable intervals to assess cluster health without impacting performance.
+The health check system operates independently of your application traffic, running background checks at configurable intervals to assess database health without impacting performance.
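+
+For example, you can observe the status that these background checks maintain through the client API (a minimal sketch; `multiDbClient` is the client built earlier):
+
+```java
+for (Endpoint endpoint : multiDbClient.getEndpoints()) {
+    System.out.println(endpoint + " healthy: " + multiDbClient.isHealthy(endpoint));
+}
+```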
 #### Available Health Check Types
 
@@ -190,29 +183,29 @@ The `LagAwareStrategy` is designed specifically for Redis Enterprise Active-Acti
 
 **Example Configuration:**
 
 ```java
-BiFunction<HostAndPort, Supplier<RedisCredentials>, MultiClusterClientConfig.StrategySupplier> healthCheckStrategySupplier =
-(HostAndPort clusterHostPort, Supplier<RedisCredentials> credentialsSupplier) -> {
-    LagAwareStrategy.Config lagConfig = LagAwareStrategy.Config.builder(clusterHostPort, credentialsSupplier)
-        .interval(5000) // Check every 5 seconds
-        .timeout(3000) // 3 second timeout
-        .extendedCheckEnabled(true)
-        .build();
-
-    return (hostAndPort, jedisClientConfig) -> new LagAwareStrategy(lagConfig);
-};
+BiFunction<HostAndPort, Supplier<RedisCredentials>, MultiDbConfig.StrategySupplier> healthCheckStrategySupplier =
+    (HostAndPort dbHostPort, Supplier<RedisCredentials> credentialsSupplier) -> {
+      LagAwareStrategy.Config lagConfig = LagAwareStrategy.Config.builder(dbHostPort, credentialsSupplier)
+          .interval(5000) // Check every 5 seconds
+          .timeout(3000) // 3 second timeout
+          .extendedCheckEnabled(true)
+          .build();
+
+      return (hostAndPort, jedisClientConfig) -> new LagAwareStrategy(lagConfig);
+    };
 
 // Configure REST API endpoint and credentials
-Endpoint restEndpoint = new HostAndPort("redis-enterprise-cluster-fqdn", 9443);
-Supplier<RedisCredentials> credentialsSupplier = () ->
-    new DefaultRedisCredentials("rest-api-user", "pwd");
+HostAndPort restEndpoint = new HostAndPort("redis-enterprise-db-fqdn", 9443);
+Supplier<RedisCredentials> credentialsSupplier = () ->
+    new DefaultRedisCredentials("rest-api-user", "pwd");
 
-MultiClusterClientConfig.StrategySupplier lagawareStrategySupplier = healthCheckStrategySupplier.apply(
-    restEndpoint, credentialsSupplier);
+MultiDbConfig.StrategySupplier lagawareStrategySupplier = healthCheckStrategySupplier.apply(
+    restEndpoint, credentialsSupplier);
 
-MultiClusterClientConfig.ClusterConfig clusterConfig =
-    MultiClusterClientConfig.ClusterConfig.builder(hostAndPort, clientConfig)
-        .healthCheckStrategySupplier(lagawareStrategySupplier)
-        .build();
+MultiDbConfig.DatabaseConfig dbConfig =
+    MultiDbConfig.DatabaseConfig.builder(hostAndPort, clientConfig)
+        .healthCheckStrategySupplier(lagawareStrategySupplier)
+        .build();
 ```
 
 ##### 3. Custom Health Check Strategies
 
@@ -234,7 +227,7 @@ MultiClusterClientConfig.StrategySupplier customStrategy =
     return new MyCustomHealthCheckStrategy(hostAndPort, jedisClientConfig);
 };
 
-MultiClusterClientConfig.ClusterConfig clusterConfig =
-    MultiClusterClientConfig.ClusterConfig.builder(hostAndPort, clientConfig)
+MultiDbConfig.DatabaseConfig dbConfig =
+    MultiDbConfig.DatabaseConfig.builder(hostAndPort, clientConfig)
         .healthCheckStrategySupplier(customStrategy)
         .weight(1.0f)
@@ -278,7 +271,7 @@ MultiClusterClientConfig.StrategySupplier pingStrategy = (hostAndPort, jedisClie
     };
 };
 
-MultiClusterClientConfig.ClusterConfig clusterConfig =
-    MultiClusterClientConfig.ClusterConfig.builder(hostAndPort, clientConfig)
+MultiDbConfig.DatabaseConfig dbConfig =
+    MultiDbConfig.DatabaseConfig.builder(hostAndPort, clientConfig)
         .healthCheckStrategySupplier(pingStrategy)
         .build();
@@ -289,9 +282,9 @@ MultiClusterClientConfig.ClusterConfig clusterConfig =
 
 Use the `healthCheckEnabled(false)` method to completely disable health checks:
 
 ```java
-clusterConfig[0] = ClusterConfig.builder(east, config)
-    .healthCheckEnabled(false) // Disable health checks entirely
-    .build();
+DatabaseConfig dbConfig = DatabaseConfig.builder(east, config)
+    .healthCheckEnabled(false) // Disable health checks entirely
+    .build();
 ```
 
 ### Fallback configuration
 
@@ -305,41 +298,43 @@ Jedis uses the following fallback settings:
 
 ### Failover callbacks
 
 In the event that Jedis fails over, you may wish to take some action.
This might include logging a warning, recording
-a metric, or externally persisting the cluster connection state, to name just a few examples. For this reason,
-`MultiPooledConnectionProvider` lets you register a custom callback that will be called whenever Jedis
-fails over to a new cluster.
+a metric, or externally persisting the database connection state, to name just a few examples. For this reason,
+`MultiDbClient` lets you register a custom callback that will be called whenever Jedis
+fails over to a new database.
 
 To use this feature, you'll need to design a class that implements `java.util.function.Consumer`.
 This class must implement the `accept` method, as you can see below.
 
 ```java
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.function.Consumer;
-
-public class FailoverReporter implements Consumer<ClusterSwitchEventArgs> {
-
+public class FailoverReporter implements Consumer<DatabaseSwitchEvent> {
+
     @Override
-    public void accept(ClusterSwitchEventArgs e) {
-        Logger logger = LoggerFactory.getLogger(FailoverReporter.class);
-        logger.warn("Jedis failover to cluster: " + e.getClusterName() + " due to " + e.getReason());
+    public void accept(DatabaseSwitchEvent e) {
+        System.out.println("Jedis failover to database: " + e.getDatabaseName() + " due to " + e.getReason());
     }
 }
 ```
 
-You can then pass an instance of this class to your `MultiPooledConnectionProvider`.
+A `DatabaseSwitchEvent` consumer can then be registered as follows:
 
 ```
-FailoverReporter reporter = new FailoverReporter();
-provider.setClusterSwitchListener(reporter);
+FailoverReporter reporter = new FailoverReporter();
+MultiDbClient client = MultiDbClient.builder()
+    .databaseSwitchListener(reporter)
+    .build();
 ```
 
-The provider will call your `accept` whenever a failover occurs.
+The client will call your `accept` method whenever a failover occurs.
 
+Alternatively, register a lambda expression directly:
+```
+MultiDbClient client = MultiDbClient.builder()
+    .databaseSwitchListener(event -> System.out.println("Switched to: " + event.getEndpoint()))
+    .build();
+```
+
 ## Failing back
 
-Jedis supports automatic failback based on health checks or manual failback using the cluster selection API.
+Jedis supports automatic failback based on health checks or manual failback using the database selection API.
 
 ## Failback scenario
 
@@ -355,30 +350,31 @@ You will likely want to fail your application back to `redis-east`.
 
 ### Automatic failback based on health checks
 
-When health checks are enabled, Jedis automatically monitors the health of all configured clusters, including those that are currently inactive due to previous failures.
+When health checks are enabled, Jedis automatically monitors the health of all configured databases, including those that are currently inactive due to previous failures.
 
 The automatic failback process works as follows:
 
-1. **Continuous Monitoring**: Health checks run continuously for all clusters, regardless of their current active status
-2. **Recovery Detection**: When a previously failed cluster passes the required number of consecutive health checks, it's marked as healthy
-3. **Weight-Based Failback**: If automatic failback is enabled and a recovered cluster has a higher weight than the currently active cluster, Jedis will automatically switch to the recovered cluster
-4. **Grace Period Respect**: Failback only occurs after the configured grace period has elapsed since the cluster was marked as unhealthy
+1. **Continuous Monitoring**: Health checks run continuously for all databases, regardless of their current active status
+2. **Recovery Detection**: When a previously failed database passes the required number of consecutive health checks, it's marked as healthy
+3. **Weight-Based Failback**: If automatic failback is enabled and a recovered database has a higher weight than the currently active database, Jedis will automatically switch to the recovered database
+4. **Grace Period Respect**: Failback only occurs after the configured grace period has elapsed since the database was marked as unhealthy
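+
+If you prefer to stay in control, automatic failback can be switched off and triggered through the manual API described below (a sketch reusing the `multiDbBuilder` from the basic example):
+
+```java
+multiDbBuilder.failbackSupported(false); // fail back manually via setActiveDatabase(...)
+```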
 
-## Manual Failback using the cluster selection API
+## Manual Failback using the database selection API
 
-Once you've determined that it's safe to fail back to a previously-unavailable cluster,
+Once you've determined that it's safe to fail back to a previously-unavailable database,
 you need to decide how to trigger the failback. There are two ways to accomplish this:
 
-`MultiClusterPooledConnectionProvider` exposes a method that you can use to manually select which cluster Jedis should use.
-To select a different cluster to use, pass the cluster's `HostAndPort` to `setActiveCluster()`:
+`MultiDbClient` exposes a method that you can use to manually select which database Jedis should use.
+To select a different database, pass the database's `HostAndPort` to `setActiveDatabase()`:
 
 ```
-provider.setActiveCluster(west);
+Endpoint endpoint = new HostAndPort("redis-east.example.com", 14000);
+client.setActiveDatabase(endpoint);
 ```
 
 This method is thread-safe.
 
 If you decide to implement manual failback, you will need a way for external systems to trigger this method
 in your application. For example, if your application exposes a REST API, you might consider creating a REST endpoint
-to call `setActiveCluster` and fail back the application.
+to call `setActiveDatabase` and fail back the application.
 
 ## Troubleshooting Failover and Failback Issues
 
@@ -418,9 +414,9 @@ HealthCheckStrategy.Config config = HealthCheckStrategy.Config.builder()
     .build();
 
 // Adjust failback timing
-MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(clusterConfigs)
-    .gracePeriod(5000) // Shorter grace period
-    .build();
+MultiDbConfig multiConfig = new MultiDbConfig.Builder(databaseConfigs)
+    .gracePeriod(5000) // Shorter grace period
+    .build();
 
 ## Need help or have questions?
diff --git a/pom.xml b/pom.xml index fd98539365..e056cb1748 100644 --- a/pom.xml +++ b/pom.xml @@ -488,7 +488,7 @@ **/Health*.java **/*IT.java **/scenario/RestEndpointUtil.java - src/main/java/redis/clients/jedis/MultiClusterClientConfig.java + src/main/java/redis/clients/jedis/MultiDbConfig.java src/main/java/redis/clients/jedis/HostAndPort.java **/builders/*.java **/MultiDb*.java diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java index 9df888651c..3ffeaf93aa 100644 --- a/src/main/java/redis/clients/jedis/MultiDbClient.java +++ b/src/main/java/redis/clients/jedis/MultiDbClient.java @@ -1,15 +1,15 @@ package redis.clients.jedis; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.builders.MultiDbClientBuilder; import redis.clients.jedis.csc.Cache; import redis.clients.jedis.executors.CommandExecutor; -import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; -import redis.clients.jedis.mcf.MultiClusterPipeline; -import redis.clients.jedis.mcf.MultiClusterTransaction; +import redis.clients.jedis.mcf.MultiDbCommandExecutor; +import redis.clients.jedis.mcf.MultiDbPipeline; +import redis.clients.jedis.mcf.MultiDbTransaction; import redis.clients.jedis.providers.ConnectionProvider; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; import java.util.Set; @@ -43,14 +43,14 @@ * * MultiDbClient client = MultiDbClient.builder() * .multiDbConfig( - * MultiClusterClientConfig.builder() + * MultiDbConfig.builder() * .endpoint( - * ClusterConfig.builder( + * DatabaseConfig.builder( * primary, * DefaultJedisClientConfig.builder().build()) * .weight(100.0f) * .build()) - * .endpoint(ClusterConfig.builder( + * .endpoint(DatabaseConfig.builder( * secondary, * DefaultJedisClientConfig.builder().build()) * .weight(50.0f).build()) @@ -75,10 +75,10 @@ * resilience features. *

* @author Ivo Gaydazhiev - * @since 5.2.0 - * @see MultiClusterPooledConnectionProvider - * @see CircuitBreakerCommandExecutor - * @see MultiClusterClientConfig + * @since 7.0.0 + * @see MultiDbConnectionProvider + * @see MultiDbCommandExecutor + * @see MultiDbConfig */ @Experimental public class MultiDbClient extends UnifiedJedis { @@ -90,9 +90,8 @@ public class MultiDbClient extends UnifiedJedis { * the builder pattern for advanced configurations. For most use cases, prefer using * {@link #builder()} to create instances. *

- * @param commandExecutor the command executor (typically CircuitBreakerCommandExecutor) - * @param connectionProvider the connection provider (typically - * MultiClusterPooledConnectionProvider) + * @param commandExecutor the command executor (typically MultiDbCommandExecutor) + * @param connectionProvider the connection provider (typically MultiDbConnectionProvider) * @param commandObjects the command objects * @param redisProtocol the Redis protocol version * @param cache the client-side cache (may be null) @@ -103,16 +102,16 @@ public class MultiDbClient extends UnifiedJedis { } /** - * Returns the underlying MultiClusterPooledConnectionProvider. + * Returns the underlying MultiDbConnectionProvider. *

* This provides access to multi-cluster specific operations like manual failover, health status * monitoring, and cluster switch event handling. *

* @return the multi-cluster connection provider - * @throws ClassCastException if the provider is not a MultiClusterPooledConnectionProvider + * @throws ClassCastException if the provider is not a MultiDbConnectionProvider */ - private MultiClusterPooledConnectionProvider getMultiClusterProvider() { - return (MultiClusterPooledConnectionProvider) this.provider; + private MultiDbConnectionProvider getMultiDbConnectionProvider() { + return (MultiDbConnectionProvider) this.provider; } /** @@ -124,20 +123,20 @@ private MultiClusterPooledConnectionProvider getMultiClusterProvider() { * @param endpoint the endpoint to switch to */ public void setActiveDatabase(Endpoint endpoint) { - getMultiClusterProvider().setActiveCluster(endpoint); + getMultiDbConnectionProvider().setActiveDatabase(endpoint); } /** * Adds a pre-configured cluster configuration. *

- * This method allows adding a fully configured ClusterConfig instance, providing maximum + * This method allows adding a fully configured DatabaseConfig instance, providing maximum * flexibility for advanced configurations including custom health check strategies, connection * pool settings, etc. *

- * @param clusterConfig the pre-configured cluster configuration + * @param databaseConfig the pre-configured database configuration */ - public void addEndpoint(ClusterConfig clusterConfig) { - getMultiClusterProvider().add(clusterConfig); + public void addEndpoint(DatabaseConfig databaseConfig) { + getMultiDbConnectionProvider().add(databaseConfig); } /** @@ -153,10 +152,10 @@ public void addEndpoint(ClusterConfig clusterConfig) { * @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint already exists */ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) { - ClusterConfig clusterConfig = ClusterConfig.builder(endpoint, clientConfig).weight(weight) + DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight) .build(); - getMultiClusterProvider().add(clusterConfig); + getMultiDbConnectionProvider().add(databaseConfig); } /** @@ -167,7 +166,7 @@ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clien * @return the set of all configured endpoints */ public Set getEndpoints() { - return getMultiClusterProvider().getEndpoints(); + return getMultiDbConnectionProvider().getEndpoints(); } /** @@ -179,7 +178,7 @@ public Set getEndpoints() { * @return the health status of the endpoint */ public boolean isHealthy(Endpoint endpoint) { - return getMultiClusterProvider().isHealthy(endpoint); + return getMultiDbConnectionProvider().isHealthy(endpoint); } /** @@ -195,7 +194,7 @@ public boolean isHealthy(Endpoint endpoint) { * healthy clusters available */ public void removeEndpoint(Endpoint endpoint) { - getMultiClusterProvider().remove(endpoint); + getMultiDbConnectionProvider().remove(endpoint); } /** @@ -211,7 +210,7 @@ public void removeEndpoint(Endpoint endpoint) { * or doesn't exist */ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) { - getMultiClusterProvider().forceActiveCluster(endpoint, forcedActiveDurationMs); + getMultiDbConnectionProvider().forceActiveDatabase(endpoint, forcedActiveDurationMs); } /** @@ -220,11 +219,11 @@ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) * The returned pipeline supports the same resilience features as the main client, including * automatic failover during batch execution. *

- * @return a new MultiClusterPipeline instance + * @return a new MultiDbPipeline instance */ @Override - public MultiClusterPipeline pipelined() { - return new MultiClusterPipeline(getMultiClusterProvider(), commandObjects); + public MultiDbPipeline pipelined() { + return new MultiDbPipeline(getMultiDbConnectionProvider(), commandObjects); } /** @@ -233,12 +232,11 @@ public MultiClusterPipeline pipelined() { * The returned transaction supports the same resilience features as the main client, including * automatic failover during transaction execution. *

- * @return a new MultiClusterTransaction instance + * @return a new MultiDbTransaction instance */ @Override - public MultiClusterTransaction multi() { - return new MultiClusterTransaction((MultiClusterPooledConnectionProvider) provider, true, - commandObjects); + public MultiDbTransaction multi() { + return new MultiDbTransaction((MultiDbConnectionProvider) provider, true, commandObjects); } /** @@ -246,17 +244,17 @@ public MultiClusterTransaction multi() { * @return transaction object */ @Override - public MultiClusterTransaction transaction(boolean doMulti) { + public MultiDbTransaction transaction(boolean doMulti) { if (provider == null) { throw new IllegalStateException( "It is not allowed to create Transaction from this " + getClass()); } - return new MultiClusterTransaction(getMultiClusterProvider(), doMulti, commandObjects); + return new MultiDbTransaction(getMultiDbConnectionProvider(), doMulti, commandObjects); } public Endpoint getActiveEndpoint() { - return getMultiClusterProvider().getCluster().getEndpoint(); + return getMultiDbConnectionProvider().getDatabase().getEndpoint(); } /** diff --git a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java b/src/main/java/redis/clients/jedis/MultiDbConfig.java similarity index 88% rename from src/main/java/redis/clients/jedis/MultiClusterClientConfig.java rename to src/main/java/redis/clients/jedis/MultiDbConfig.java index 35e51ee600..7bafdd9f5e 100644 --- a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java +++ b/src/main/java/redis/clients/jedis/MultiDbConfig.java @@ -21,8 +21,8 @@ * This configuration enables seamless failover between multiple Redis clusters, databases, or * endpoints by providing comprehensive settings for retry logic, circuit breaker behavior, health * checks, and failback mechanisms. It is designed to work with - * {@link redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider} to provide high availability - * and disaster recovery capabilities. + * {@link redis.clients.jedis.mcf.MultiDbConnectionProvider} to provide high availability and + * disaster recovery capabilities. *

*

* Key Features: @@ -49,27 +49,26 @@ * { * @code * // Configure individual clusters - * ClusterConfig primary = ClusterConfig.builder(primaryEndpoint, clientConfig).weight(1.0f) + * DatabaseConfig primary = DatabaseConfig.builder(primaryEndpoint, clientConfig).weight(1.0f) * .build(); * - * ClusterConfig secondary = ClusterConfig.builder(secondaryEndpoint, clientConfig).weight(0.5f) + * DatabaseConfig secondary = DatabaseConfig.builder(secondaryEndpoint, clientConfig).weight(0.5f) * .healthCheckEnabled(true).build(); * * // Build multi-cluster configuration - * MultiClusterClientConfig config = MultiClusterClientConfig.builder(primary, secondary) + * MultiDbConfig config = MultiDbConfig.builder(primary, secondary) * .circuitBreakerFailureRateThreshold(10.0f).retryMaxAttempts(3).failbackSupported(true) * .gracePeriod(10000).build(); * * // Use with connection provider - * MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - * config); + * MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config); * } * *

* The configuration leverages Resilience4j for * circuit breaker and retry implementations, providing battle-tested fault tolerance patterns. *

- * @see redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider + * @see redis.clients.jedis.mcf.MultiDbConnectionProvider * @see redis.clients.jedis.mcf.HealthCheckStrategy * @see redis.clients.jedis.mcf.EchoStrategy * @see redis.clients.jedis.mcf.LagAwareStrategy @@ -77,7 +76,7 @@ */ // TODO: move @Experimental -public final class MultiClusterClientConfig { +public final class MultiDbConfig { /** * Functional interface for creating {@link HealthCheckStrategy} instances for specific Redis @@ -158,7 +157,7 @@ public static interface StrategySupplier { private static final int DELAY_IN_BETWEEN_FAILOVER_ATTEMPTS_DEFAULT = 12000; /** Array of cluster configurations defining the available Redis endpoints and their settings. */ - private final ClusterConfig[] clusterConfigs; + private final DatabaseConfig[] databaseConfigs; // ============ Retry Configuration ============ // Based on Resilience4j Retry: https://resilience4j.readme.io/docs/retry @@ -438,35 +437,35 @@ public static interface StrategySupplier { private int delayInBetweenFailoverAttempts; /** - * Constructs a new MultiClusterClientConfig with the specified cluster configurations. + * Constructs a new MultiDbConfig with the specified cluster configurations. *

* This constructor validates that at least one cluster configuration is provided and that all * configurations are non-null. Use the {@link Builder} class for more convenient configuration * with default values. *

- * @param clusterConfigs array of cluster configurations defining the available Redis endpoints - * @throws JedisValidationException if clusterConfigs is null or empty + * @param databaseConfigs array of cluster configurations defining the available Redis endpoints + * @throws JedisValidationException if databaseConfigs is null or empty * @throws IllegalArgumentException if any cluster configuration is null - * @see Builder#Builder(ClusterConfig[]) + * @see Builder#Builder(DatabaseConfig[]) */ - public MultiClusterClientConfig(ClusterConfig[] clusterConfigs) { + public MultiDbConfig(DatabaseConfig[] databaseConfigs) { - if (clusterConfigs == null || clusterConfigs.length < 1) throw new JedisValidationException( - "ClusterClientConfigs are required for MultiClusterPooledConnectionProvider"); + if (databaseConfigs == null || databaseConfigs.length < 1) throw new JedisValidationException( + "DatabaseClientConfigs are required for MultiDbConnectionProvider"); - for (ClusterConfig clusterConfig : clusterConfigs) { - if (clusterConfig == null) - throw new IllegalArgumentException("ClusterClientConfigs must not contain null elements"); + for (DatabaseConfig databaseConfig : databaseConfigs) { + if (databaseConfig == null) + throw new IllegalArgumentException("DatabaseClientConfigs must not contain null elements"); } - this.clusterConfigs = clusterConfigs; + this.databaseConfigs = databaseConfigs; } /** * Returns the array of cluster configurations defining available Redis endpoints. * @return array of cluster configurations, never null or empty */ - public ClusterConfig[] getClusterConfigs() { - return clusterConfigs; + public DatabaseConfig[] getDatabaseConfigs() { + return databaseConfigs; } /** @@ -637,79 +636,79 @@ public boolean isFastFailover() { } /** - * Creates a new Builder instance for configuring MultiClusterClientConfig. + * Creates a new Builder instance for configuring MultiDbConfig. *

* At least one cluster configuration must be added to the builder before calling build(). Use the * endpoint() methods to add cluster configurations. *

* @return new Builder instance - * @throws JedisValidationException if clusterConfigs is null or empty - * @see Builder#Builder(ClusterConfig[]) + * @throws JedisValidationException if databaseConfigs is null or empty + * @see Builder#Builder(DatabaseConfig[]) */ public static Builder builder() { return new Builder(); } /** - * Creates a new Builder instance for configuring MultiClusterClientConfig. - * @param clusterConfigs array of cluster configurations defining available Redis endpoints + * Creates a new Builder instance for configuring MultiDbConfig. + * @param databaseConfigs array of cluster configurations defining available Redis endpoints * @return new Builder instance - * @throws JedisValidationException if clusterConfigs is null or empty - * @see Builder#Builder(ClusterConfig[]) + * @throws JedisValidationException if databaseConfigs is null or empty + * @see Builder#Builder(DatabaseConfig[]) */ - public static Builder builder(ClusterConfig[] clusterConfigs) { - return new Builder(clusterConfigs); + public static Builder builder(DatabaseConfig[] databaseConfigs) { + return new Builder(databaseConfigs); } /** - * Creates a new Builder instance for configuring MultiClusterClientConfig. - * @param clusterConfigs list of cluster configurations defining available Redis endpoints + * Creates a new Builder instance for configuring MultiDbConfig. + * @param databaseConfigs list of cluster configurations defining available Redis endpoints * @return new Builder instance - * @throws JedisValidationException if clusterConfigs is null or empty + * @throws JedisValidationException if databaseConfigs is null or empty * @see Builder#Builder(List) */ - public static Builder builder(List clusterConfigs) { - return new Builder(clusterConfigs); + public static Builder builder(List databaseConfigs) { + return new Builder(databaseConfigs); } /** * Configuration class for individual Redis cluster endpoints within a multi-cluster setup. *

- * Each ClusterConfig represents a single Redis endpoint that can participate in the multi-cluster - * failover system. It encapsulates the connection details, weight for priority-based selection, - * and health check configuration for that endpoint. + * Each DatabaseConfig represents a single Redis endpoint that can participate in the + * multi-cluster failover system. It encapsulates the connection details, weight for + * priority-based selection, and health check configuration for that endpoint. *

* @see Builder * @see StrategySupplier * @see redis.clients.jedis.mcf.HealthCheckStrategy */ - public static class ClusterConfig { + public static class DatabaseConfig { - /** The Redis endpoint (host and port) for this cluster. */ + /** The Redis endpoint (host and port) for this database. */ private final Endpoint endpoint; /** Jedis client configuration containing connection settings and authentication. */ private final JedisClientConfig jedisClientConfig; - /** Optional connection pool configuration for managing connections to this cluster. */ + /** Optional connection pool configuration for managing connections to this database. */ private GenericObjectPoolConfig connectionPoolConfig; /** - * Weight value for cluster selection priority. Higher weights indicate higher priority. Default - * value is 1.0f. + * Weight value for database selection priority. Higher weights indicate higher priority. + * Default value is 1.0f. */ private float weight = 1.0f; /** - * Strategy supplier for creating health check instances for this cluster. Default is + * Strategy supplier for creating health check instances for this database. Default is * EchoStrategy.DEFAULT. */ private StrategySupplier healthCheckStrategySupplier; /** - * Constructs a ClusterConfig with basic endpoint and client configuration. + * Constructs a DatabaseConfig with basic endpoint and client configuration. *

- * This constructor creates a cluster configuration with default settings: weight of 1.0f and + * This constructor creates a database configuration with default settings: weight of 1.0f and * EchoStrategy for health checks. Use the {@link Builder} for more advanced configuration * options. *

@@ -717,13 +716,13 @@ public static class ClusterConfig { * @param clientConfig the Jedis client configuration * @throws IllegalArgumentException if endpoint or clientConfig is null */ - public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig) { + public DatabaseConfig(Endpoint endpoint, JedisClientConfig clientConfig) { this.endpoint = endpoint; this.jedisClientConfig = clientConfig; } /** - * Constructs a ClusterConfig with endpoint, client, and connection pool configuration. + * Constructs a DatabaseConfig with endpoint, client, and connection pool configuration. *

* This constructor allows specification of connection pool settings in addition to basic * endpoint configuration. Default weight of 1.0f and EchoStrategy for health checks are used. @@ -733,7 +732,7 @@ public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig) { * @param connectionPoolConfig the connection pool configuration * @throws IllegalArgumentException if endpoint or clientConfig is null */ - public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig, + public DatabaseConfig(Endpoint endpoint, JedisClientConfig clientConfig, GenericObjectPoolConfig connectionPoolConfig) { this.endpoint = endpoint; this.jedisClientConfig = clientConfig; @@ -744,7 +743,7 @@ public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig, * Private constructor used by the Builder to create configured instances. * @param builder the builder containing configuration values */ - private ClusterConfig(Builder builder) { + private DatabaseConfig(Builder builder) { this.endpoint = builder.endpoint; this.jedisClientConfig = builder.jedisClientConfig; this.connectionPoolConfig = builder.connectionPoolConfig; @@ -753,7 +752,7 @@ private ClusterConfig(Builder builder) { } /** - * Returns the Redis endpoint (host and port) for this cluster. + * Returns the Redis endpoint (host and port) for this database. * @return the host and port information */ public Endpoint getEndpoint() { @@ -761,7 +760,7 @@ public Endpoint getEndpoint() { } /** - * Creates a new Builder instance for configuring a ClusterConfig. + * Creates a new Builder instance for configuring a DatabaseConfig. * @param endpoint the Redis endpoint (host and port) * @param clientConfig the Jedis client configuration * @return new Builder instance @@ -773,7 +772,7 @@ public static Builder builder(Endpoint endpoint, JedisClientConfig clientConfig) } /** - * Returns the Jedis client configuration for this cluster. + * Returns the Jedis client configuration for this database. * @return the client configuration containing connection settings and authentication */ public JedisClientConfig getJedisClientConfig() { @@ -781,7 +780,7 @@ public JedisClientConfig getJedisClientConfig() { } /** - * Returns the connection pool configuration for this cluster. + * Returns the connection pool configuration for this database. * @return the connection pool configuration, may be null if not specified */ public GenericObjectPoolConfig getConnectionPoolConfig() { @@ -789,9 +788,9 @@ public GenericObjectPoolConfig getConnectionPoolConfig() { } /** - * Returns the weight value used for cluster selection priority. + * Returns the weight value used for database selection priority. *

- * Higher weight values indicate higher priority. During failover, clusters are selected in + * Higher weight values indicate higher priority. During failover, databases are selected in * descending order of weight (highest weight first). *

* @return the weight value, default is 1.0f @@ -801,9 +800,9 @@ public float getWeight() { } /** - * Returns the health check strategy supplier for this cluster. + * Returns the health check strategy supplier for this database. *

- * The strategy supplier is used to create health check instances that monitor this cluster's + * The strategy supplier is used to create health check instances that monitor this database's * availability. Returns null if health checks are disabled. *

* @return the health check strategy supplier, or null if health checks are disabled @@ -815,9 +814,9 @@ public StrategySupplier getHealthCheckStrategySupplier() { } /** - * Builder class for creating ClusterConfig instances with fluent configuration API. + * Builder class for creating DatabaseConfig instances with fluent configuration API. *

- * The Builder provides a convenient way to configure cluster settings including connection + * The Builder provides a convenient way to configure database settings including connection * pooling, weight-based priority, and health check strategies. All configuration methods return * the builder instance for method chaining. *

@@ -831,7 +830,7 @@ public StrategySupplier getHealthCheckStrategySupplier() { * */ public static class Builder { - /** The Redis endpoint for this cluster configuration. */ + /** The Redis endpoint for this database configuration. */ private Endpoint endpoint; /** The Jedis client configuration. */ @@ -840,7 +839,7 @@ public static class Builder { /** Optional connection pool configuration. */ private GenericObjectPoolConfig connectionPoolConfig; - /** Weight for cluster selection priority. Default: 1.0f */ + /** Weight for database selection priority. Default: 1.0f */ private float weight = 1.0f; /** Health check strategy supplier. Default: EchoStrategy.DEFAULT */ @@ -858,7 +857,7 @@ public Builder(Endpoint endpoint, JedisClientConfig clientConfig) { } /** - * Sets the connection pool configuration for this cluster. + * Sets the connection pool configuration for this database. *

* Connection pooling helps manage connections efficiently and provides better performance * under load. If not specified, default pooling behavior will be used. @@ -873,19 +872,19 @@ public Builder connectionPoolConfig( } /** - * Sets the weight value for cluster selection priority. + * Sets the weight value for database selection priority. *

- * Weight determines the priority order for cluster selection during failover. Clusters with + * Weight determines the priority order for database selection during failover. Databases with * higher weights are preferred over those with lower weights. The system will attempt to use - * the highest-weight healthy cluster available. + * the highest-weight healthy database available. *

*

* Examples: *

*
   * <ul>
   * <li>1.0f: Standard priority (default)</li>
-  * <li>0.8f: Lower priority (secondary cluster)</li>
-  * <li>0.1f: Lowest priority (backup cluster)</li>
+  * <li>0.8f: Lower priority (secondary database)</li>
+  * <li>0.1f: Lowest priority (backup database)</li>
   * </ul>
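+  * <p>
+  * For example (illustrative endpoints and client config):
+  * <pre>{@code
+  * DatabaseConfig primary = DatabaseConfig.builder(east, clientConfig).weight(1.0f).build();
+  * DatabaseConfig standby = DatabaseConfig.builder(west, clientConfig).weight(0.5f).build();
+  * }</pre>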
* @param weight the weight value for priority-based selection * @return this builder instance for method chaining @@ -896,10 +895,10 @@ public Builder weight(float weight) { } /** - * Sets a custom health check strategy supplier for this cluster. + * Sets a custom health check strategy supplier for this database. *

- * The strategy supplier creates health check instances that monitor this cluster's - * availability. Different clusters can use different health check strategies based on their + * The strategy supplier creates health check instances that monitor this database's + * availability. Different databases can use different health check strategies based on their * specific requirements. *

* @param healthCheckStrategySupplier the health check strategy supplier @@ -917,14 +916,14 @@ public Builder healthCheckStrategySupplier(StrategySupplier healthCheckStrategyS } /** - * Sets a specific health check strategy instance for this cluster. + * Sets a specific health check strategy instance for this database. *

* This is a convenience method that wraps the provided strategy in a supplier that always * returns the same instance. Use this when you have a pre-configured strategy instance. *

*

* Note: The same strategy instance will be reused, so ensure it's - * thread-safe if multiple clusters might use it. + * thread-safe if multiple databases might use it. *

* @param healthCheckStrategy the health check strategy instance * @return this builder instance for method chaining @@ -940,15 +939,15 @@ public Builder healthCheckStrategy(HealthCheckStrategy healthCheckStrategy) { } /** - * Enables or disables health checks for this cluster. + * Enables or disables health checks for this database. *

- * When health checks are disabled (false), the cluster will not be proactively monitored for + * When health checks are disabled (false), the database will not be proactively monitored for * availability. This means: *

*
   * <ul>
   * <li>No background health check threads will be created</li>
-  * <li>Failback to this cluster must be triggered manually</li>
-  * <li>The cluster is assumed to be healthy unless circuit breaker opens</li>
+  * <li>Failback to this database must be triggered manually</li>
+  * <li>The database is assumed to be healthy unless circuit breaker opens</li>
   * </ul>
   * <p>
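+  * For example (illustrative endpoint and client config):
+  * <pre>{@code
+  * DatabaseConfig config = DatabaseConfig.builder(endpoint, clientConfig)
+  *     .healthCheckEnabled(false)
+  *     .build();
+  * }</pre>
+  * <p>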

* When health checks are enabled (true) and no strategy supplier was previously set, the @@ -967,31 +966,30 @@ public Builder healthCheckEnabled(boolean healthCheckEnabled) { } /** - * Builds and returns a new ClusterConfig instance with the configured settings. - * @return a new ClusterConfig instance + * Builds and returns a new DatabaseConfig instance with the configured settings. + * @return a new DatabaseConfig instance */ - public ClusterConfig build() { - return new ClusterConfig(this); + public DatabaseConfig build() { + return new DatabaseConfig(this); } } } /** - * Builder class for creating MultiClusterClientConfig instances with comprehensive configuration - * options. + * Builder class for creating MultiDbConfig instances with comprehensive configuration options. *

* The Builder provides a fluent API for configuring all aspects of multi-cluster failover * behavior, including retry logic, circuit breaker settings, and failback mechanisms. It uses * sensible defaults based on production best practices while allowing fine-tuning for specific * requirements. *

- * @see MultiClusterClientConfig - * @see ClusterConfig + * @see MultiDbConfig + * @see DatabaseConfig */ public static class Builder { - /** Array of cluster configurations defining available Redis endpoints. */ - private final List clusterConfigs = new ArrayList<>(); + /** Array of database configurations defining available Redis endpoints. */ + private final List databaseConfigs = new ArrayList<>(); // ============ Retry Configuration Fields ============ /** Maximum number of retry attempts including the initial call. */ @@ -1058,35 +1056,35 @@ public Builder() { /** * Constructs a new Builder with the specified cluster configurations. - * @param clusterConfigs array of cluster configurations defining available Redis endpoints - * @throws JedisValidationException if clusterConfigs is null or empty + * @param databaseConfigs array of cluster configurations defining available Redis endpoints + * @throws JedisValidationException if databaseConfigs is null or empty */ - public Builder(ClusterConfig[] clusterConfigs) { + public Builder(DatabaseConfig[] databaseConfigs) { - this(Arrays.asList(clusterConfigs)); + this(Arrays.asList(databaseConfigs)); } /** - * Constructs a new Builder with the specified cluster configurations. - * @param clusterConfigs list of cluster configurations defining available Redis endpoints - * @throws JedisValidationException if clusterConfigs is null or empty + * Constructs a new Builder with the specified database configurations. + * @param databaseConfigs list of database configurations defining available Redis endpoints + * @throws JedisValidationException if databaseConfigs is null or empty */ - public Builder(List clusterConfigs) { - this.clusterConfigs.addAll(clusterConfigs); + public Builder(List databaseConfigs) { + this.databaseConfigs.addAll(databaseConfigs); } /** * Adds a pre-configured endpoint configuration. *

- * This method allows adding a fully configured ClusterConfig instance, providing maximum + * This method allows adding a fully configured DatabaseConfig instance, providing maximum * flexibility for advanced configurations including custom health check strategies, connection * pool settings, etc. *

- * @param clusterConfig the pre-configured cluster configuration + * @param databaseConfig the pre-configured database configuration * @return this builder */ - public Builder endpoint(ClusterConfig clusterConfig) { - this.clusterConfigs.add(clusterConfig); + public Builder endpoint(DatabaseConfig databaseConfig) { + this.databaseConfigs.add(databaseConfig); return this; } @@ -1104,10 +1102,10 @@ public Builder endpoint(ClusterConfig clusterConfig) { */ public Builder endpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) { - ClusterConfig clusterConfig = ClusterConfig.builder(endpoint, clientConfig).weight(weight) + DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight) .build(); - this.clusterConfigs.add(clusterConfig); + this.databaseConfigs.add(databaseConfig); return this; } @@ -1500,18 +1498,17 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts } /** - * Builds and returns a new MultiClusterClientConfig instance with all configured settings. + * Builds and returns a new MultiDbConfig instance with all configured settings. *

* This method creates the final configuration object by copying all builder settings to the * configuration instance. The builder can be reused after calling build() to create additional * configurations with different settings. *

- * @return a new MultiClusterClientConfig instance with the configured settings + * @return a new MultiDbConfig instance with the configured settings */ - public MultiClusterClientConfig build() { + public MultiDbConfig build() { - MultiClusterClientConfig config = new MultiClusterClientConfig( - this.clusterConfigs.toArray(new ClusterConfig[0])); + MultiDbConfig config = new MultiDbConfig(this.databaseConfigs.toArray(new DatabaseConfig[0])); // Copy retry configuration config.retryMaxAttempts = this.retryMaxAttempts; diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java index b175dbd319..895d6280c6 100644 --- a/src/main/java/redis/clients/jedis/UnifiedJedis.java +++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java @@ -28,14 +28,14 @@ import redis.clients.jedis.json.JsonSetParams; import redis.clients.jedis.json.Path; import redis.clients.jedis.json.Path2; +import redis.clients.jedis.mcf.MultiDbCommandExecutor; import redis.clients.jedis.params.VAddParams; import redis.clients.jedis.params.VSimParams; import redis.clients.jedis.resps.RawVector; import redis.clients.jedis.json.JsonObjectMapper; -import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; -import redis.clients.jedis.mcf.MultiClusterPipeline; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; -import redis.clients.jedis.mcf.MultiClusterTransaction; +import redis.clients.jedis.mcf.MultiDbPipeline; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; +import redis.clients.jedis.mcf.MultiDbTransaction; import redis.clients.jedis.params.*; import redis.clients.jedis.providers.*; import redis.clients.jedis.resps.*; @@ -240,8 +240,8 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo *

*/ @Experimental - public UnifiedJedis(MultiClusterPooledConnectionProvider provider) { - this(new CircuitBreakerCommandExecutor(provider), provider); + public UnifiedJedis(MultiDbConnectionProvider provider) { + this(new MultiDbCommandExecutor(provider), provider); } /** @@ -5099,8 +5099,8 @@ public List tdigestByRevRank(String key, long... ranks) { public PipelineBase pipelined() { if (provider == null) { throw new IllegalStateException("It is not allowed to create Pipeline from this " + getClass()); - } else if (provider instanceof MultiClusterPooledConnectionProvider) { - return new MultiClusterPipeline((MultiClusterPooledConnectionProvider) provider, commandObjects); + } else if (provider instanceof MultiDbConnectionProvider) { + return new MultiDbPipeline((MultiDbConnectionProvider) provider, commandObjects); } else { return new Pipeline(provider.getConnection(), true, commandObjects); } @@ -5120,8 +5120,8 @@ public AbstractTransaction multi() { public AbstractTransaction transaction(boolean doMulti) { if (provider == null) { throw new IllegalStateException("It is not allowed to create Transaction from this " + getClass()); - } else if (provider instanceof MultiClusterPooledConnectionProvider) { - return new MultiClusterTransaction((MultiClusterPooledConnectionProvider) provider, doMulti, commandObjects); + } else if (provider instanceof MultiDbConnectionProvider) { + return new MultiDbTransaction((MultiDbConnectionProvider) provider, doMulti, commandObjects); } else { return new Transaction(provider.getConnection(), doMulti, true, commandObjects); } diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java index df3c1f86d6..002de51666 100644 --- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java +++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java @@ -2,12 +2,12 @@ import java.util.function.Consumer; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.executors.CommandExecutor; -import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; -import redis.clients.jedis.mcf.ClusterSwitchEventArgs; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; +import redis.clients.jedis.mcf.MultiDbCommandExecutor; +import redis.clients.jedis.mcf.DatabaseSwitchEvent; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.providers.ConnectionProvider; /** @@ -38,14 +38,14 @@ *

  * MultiDbClient client = MultiDbClient.builder()
  *                 .multiDbConfig(
- *                         MultiClusterClientConfig.builder()
+ *                         MultiDbConfig.builder()
  *                                 .endpoint(
- *                                         ClusterConfig.builder(
+ *                                         DatabaseConfig.builder(
  *                                                         east,
  *                                                         DefaultJedisClientConfig.builder().credentials(credentialsEast).build())
  *                                                 .weight(100.0f)
  *                                                 .build())
- *                                 .endpoint(ClusterConfig.builder(
+ *                                 .endpoint(DatabaseConfig.builder(
  *                                                 west,
  *                                                 DefaultJedisClientConfig.builder().credentials(credentialsWest).build())
  *                                         .weight(50.0f).build())
@@ -60,15 +60,15 @@
  * 
  * @param <C> the client type that this builder creates
  * @author Ivo Gaydazhiev
- * @since 5.2.0
+ * @since 7.0.0
  */
 @Experimental
 public abstract class MultiDbClientBuilder<C>
     extends AbstractClientBuilder<MultiDbClientBuilder<C>, C> {
 
   // Multi-db specific configuration fields
-  private MultiClusterClientConfig multiDbConfig = null;
-  private Consumer<ClusterSwitchEventArgs> databaseSwitchListener = null;
+  private MultiDbConfig multiDbConfig = null;
+  private Consumer<DatabaseSwitchEvent> databaseSwitchListener = null;
 
   /**
    * Sets the multi-database configuration.
@@ -79,7 +79,7 @@ public abstract class MultiDbClientBuilder
    * @param config the multi-database configuration
    * @return this builder
    */
-  public MultiDbClientBuilder<C> multiDbConfig(MultiClusterClientConfig config) {
+  public MultiDbClientBuilder<C> multiDbConfig(MultiDbConfig config) {
     this.multiDbConfig = config;
     return this;
   }
@@ -94,7 +94,7 @@ public MultiDbClientBuilder multiDbConfig(MultiClusterClientConfig config) {
    * @param listener the database switch event listener
    * @return this builder
    */
-  public MultiDbClientBuilder<C> databaseSwitchListener(Consumer<ClusterSwitchEventArgs> listener) {
+  public MultiDbClientBuilder<C> databaseSwitchListener(Consumer<DatabaseSwitchEvent> listener) {
     this.databaseSwitchListener = listener;
     return this;
   }
@@ -107,18 +107,17 @@ protected MultiDbClientBuilder self() {
   @Override
   protected ConnectionProvider createDefaultConnectionProvider() {
 
-    if (this.multiDbConfig == null || this.multiDbConfig.getClusterConfigs() == null
-        || this.multiDbConfig.getClusterConfigs().length < 1) {
+    if (this.multiDbConfig == null || this.multiDbConfig.getDatabaseConfigs() == null
+        || this.multiDbConfig.getDatabaseConfigs().length < 1) {
       throw new IllegalArgumentException("At least one endpoint must be specified");
     }
 
     // Create the multi-cluster connection provider
-    MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
-        multiDbConfig);
+    MultiDbConnectionProvider provider = new MultiDbConnectionProvider(multiDbConfig);
 
     // Set database switch listener if provided
     if (this.databaseSwitchListener != null) {
-      provider.setClusterSwitchListener(this.databaseSwitchListener);
+      provider.setDatabaseSwitchListener(this.databaseSwitchListener);
     }
 
     return provider;
@@ -126,9 +125,8 @@ protected ConnectionProvider createDefaultConnectionProvider() {
 
   @Override
   protected CommandExecutor createDefaultCommandExecutor() {
-    // For multi-db clients, we always use CircuitBreakerCommandExecutor
-    return new CircuitBreakerCommandExecutor(
-        (MultiClusterPooledConnectionProvider) this.connectionProvider);
+    // For multi-db clients, we always use MultiDbCommandExecutor
+    return new MultiDbCommandExecutor((MultiDbConnectionProvider) this.connectionProvider);
   }
 
   @Override
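To see how the renamed builder pieces fit together, here is a minimal, hypothetical usage sketch. The endpoints and the trailing `.build()` calls are placeholder assumptions in the style of the javadoc example above, not code taken from this patch.

```java
// Sketch only: assumes MultiDbClient/MultiDbConfig/DatabaseConfig as introduced by this rename.
HostAndPort east = new HostAndPort("redis-east.example.com", 14000);
HostAndPort west = new HostAndPort("redis-west.example.com", 14000);
JedisClientConfig clientConfig = DefaultJedisClientConfig.builder().build();

MultiDbClient client = MultiDbClient.builder()
    .multiDbConfig(MultiDbConfig.builder()
        .endpoint(DatabaseConfig.builder(east, clientConfig).weight(100.0f).build())
        .endpoint(DatabaseConfig.builder(west, clientConfig).weight(50.0f).build())
        .build())
    // The listener now receives a DatabaseSwitchEvent instead of ClusterSwitchEventArgs
    .databaseSwitchListener(event -> System.out.printf("switched to %s (%s): %s%n",
        event.getDatabaseName(), event.getEndpoint(), event.getReason()))
    .build();
```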
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
index 92211ff2ac..3dfc65a1f5 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
@@ -1,7 +1,7 @@
 package redis.clients.jedis.mcf;
 
 import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
 
 /**
  * Adapter that disables Resilience4j's built-in circuit breaker evaluation and help delegate
@@ -9,10 +9,10 @@
  * 

  * This adapter sets maximum values for failure rate (100%) and minimum calls (Integer.MAX_VALUE) to
  * effectively disable Resilience4j's automatic circuit breaker transitions, allowing
- * {@link MultiClusterPooledConnectionProvider.Cluster#evaluateThresholds(boolean)} to control when
- * the circuit breaker opens based on both minimum failure count AND failure rate.
+ * {@link MultiDbConnectionProvider.Database#evaluateThresholds(boolean)} to control when the
+ * circuit breaker opens based on both minimum failure count AND failure rate.
  *
- * @see MultiClusterPooledConnectionProvider.Cluster#evaluateThresholds(boolean)
+ * @see MultiDbConnectionProvider.Database#evaluateThresholds(boolean)
  */
 class CircuitBreakerThresholdsAdapter {
   /** Maximum failure rate threshold (100%) to disable Resilience4j evaluation */
@@ -67,9 +67,9 @@ int getSlidingWindowSize() {
    * method controls circuit breaker state based on the original configuration's dual-threshold
    * logic.
    *
-   * @param multiClusterClientConfig configuration containing sliding window size
+   * @param multiDbConfig configuration containing sliding window size
    */
-  CircuitBreakerThresholdsAdapter(MultiClusterClientConfig multiClusterClientConfig) {
+  CircuitBreakerThresholdsAdapter(MultiDbConfig multiDbConfig) {
     // IMPORTANT: failureRateThreshold is set to max theoretically disable Resilience4j's evaluation
     // and rely on our custom evaluateThresholds() logic.
@@ -79,6 +79,6 @@ int getSlidingWindowSize() {
     // and rely on our custom evaluateThresholds() logic.
     minimumNumberOfCalls = Integer.MAX_VALUE;

-    slidingWindowSize = multiClusterClientConfig.getCircuitBreakerSlidingWindowSize();
+    slidingWindowSize = multiDbConfig.getCircuitBreakerSlidingWindowSize();
   }
 }
diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
deleted file mode 100644
index 1fe6cebe4d..0000000000
--- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package redis.clients.jedis.mcf;
-
-import redis.clients.jedis.Endpoint;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
-
-public class ClusterSwitchEventArgs {
-
-  private final SwitchReason reason;
-  private final String ClusterName;
-  private final Endpoint Endpoint;
-
-  public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Cluster cluster) {
-    this.reason = reason;
-    // TODO: @ggivo do we need cluster name?
-    this.ClusterName = cluster.getCircuitBreaker().getName();
-    this.Endpoint = endpoint;
-  }
-
-  public SwitchReason getReason() {
-    return reason;
-  }
-
-  public String getClusterName() {
-    return ClusterName;
-  }
-
-  public Endpoint getEndpoint() {
-    return Endpoint;
-  }
-
-}
diff --git a/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java b/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java
new file mode 100644
index 0000000000..6cc233cd7d
--- /dev/null
+++ b/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java
@@ -0,0 +1,30 @@
+package redis.clients.jedis.mcf;
+
+import redis.clients.jedis.Endpoint;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;
+
+public class DatabaseSwitchEvent {
+
+  private final SwitchReason reason;
+  private final String databaseName;
+  private final Endpoint endpoint;
+
+  public DatabaseSwitchEvent(SwitchReason reason, Endpoint endpoint, Database database) {
+    this.reason = reason;
+    this.databaseName = database.getCircuitBreaker().getName();
+    this.endpoint = endpoint;
+  }
+
+  public SwitchReason getReason() {
+    return reason;
+  }
+
+  public String getDatabaseName() {
+    return databaseName;
+  }
+
+  public Endpoint getEndpoint() {
+    return endpoint;
+  }
+
+}
diff --git a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
index 3c73e17d6f..6be05e2cfb 100644
--- a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
+++ b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java
@@ -8,7 +8,7 @@
 import redis.clients.jedis.JedisClientConfig;
 import redis.clients.jedis.JedisPooled;
 import redis.clients.jedis.UnifiedJedis;
-import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
+import redis.clients.jedis.MultiDbConfig.StrategySupplier;

 public class EchoStrategy implements HealthCheckStrategy {
   private static final int MAX_HEALTH_CHECK_POOL_SIZE = 2;
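`EchoStrategy` plugs in through the renamed `MultiDbConfig.StrategySupplier`. A hedged sketch of wiring a supplier into a `DatabaseConfig` follows; the `healthCheckStrategySupplier(...)` setter name and the `EchoStrategy(HostAndPort, JedisClientConfig)` constructor are assumptions inferred from `getHealthCheckStrategySupplier()` and `StrategySupplier.get(...)` used later in this patch.

```java
// Assumption: EchoStrategy exposes a (HostAndPort, JedisClientConfig) constructor
// matching the StrategySupplier.get(...) signature the provider calls.
MultiDbConfig.StrategySupplier echoSupplier =
    (hostAndPort, jedisClientConfig) -> new EchoStrategy(hostAndPort, jedisClientConfig);

DatabaseConfig eastConfig = DatabaseConfig.builder(east, clientConfig)
    .weight(100.0f)
    .healthCheckStrategySupplier(echoSupplier) // assumed builder method
    .build();
```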
diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
index 3543517703..c431764d42 100644
--- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
+++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java
@@ -11,7 +11,7 @@
  * @see JedisFailoverException.JedisTemporarilyNotAvailableException
  */
 public class JedisFailoverException extends JedisConnectionException {
-  private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiClusterClientConfig was not "
+  private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiDbConfig was not "
       + "provided with an additional cluster/database endpoint according to its prioritized sequence. "
       + "If applicable, consider falling back OR restarting with an available cluster/database endpoint";
@@ -28,9 +28,8 @@ public JedisFailoverException() {
   * the max number of failover attempts has been exceeded. And there is still no healthy cluster.
   *
   * See the configuration properties
-   * {@link redis.clients.jedis.MultiClusterClientConfig#maxNumFailoverAttempts} and
-   * {@link redis.clients.jedis.MultiClusterClientConfig#delayInBetweenFailoverAttempts} for more
-   * details.
+   * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and
+   * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more details.
   */
  public static class JedisPermanentlyNotAvailableException extends JedisFailoverException {
    public JedisPermanentlyNotAvailableException(String s) {
@@ -49,9 +48,8 @@ public JedisPermanentlyNotAvailableException() {
   * temporary condition and it is possible that there will be a healthy cluster available.
   *
   * See the configuration properties
-   * {@link redis.clients.jedis.MultiClusterClientConfig#maxNumFailoverAttempts} and
-   * {@link redis.clients.jedis.MultiClusterClientConfig#delayInBetweenFailoverAttempts} for more
-   * details.
+   * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and
+   * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more details.
   */
  public static class JedisTemporarilyNotAvailableException extends JedisFailoverException {
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
similarity index 75%
rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
index 90f269bd70..d3b7c48e2e 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java
@@ -9,7 +9,7 @@
 import redis.clients.jedis.annots.Experimental;
 import redis.clients.jedis.exceptions.JedisConnectionException;
 import redis.clients.jedis.executors.CommandExecutor;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database;

 /**
  * @author Allen Terleto (aterleto)
@@ -21,29 +21,28 @@
  *
  */
 @Experimental
-public class CircuitBreakerCommandExecutor extends CircuitBreakerFailoverBase
-    implements CommandExecutor {
+public class MultiDbCommandExecutor extends MultiDbFailoverBase implements CommandExecutor {

-  public CircuitBreakerCommandExecutor(MultiClusterPooledConnectionProvider provider) {
+  public MultiDbCommandExecutor(MultiDbConnectionProvider provider) {
     super(provider);
   }

   @Override
   public <T> T executeCommand(CommandObject<T> commandObject) {
-    Cluster cluster = provider.getCluster(); // Pass this by reference for thread safety
+    Database database = provider.getDatabase(); // Pass this by reference for thread safety
     DecorateSupplier<T> supplier = Decorators
-        .ofSupplier(() -> this.handleExecuteCommand(commandObject, cluster));
+        .ofSupplier(() -> this.handleExecuteCommand(commandObject, database));

-    supplier.withCircuitBreaker(cluster.getCircuitBreaker());
-    supplier.withRetry(cluster.getRetry());
+    supplier.withCircuitBreaker(database.getCircuitBreaker());
+    supplier.withRetry(database.getRetry());
     supplier.withFallback(provider.getFallbackExceptionList(),
-        e -> this.handleClusterFailover(commandObject, cluster));
+        e -> this.handleClusterFailover(commandObject, database));

     try {
       return supplier.decorate().get();
     } catch (Exception e) {
-      if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveCluster(cluster)) {
-        clusterFailover(cluster);
+      if (database.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(database)) {
+        clusterFailover(database);
       }
       throw e;
     }
@@ -52,7 +51,7 @@ public <T> T executeCommand(CommandObject<T> commandObject) {
   /**
    * Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios
    */
-  private <T> T handleExecuteCommand(CommandObject<T> commandObject, Cluster cluster) {
+  private <T> T handleExecuteCommand(CommandObject<T> commandObject, Database cluster) {
     Connection connection;
     try {
       connection = cluster.getConnection();
@@ -63,7 +62,7 @@ private <T> T handleExecuteCommand(CommandObject<T> commandObject, Cluster clust
     try {
       return connection.executeCommand(commandObject);
     } catch (Exception e) {
-      if (cluster.retryOnFailover() && !isActiveCluster(cluster)
+      if (cluster.retryOnFailover() && !isActiveDatabase(cluster)
           && isCircuitBreakerTrackedException(e, cluster)) {
         throw new ConnectionFailoverException(
             "Command failed during failover: " + cluster.getCircuitBreaker().getName(), e);
@@ -78,7 +77,7 @@ && isCircuitBreakerTrackedException(e, cluster)) {
   /**
    * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
    * failure scenarios
    */
-  private <T> T handleClusterFailover(CommandObject<T> commandObject, Cluster cluster) {
+  private <T> T handleClusterFailover(CommandObject<T> commandObject, Database cluster) {

     clusterFailover(cluster);
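The executor above wraps every command in a circuit breaker, a retry, and a fallback. For readers unfamiliar with that stack, here is a standalone Resilience4j sketch of the same decoration order; it is a generic illustration, not this class's actual code, and `doCall`/`"db"` are placeholder names.

```java
import java.util.Collections;
import java.util.function.Supplier;
import io.github.resilience4j.circuitbreaker.CallNotPermittedException;
import io.github.resilience4j.circuitbreaker.CircuitBreaker;
import io.github.resilience4j.decorators.Decorators;
import io.github.resilience4j.retry.Retry;

public class DecorationSketch {
  static String doCall() { return "PONG"; }

  public static void main(String[] args) {
    CircuitBreaker cb = CircuitBreaker.ofDefaults("db");
    Retry retry = Retry.ofDefaults("db");
    Supplier<String> decorated = Decorators.ofSupplier(DecorationSketch::doCall)
        .withCircuitBreaker(cb)   // record success/failure against this database's breaker
        .withRetry(retry)         // retry transient failures first
        .withFallback(Collections.singletonList(CallNotPermittedException.class),
            e -> "fallback")      // divert when the breaker is open
        .decorate();
    System.out.println(decorated.get());
  }
}
```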
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
similarity index 56%
rename from src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
rename to src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
index a389a0d7b4..97f7fa658f 100644
--- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java
@@ -33,7 +33,7 @@
 import org.slf4j.LoggerFactory;

 import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
 import redis.clients.jedis.annots.Experimental;
 import redis.clients.jedis.annots.VisibleForTesting;
 import redis.clients.jedis.exceptions.JedisConnectionException;
@@ -41,47 +41,46 @@
 import redis.clients.jedis.exceptions.JedisValidationException;
 import redis.clients.jedis.mcf.JedisFailoverException.*;
 import redis.clients.jedis.providers.ConnectionProvider;
-import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier;
+import redis.clients.jedis.MultiDbConfig.StrategySupplier;
 import redis.clients.jedis.util.Pool;

 /**
  * @author Allen Terleto (aterleto)
  *
- * ConnectionProvider which supports multiple cluster/database endpoints each with their own
+ * ConnectionProvider which supports multiple database endpoints each with their own
  * isolated connection pool. With this ConnectionProvider users can seamlessly failover to
- * Disaster Recovery (DR), Backup, and Active-Active cluster(s) by using simple
+ * Disaster Recovery (DR), Backup, and Active-Active database(s) by using simple
  * configuration which is passed through from Resilience4j -
  * <a href="https://resilience4j.readme.io/docs">docs</a>
  *
- * Support for manual failback is provided by way of {@link #setActiveCluster(Endpoint)}
+ * Support for manual failback is provided by way of {@link #setActiveDatabase(Endpoint)}
  *
  */
 @Experimental
-public class MultiClusterPooledConnectionProvider implements ConnectionProvider {
+public class MultiDbConnectionProvider implements ConnectionProvider {

   private final Logger log = LoggerFactory.getLogger(getClass());

   /**
-   * Ordered map of cluster/database endpoints which were provided at startup via the
-   * MultiClusterClientConfig. Users can move down (failover) or (up) failback the map depending on
+   * Ordered map of database endpoints provided at startup via the MultiDbConfig. Users can move
+   * down (failover) or up (failback) the map depending on
    * their availability and order.
    */
-  private final Map<Endpoint, Cluster> multiClusterMap = new ConcurrentHashMap<>();
+  private final Map<Endpoint, Database> databaseMap = new ConcurrentHashMap<>();

   /**
-   * Indicates the actively used cluster/database endpoint (connection pool) amongst the
-   * pre-configured list which were provided at startup via the MultiClusterClientConfig. All
-   * traffic will be routed with this cluster/database
+   * Indicates the actively used database endpoint (connection pool) amongst the pre-configured list
+   * which were provided at startup via the MultiDbConfig. All traffic will be routed with this
+   * database
    */
-  private volatile Cluster activeCluster;
+  private volatile Database activeDatabase;

-  private final Lock activeClusterChangeLock = new ReentrantLock(true);
+  private final Lock activeDatabaseChangeLock = new ReentrantLock(true);

   /**
-   * Functional interface for listening to cluster switch events. The event args contain the reason
-   * for the switch, the endpoint, and the cluster.
+   * Functional interface for listening to database switch events. The event args contain the reason
+   * for the switch, the endpoint, and the database.
    */
-  private Consumer<ClusterSwitchEventArgs> clusterSwitchListener;
+  private Consumer<DatabaseSwitchEvent> databaseSwitchListener;

   private List<Class<? extends Throwable>> fallbackExceptionList;

@@ -99,33 +98,33 @@ public class MultiClusterPooledConnectionProvider implements ConnectionProvider
     return t;
   });

-  // Store retry and circuit breaker configs for dynamic cluster addition/removal
+  // Store retry and circuit breaker configs for dynamic database addition/removal
   private RetryConfig retryConfig;
   private CircuitBreakerConfig circuitBreakerConfig;
-  private MultiClusterClientConfig multiClusterClientConfig;
+  private MultiDbConfig multiDbConfig;

   private AtomicLong failoverFreezeUntil = new AtomicLong(0);
   private AtomicInteger failoverAttemptCount = new AtomicInteger(0);

-  public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiClusterClientConfig) {
+  public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) {

-    if (multiClusterClientConfig == null) throw new JedisValidationException(
-        "MultiClusterClientConfig must not be NULL for MultiClusterPooledConnectionProvider");
+    if (multiDbConfig == null) throw new JedisValidationException(
+        "MultiDbConfig must not be NULL for MultiDbConnectionProvider");

-    this.multiClusterClientConfig = multiClusterClientConfig;
+    this.multiDbConfig = multiDbConfig;

     ////////////// Configure Retry ////////////////////

     RetryConfig.Builder retryConfigBuilder = RetryConfig.custom();
-    retryConfigBuilder.maxAttempts(multiClusterClientConfig.getRetryMaxAttempts());
+    retryConfigBuilder.maxAttempts(multiDbConfig.getRetryMaxAttempts());
     retryConfigBuilder.intervalFunction(
-        IntervalFunction.ofExponentialBackoff(multiClusterClientConfig.getRetryWaitDuration(),
-            multiClusterClientConfig.getRetryWaitDurationExponentialBackoffMultiplier()));
+        IntervalFunction.ofExponentialBackoff(multiDbConfig.getRetryWaitDuration(),
+            multiDbConfig.getRetryWaitDurationExponentialBackoffMultiplier()));
     retryConfigBuilder.failAfterMaxAttempts(false); // JedisConnectionException will be thrown
     retryConfigBuilder.retryExceptions(
-        multiClusterClientConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new));
+        multiDbConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new));

-    List<Class> retryIgnoreExceptionList = multiClusterClientConfig.getRetryIgnoreExceptionList();
+    List<Class> retryIgnoreExceptionList = multiDbConfig.getRetryIgnoreExceptionList();
     if (retryIgnoreExceptionList != null)
       retryConfigBuilder.ignoreExceptions(retryIgnoreExceptionList.stream().toArray(Class[]::new));

@@ -135,15 +134,14 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste

     CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom();

-    CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(
-        multiClusterClientConfig);
+    CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(multiDbConfig);
     circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls());
     circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold());
     circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize());
     circuitBreakerConfigBuilder.slidingWindowType(adapter.getSlidingWindowType());
-    circuitBreakerConfigBuilder.recordExceptions(multiClusterClientConfig
-        .getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
+    circuitBreakerConfigBuilder.recordExceptions(
+        multiDbConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new));
     circuitBreakerConfigBuilder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State
                                                                                      // transitions
                                                                                      // are
@@ -152,76 +150,76 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste
                                                                                      // states
                                                                                      // are used

-    List<Class> circuitBreakerIgnoreExceptionList = multiClusterClientConfig
+    List<Class> circuitBreakerIgnoreExceptionList = multiDbConfig
         .getCircuitBreakerIgnoreExceptionList();
     if (circuitBreakerIgnoreExceptionList != null) circuitBreakerConfigBuilder
         .ignoreExceptions(circuitBreakerIgnoreExceptionList.stream().toArray(Class[]::new));

     this.circuitBreakerConfig = circuitBreakerConfigBuilder.build();

-    ////////////// Configure Cluster Map ////////////////////
+    ////////////// Configure Database Map ////////////////////

-    ClusterConfig[] clusterConfigs = multiClusterClientConfig.getClusterConfigs();
+    DatabaseConfig[] databaseConfigs = multiDbConfig.getDatabaseConfigs();

-    // Now add clusters - health checks will start but events will be queued
-    for (ClusterConfig config : clusterConfigs) {
-      addClusterInternal(multiClusterClientConfig, config);
+    // Now add databases - health checks will start but events will be queued
+    for (DatabaseConfig config : databaseConfigs) {
+      addDatabaseInternal(multiDbConfig, config);
     }

     // Initialize StatusTracker for waiting on health check results
     StatusTracker statusTracker = new StatusTracker(healthStatusManager);

-    // Wait for initial health check results and select active cluster based on weights
-    activeCluster = waitForInitialHealthyCluster(statusTracker);
+    // Wait for initial health check results and select active database based on weights
+    activeDatabase = waitForInitialHealthyDatabase(statusTracker);

     // Mark initialization as complete - handleHealthStatusChange can now process events
     initializationComplete = true;

-    Cluster temp = activeCluster;
+    Database temp = activeDatabase;
     if (!temp.isHealthy()) {
-      // Race condition: Direct assignment to 'activeCluster' is not thread safe because
+      // Race condition: Direct assignment to 'activeDatabase' is not thread safe because
       // 'onHealthStatusChange' may execute concurrently once 'initializationComplete'
       // is set to true.
-      // Simple rule is to never assign value of 'activeCluster' outside of
-      // 'activeClusterChangeLock' once the 'initializationComplete' is done.
-      waitForInitialHealthyCluster(statusTracker);
-      switchToHealthyCluster(SwitchReason.HEALTH_CHECK, temp);
+      // Simple rule is to never assign value of 'activeDatabase' outside of
+      // 'activeDatabaseChangeLock' once the 'initializationComplete' is done.
+      waitForInitialHealthyDatabase(statusTracker);
+      switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, temp);
     }

-    this.fallbackExceptionList = multiClusterClientConfig.getFallbackExceptionList();
+    this.fallbackExceptionList = multiDbConfig.getFallbackExceptionList();

     // Start periodic failback checker
-    if (multiClusterClientConfig.isFailbackSupported()) {
-      long failbackInterval = multiClusterClientConfig.getFailbackCheckInterval();
+    if (multiDbConfig.isFailbackSupported()) {
+      long failbackInterval = multiDbConfig.getFailbackCheckInterval();
       failbackScheduler.scheduleAtFixedRate(this::periodicFailbackCheck, failbackInterval,
           failbackInterval, TimeUnit.MILLISECONDS);
     }
   }

   /**
-   * Adds a new cluster endpoint to the provider.
-   * @param clusterConfig the configuration for the new cluster
+   * Adds a new database endpoint to the provider.
+   * @param databaseConfig the configuration for the new database
    * @throws JedisValidationException if the endpoint already exists
    */
-  public void add(ClusterConfig clusterConfig) {
-    if (clusterConfig == null) {
-      throw new JedisValidationException("ClusterConfig must not be null");
+  public void add(DatabaseConfig databaseConfig) {
+    if (databaseConfig == null) {
+      throw new JedisValidationException("DatabaseConfig must not be null");
     }

-    Endpoint endpoint = clusterConfig.getEndpoint();
-    if (multiClusterMap.containsKey(endpoint)) {
+    Endpoint endpoint = databaseConfig.getEndpoint();
+    if (databaseMap.containsKey(endpoint)) {
       throw new JedisValidationException(
           "Endpoint " + endpoint + " already exists in the provider");
     }

-    activeClusterChangeLock.lock();
+    activeDatabaseChangeLock.lock();
     try {
-      addClusterInternal(multiClusterClientConfig, clusterConfig);
+      addDatabaseInternal(multiDbConfig, databaseConfig);
     } finally {
-      activeClusterChangeLock.unlock();
+      activeDatabaseChangeLock.unlock();
     }
   }
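A hedged sketch of the dynamic `add(...)` API above; `provider` and `clientConfig` are assumed to exist from earlier examples, and the endpoint is a placeholder.

```java
// Endpoints added at runtime start health checks immediately; events are queued
// until registration completes, per addDatabaseInternal(...) in this patch.
DatabaseConfig southConfig = DatabaseConfig.builder(
        new HostAndPort("redis-south.example.com", 14000), clientConfig)
    .weight(25.0f)
    .build();
provider.add(southConfig);
```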

   /**
-   * Removes a cluster endpoint from the provider.
+   * Removes a database endpoint from the provider.
    * @param endpoint the endpoint to remove
    * @throws JedisValidationException if the endpoint doesn't exist or is the last remaining
    *           endpoint
@@ -231,35 +229,35 @@ public void remove(Endpoint endpoint) {
       throw new JedisValidationException("Endpoint must not be null");
     }

-    if (!multiClusterMap.containsKey(endpoint)) {
+    if (!databaseMap.containsKey(endpoint)) {
       throw new JedisValidationException(
           "Endpoint " + endpoint + " does not exist in the provider");
     }

-    if (multiClusterMap.size() < 2) {
+    if (databaseMap.size() < 2) {
       throw new JedisValidationException("Cannot remove the last remaining endpoint");
     }
     log.debug("Removing endpoint {}", endpoint);

-    Map.Entry<Endpoint, Cluster> notificationData = null;
-    activeClusterChangeLock.lock();
+    Map.Entry<Endpoint, Database> notificationData = null;
+    activeDatabaseChangeLock.lock();
     try {
-      Cluster clusterToRemove = multiClusterMap.get(endpoint);
-      boolean isActiveCluster = (activeCluster == clusterToRemove);
+      Database databaseToRemove = databaseMap.get(endpoint);
+      boolean isActiveDatabase = (activeDatabase == databaseToRemove);

-      if (isActiveCluster) {
-        log.info("Active cluster is being removed. Finding a new active cluster...");
-        Map.Entry<Endpoint, Cluster> candidate = findWeightedHealthyClusterToIterate(
-            clusterToRemove);
+      if (isActiveDatabase) {
+        log.info("Active database is being removed. Finding a new active database...");
+        Map.Entry<Endpoint, Database> candidate = findWeightedHealthyClusterToIterate(
+            databaseToRemove);
         if (candidate != null) {
-          Cluster selectedCluster = candidate.getValue();
-          if (setActiveCluster(selectedCluster, true)) {
-            log.info("New active cluster set to {}", candidate.getKey());
+          Database selectedCluster = candidate.getValue();
+          if (setActiveDatabase(selectedCluster, true)) {
+            log.info("New active database set to {}", candidate.getKey());
             notificationData = candidate;
           }
         } else {
           throw new JedisException(
-              "Cluster can not be removed due to no healthy cluster available to switch!");
+              "Database can not be removed due to no healthy database available to switch!");
         }
       }

@@ -267,43 +265,42 @@ public void remove(Endpoint endpoint) {
       healthStatusManager.unregisterListener(endpoint, this::onHealthStatusChange);
       healthStatusManager.remove(endpoint);

-      // Remove from cluster map
-      multiClusterMap.remove(endpoint);
+      // Remove from database map
+      databaseMap.remove(endpoint);

-      // Close the cluster resources
-      if (clusterToRemove != null) {
-        clusterToRemove.setDisabled(true);
-        clusterToRemove.close();
+      // Close the database resources
+      if (databaseToRemove != null) {
+        databaseToRemove.setDisabled(true);
+        databaseToRemove.close();
       }
     } finally {
-      activeClusterChangeLock.unlock();
+      activeDatabaseChangeLock.unlock();
     }

     if (notificationData != null) {
-      onClusterSwitch(SwitchReason.FORCED, notificationData.getKey(), notificationData.getValue());
+      onDatabaseSwitch(SwitchReason.FORCED, notificationData.getKey(), notificationData.getValue());
     }
   }

   /**
-   * Internal method to add a cluster configuration. This method is not thread-safe and should be
+   * Internal method to add a database configuration. This method is not thread-safe and should be
    * called within appropriate locks.
    */
-  private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfig,
-      ClusterConfig config) {
-    if (multiClusterMap.containsKey(config.getEndpoint())) {
+  private void addDatabaseInternal(MultiDbConfig multiDbConfig, DatabaseConfig config) {
+    if (databaseMap.containsKey(config.getEndpoint())) {
       throw new JedisValidationException(
           "Endpoint " + config.getEndpoint() + " already exists in the provider");
     }

-    String clusterId = "cluster:" + config.getEndpoint();
+    String databaseId = "database:" + config.getEndpoint();

-    Retry retry = RetryRegistry.of(retryConfig).retry(clusterId);
+    Retry retry = RetryRegistry.of(retryConfig).retry(databaseId);

     Retry.EventPublisher retryPublisher = retry.getEventPublisher();
     retryPublisher.onRetry(event -> log.warn(String.valueOf(event)));
     retryPublisher.onError(event -> log.error(String.valueOf(event)));

     CircuitBreaker circuitBreaker = CircuitBreakerRegistry.of(circuitBreakerConfig)
-        .circuitBreaker(clusterId);
+        .circuitBreaker(databaseId);

     CircuitBreaker.EventPublisher circuitBreakerEventPublisher = circuitBreaker.getEventPublisher();
     circuitBreakerEventPublisher.onCallNotPermitted(event -> log.error(String.valueOf(event)));
@@ -315,27 +312,27 @@ private void addClusterInternal(MultiClusterClientConfi
         .hostAndPort(hostPort(config.getEndpoint())).clientConfig(config.getJedisClientConfig())
         .poolConfig(config.getConnectionPoolConfig()).build();

-    Cluster cluster;
+    Database database;
     StrategySupplier strategySupplier = config.getHealthCheckStrategySupplier();
     if (strategySupplier != null) {
       HealthCheckStrategy hcs = strategySupplier.get(hostPort(config.getEndpoint()),
           config.getJedisClientConfig());
-      // Register listeners BEFORE adding clusters to avoid missing events
+      // Register listeners BEFORE adding databases to avoid missing events
       healthStatusManager.registerListener(config.getEndpoint(), this::onHealthStatusChange);
       HealthCheck hc = healthStatusManager.add(config.getEndpoint(), hcs);
-      cluster = new Cluster(config.getEndpoint(), pool, retry, hc, circuitBreaker,
-          config.getWeight(), multiClusterClientConfig);
+      database = new Database(config.getEndpoint(), pool, retry, hc, circuitBreaker,
+          config.getWeight(), multiDbConfig);
     } else {
-      cluster = new Cluster(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(),
-          multiClusterClientConfig);
+      database = new Database(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(),
+          multiDbConfig);
     }
-    multiClusterMap.put(config.getEndpoint(), cluster);
+    databaseMap.put(config.getEndpoint(), database);

     // this is the place where we listen tracked errors and check if
-    // thresholds are exceeded for the cluster
+    // thresholds are exceeded for the database
     circuitBreakerEventPublisher.onError(event -> {
-      cluster.evaluateThresholds(false);
+      database.evaluateThresholds(false);
     });
   }

@@ -344,8 +341,8 @@ private HostAndPort hostPort(Endpoint endpoint) {
   }

   /**
-   * Handles health status changes for clusters. This method is called by the health status manager
-   * when the health status of a cluster changes.
+   * Handles health status changes for databases. This method is called by the health status manager
+   * when the health status of a database changes.
    */
   @VisibleForTesting
   void onHealthStatusChange(HealthStatusChangeEvent eventArgs) {
@@ -353,66 +350,66 @@ void onHealthStatusChange(HealthStatusChangeEvent eventArgs) {
     HealthStatus newStatus = eventArgs.getNewStatus();
     log.debug("Health status changed for {} from {} to {}", endpoint, eventArgs.getOldStatus(),
         newStatus);
-    Cluster clusterWithHealthChange = multiClusterMap.get(endpoint);
+    Database databaseWithHealthChange = databaseMap.get(endpoint);

-    if (clusterWithHealthChange == null) return;
+    if (databaseWithHealthChange == null) return;

     if (initializationComplete) {
-      if (!newStatus.isHealthy() && clusterWithHealthChange == activeCluster) {
-        clusterWithHealthChange.setGracePeriod();
-        switchToHealthyCluster(SwitchReason.HEALTH_CHECK, clusterWithHealthChange);
+      if (!newStatus.isHealthy() && databaseWithHealthChange == activeDatabase) {
+        databaseWithHealthChange.setGracePeriod();
+        switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, databaseWithHealthChange);
       }
     }
   }

   /**
-   * Waits for initial health check results and selects the first healthy cluster based on weight
-   * priority. Blocks until at least one cluster becomes healthy or all clusters are determined to
+   * Waits for initial health check results and selects the first healthy database based on weight
+   * priority. Blocks until at least one database becomes healthy or all databases are determined to
    * be unhealthy.
    * @param statusTracker the status tracker to use for waiting on health check results
-   * @return the first healthy cluster found, ordered by weight (highest first)
-   * @throws JedisConnectionException if all clusters are unhealthy
+   * @return the first healthy database found, ordered by weight (highest first)
+   * @throws JedisConnectionException if all databases are unhealthy
    */
-  private Cluster waitForInitialHealthyCluster(StatusTracker statusTracker) {
-    // Sort clusters by weight in descending order
-    List<Map.Entry<Endpoint, Cluster>> sortedClusters = multiClusterMap.entrySet().stream()
-        .sorted(Map.Entry.<Endpoint, Cluster> comparingByValue(
-            Comparator.comparing(Cluster::getWeight).reversed()))
+  private Database waitForInitialHealthyDatabase(StatusTracker statusTracker) {
+    // Sort databases by weight in descending order
+    List<Map.Entry<Endpoint, Database>> sortedDatabases = databaseMap.entrySet().stream()
+        .sorted(Map.Entry.<Endpoint, Database> comparingByValue(
+            Comparator.comparing(Database::getWeight).reversed()))
         .collect(Collectors.toList());

-    log.info("Selecting initial cluster from {} configured clusters", sortedClusters.size());
+    log.info("Selecting initial database from {} configured databases", sortedDatabases.size());

-    // Select cluster in weight order
-    for (Map.Entry<Endpoint, Cluster> entry : sortedClusters) {
+    // Select database in weight order
+    for (Map.Entry<Endpoint, Database> entry : sortedDatabases) {
       Endpoint endpoint = entry.getKey();
-      Cluster cluster = entry.getValue();
+      Database database = entry.getValue();

-      log.info("Evaluating cluster {} (weight: {})", endpoint, cluster.getWeight());
+      log.info("Evaluating database {} (weight: {})", endpoint, database.getWeight());

       HealthStatus status;

       // Check if health checks are enabled for this endpoint
       if (healthStatusManager.hasHealthCheck(endpoint)) {
         log.info("Health checks enabled for {}, waiting for result", endpoint);
-        // Wait for this cluster's health status to be determined
+        // Wait for this database's health status to be determined
         status = statusTracker.waitForHealthStatus(endpoint);
       } else {
         // No health check configured - assume healthy
-        log.info("No health check configured for cluster {}, defaulting to HEALTHY", endpoint);
+        log.info("No health check configured for database {}, defaulting to HEALTHY", endpoint);
         status = HealthStatus.HEALTHY;
       }

       if (status.isHealthy()) {
-        log.info("Found healthy cluster: {} (weight: {})", endpoint, cluster.getWeight());
-        return cluster;
+        log.info("Found healthy database: {} (weight: {})", endpoint, database.getWeight());
+        return database;
       } else {
-        log.info("Cluster {} is unhealthy, trying next cluster", endpoint);
+        log.info("Database {} is unhealthy, trying next database", endpoint);
       }
     }

-    // All clusters are unhealthy
+    // All databases are unhealthy
     throw new JedisConnectionException(
-        "All configured clusters are unhealthy. Cannot initialize MultiClusterPooledConnectionProvider.");
+        "All configured databases are unhealthy. Cannot initialize MultiDbConnectionProvider.");
   }

   /**
@@ -421,38 +418,38 @@ private Cluster waitForInitialHealthyCluster(StatusTracker statusTracker) {
   @VisibleForTesting
   void periodicFailbackCheck() {
     try {
-      // Find the best candidate cluster for failback
-      Map.Entry<Endpoint, Cluster> bestCandidate = null;
-      float bestWeight = activeCluster.getWeight();
+      // Find the best candidate database for failback
+      Map.Entry<Endpoint, Database> bestCandidate = null;
+      float bestWeight = activeDatabase.getWeight();

-      for (Map.Entry<Endpoint, Cluster> entry : multiClusterMap.entrySet()) {
-        Cluster cluster = entry.getValue();
+      for (Map.Entry<Endpoint, Database> entry : databaseMap.entrySet()) {
+        Database database = entry.getValue();

-        // Skip if this is already the active cluster
-        if (cluster == activeCluster) {
+        // Skip if this is already the active database
+        if (database == activeDatabase) {
           continue;
         }

-        // Skip if cluster is not healthy
-        if (!cluster.isHealthy()) {
+        // Skip if database is not healthy
+        if (!database.isHealthy()) {
           continue;
         }

-        // This cluster is a valid candidate
-        if (cluster.getWeight() > bestWeight) {
+        // This database is a valid candidate
+        if (database.getWeight() > bestWeight) {
           bestCandidate = entry;
-          bestWeight = cluster.getWeight();
+          bestWeight = database.getWeight();
         }
       }

       // Perform failback if we found a better candidate
       if (bestCandidate != null) {
-        Cluster selectedCluster = bestCandidate.getValue();
-        log.info("Performing failback from {} to {} (higher weight cluster available)",
-            activeCluster.getCircuitBreaker().getName(),
+        Database selectedCluster = bestCandidate.getValue();
+        log.info("Performing failback from {} to {} (higher weight database available)",
+            activeDatabase.getCircuitBreaker().getName(),
             selectedCluster.getCircuitBreaker().getName());
-        if (setActiveCluster(selectedCluster, true)) {
-          onClusterSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedCluster);
+        if (setActiveDatabase(selectedCluster, true)) {
+          onDatabaseSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedCluster);
         }
       }
     } catch (Exception e) {
@@ -460,25 +457,25 @@ void periodicFailbackCheck() {
     }
   }

-  Endpoint switchToHealthyCluster(SwitchReason reason, Cluster iterateFrom) {
-    Map.Entry<Endpoint, Cluster> clusterToIterate = findWeightedHealthyClusterToIterate(
+  Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) {
+    Map.Entry<Endpoint, Database> databaseToIterate = findWeightedHealthyClusterToIterate(
         iterateFrom);
-    if (clusterToIterate == null) {
+    if (databaseToIterate == null) {
       // throws exception anyway since not able to iterate
       handleNoHealthyCluster();
     }

-    Cluster cluster = clusterToIterate.getValue();
-    boolean changed = setActiveCluster(cluster, false);
+    Database database = databaseToIterate.getValue();
+    boolean changed = setActiveDatabase(database, false);
     if (!changed) return null;
     failoverAttemptCount.set(0);

-    onClusterSwitch(reason, clusterToIterate.getKey(), cluster);
-    return clusterToIterate.getKey();
+    onDatabaseSwitch(reason, databaseToIterate.getKey(), database);
+    return databaseToIterate.getKey();
   }

   private void handleNoHealthyCluster() {
-    int max = multiClusterClientConfig.getMaxNumFailoverAttempts();
-    log.error("No healthy cluster available to switch to");
+    int max = multiDbConfig.getMaxNumFailoverAttempts();
+    log.error("No healthy database available to switch to");
     if (failoverAttemptCount.get() > max) {
       throw new JedisPermanentlyNotAvailableException();
     }
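`handleNoHealthyCluster()` and the freeze logic below are driven by two `MultiDbConfig` properties. A sketch of setting them follows; the builder method names are assumptions inferred from the getters used in this patch.

```java
MultiDbConfig config = MultiDbConfig.builder()
    .endpoint(DatabaseConfig.builder(east, clientConfig).weight(100.0f).build())
    .endpoint(DatabaseConfig.builder(west, clientConfig).weight(50.0f).build())
    .maxNumFailoverAttempts(10)            // assumed setter for getMaxNumFailoverAttempts()
    .delayInBetweenFailoverAttempts(12000) // assumed setter, in milliseconds
    .build();
```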
@@ -496,7 +493,7 @@ private boolean markAsFreeze() {
     long until = failoverFreezeUntil.get();
     long now = System.currentTimeMillis();
     if (until <= now) {
-      long nextUntil = now + multiClusterClientConfig.getDelayInBetweenFailoverAttempts();
+      long nextUntil = now + multiDbConfig.getDelayInBetweenFailoverAttempts();
       if (failoverFreezeUntil.compareAndSet(until, nextUntil)) {
         return true;
       }
@@ -505,46 +502,46 @@
   }

   /**
-   * Asserts that the active cluster is operable. If not, throws an exception.
+   * Asserts that the active database is operable. If not, throws an exception.
    *
    * This method is called by the circuit breaker command executor before executing a command.
-   * @throws JedisPermanentlyNotAvailableException if the there is no operable cluster and the max
+   * @throws JedisPermanentlyNotAvailableException if there is no operable database and the max
    *           number of failover attempts has been exceeded.
-   * @throws JedisTemporarilyNotAvailableException if the there is no operable cluster and the max
+   * @throws JedisTemporarilyNotAvailableException if there is no operable database and the max
    *           number of failover attempts has not been exceeded.
    */
   @VisibleForTesting
   public void assertOperability() {
-    Cluster current = activeCluster;
+    Database current = activeDatabase;
     if (!current.isHealthy() && !this.canIterateFrom(current)) {
       handleNoHealthyCluster();
     }
   }

-  private static Comparator<Map.Entry<Endpoint, Cluster>> maxByWeight = Map.Entry
-      .<Endpoint, Cluster> comparingByValue(Comparator.comparing(Cluster::getWeight));
+  private static Comparator<Map.Entry<Endpoint, Database>> maxByWeight = Map.Entry
+      .<Endpoint, Database> comparingByValue(Comparator.comparing(Database::getWeight));

-  private static Predicate<Map.Entry<Endpoint, Cluster>> filterByHealth = c -> c.getValue()
+  private static Predicate<Map.Entry<Endpoint, Database>> filterByHealth = c -> c.getValue()
       .isHealthy();

-  private Map.Entry<Endpoint, Cluster> findWeightedHealthyClusterToIterate(Cluster iterateFrom) {
-    return multiClusterMap.entrySet().stream().filter(filterByHealth)
+  private Map.Entry<Endpoint, Database> findWeightedHealthyClusterToIterate(Database iterateFrom) {
+    return databaseMap.entrySet().stream().filter(filterByHealth)
         .filter(entry -> entry.getValue() != iterateFrom).max(maxByWeight).orElse(null);
   }

   /**
    * Design decision was made to defer responsibility for cross-replication validation to the user.
-   * Alternatively there was discussion to handle cross-cluster replication validation by setting a
+   * Alternatively there was discussion to handle cross-database replication validation by setting a
    * key/value pair per hashslot in the active connection (with a TTL) and subsequently reading it
    * from the target connection.
    */
   public void validateTargetConnection(Endpoint endpoint) {
-    Cluster cluster = multiClusterMap.get(endpoint);
-    validateTargetConnection(cluster);
+    Database database = databaseMap.get(endpoint);
+    validateTargetConnection(database);
   }

-  private void validateTargetConnection(Cluster cluster) {
-    CircuitBreaker circuitBreaker = cluster.getCircuitBreaker();
+  private void validateTargetConnection(Database database) {
+    CircuitBreaker circuitBreaker = database.getCircuitBreaker();
     State originalState = circuitBreaker.getState();
     try {
@@ -555,7 +552,7 @@ private void validateTargetConnection(Cluster cluster) {
       // yet
       circuitBreaker.transitionToClosedState();

-      try (Connection targetConnection = cluster.getConnection()) {
+      try (Connection targetConnection = database.getConnection()) {
         targetConnection.ping();
       }
     } catch (Exception e) {
@@ -575,77 +572,77 @@
    * @return
    */
   public Set<Endpoint> getEndpoints() {
-    return new HashSet<>(multiClusterMap.keySet());
+    return new HashSet<>(databaseMap.keySet());
   }

-  public void setActiveCluster(Endpoint endpoint) {
+  public void setActiveDatabase(Endpoint endpoint) {
     if (endpoint == null) {
       throw new JedisValidationException(
           "Provided endpoint is null. Please use one from the configuration");
     }

-    Cluster cluster = multiClusterMap.get(endpoint);
-    if (cluster == null) {
+    Database database = databaseMap.get(endpoint);
+    if (database == null) {
       throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within "
           + "the configured endpoints. Please use one from the configuration");
     }

-    if (setActiveCluster(cluster, true)) {
-      onClusterSwitch(SwitchReason.FORCED, endpoint, cluster);
+    if (setActiveDatabase(database, true)) {
+      onDatabaseSwitch(SwitchReason.FORCED, endpoint, database);
     }
   }

-  public void forceActiveCluster(Endpoint endpoint, long forcedActiveDuration) {
-    Cluster cluster = multiClusterMap.get(endpoint);
+  public void forceActiveDatabase(Endpoint endpoint, long forcedActiveDuration) {
+    Database database = databaseMap.get(endpoint);

-    if (cluster == null) {
+    if (database == null) {
       throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within "
           + "the configured endpoints. Please use one from the configuration");
     }
-    cluster.clearGracePeriod();
-    if (!cluster.isHealthy()) {
+    database.clearGracePeriod();
+    if (!database.isHealthy()) {
       throw new JedisValidationException("Provided endpoint: " + endpoint
           + " is not healthy. Please consider a healthy endpoint from the configuration");
     }

-    multiClusterMap.entrySet().stream().forEach(entry -> {
+    databaseMap.entrySet().stream().forEach(entry -> {
       if (entry.getKey() != endpoint) {
         entry.getValue().setGracePeriod(forcedActiveDuration);
       }
     });
-    setActiveCluster(endpoint);
+    setActiveDatabase(endpoint);
   }
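The two selection methods above differ in intent. A short sketch, assuming the `HostAndPort` endpoints from the earlier examples satisfy `Endpoint`:

```java
// Permanent, validated switch; emits a FORCED DatabaseSwitchEvent if the database changes.
provider.setActiveDatabase(east);

// Temporary pin: puts every other endpoint into a 60-second grace period,
// so failback logic will not pull traffic away from 'west' in the meantime.
provider.forceActiveDatabase(west, 60_000);
```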
-  private boolean setActiveCluster(Cluster cluster, boolean validateConnection) {
-    // Cluster cluster = clusterEntry.getValue();
+  private boolean setActiveDatabase(Database database, boolean validateConnection) {
+    // Database database = clusterEntry.getValue();

     // Field-level synchronization is used to avoid the edge case in which
-    // incrementActiveMultiClusterIndex() is called at the same time
-    activeClusterChangeLock.lock();
-    Cluster oldCluster;
+    // setActiveDatabase() is called at the same time
+    activeDatabaseChangeLock.lock();
+    Database oldCluster;
     try {
-      // Allows an attempt to reset the current cluster from a FORCED_OPEN to CLOSED state in the
+      // Allows an attempt to reset the current database from a FORCED_OPEN to CLOSED state in the
       // event that no failover is possible
-      if (activeCluster == cluster && !cluster.isCBForcedOpen()) return false;
+      if (activeDatabase == database && !database.isCBForcedOpen()) return false;

-      if (validateConnection) validateTargetConnection(cluster);
+      if (validateConnection) validateTargetConnection(database);

-      String originalClusterName = getClusterCircuitBreaker().getName();
+      String originalClusterName = getDatabaseCircuitBreaker().getName();

-      if (activeCluster == cluster)
-        log.warn("Cluster/database endpoint '{}' successfully closed its circuit breaker",
+      if (activeDatabase == database)
+        log.warn("Database endpoint '{}' successfully closed its circuit breaker",
             originalClusterName);
-      else log.warn("Cluster/database endpoint successfully updated from '{}' to '{}'",
-          originalClusterName, cluster.circuitBreaker.getName());
-      oldCluster = activeCluster;
-      activeCluster = cluster;
+      else log.warn("Database endpoint successfully updated from '{}' to '{}'",
+          originalClusterName, database.circuitBreaker.getName());
+      oldCluster = activeDatabase;
+      activeDatabase = database;
     } finally {
-      activeClusterChangeLock.unlock();
+      activeDatabaseChangeLock.unlock();
     }
-    boolean switched = oldCluster != cluster;
-    if (switched && this.multiClusterClientConfig.isFastFailover()) {
-      log.info("Forcing disconnect of all active connections in old cluster: {}",
+    boolean switched = oldCluster != database;
+    if (switched && this.multiDbConfig.isFastFailover()) {
+      log.info("Forcing disconnect of all active connections in old database: {}",
          oldCluster.circuitBreaker.getName());
       oldCluster.forceDisconnect();
-      log.info("Disconnected all active connections in old cluster: {}",
+      log.info("Disconnected all active connections in old database: {}",
          oldCluster.circuitBreaker.getName());
     }

@@ -670,39 +667,39 @@ public void close() {
       Thread.currentThread().interrupt();
     }

-    // Close all cluster connection pools
-    for (Cluster cluster : multiClusterMap.values()) {
-      cluster.close();
+    // Close all database connection pools
+    for (Database database : databaseMap.values()) {
+      database.close();
     }
   }

   @Override
   public Connection getConnection() {
-    return activeCluster.getConnection();
+    return activeDatabase.getConnection();
   }

   public Connection getConnection(Endpoint endpoint) {
-    return multiClusterMap.get(endpoint).getConnection();
+    return databaseMap.get(endpoint).getConnection();
   }

   @Override
   public Connection getConnection(CommandArguments args) {
-    return activeCluster.getConnection();
+    return activeDatabase.getConnection();
   }

   @Override
   public Map<?, Pool<Connection>> getConnectionMap() {
-    ConnectionPool connectionPool = activeCluster.connectionPool;
+    ConnectionPool connectionPool = activeDatabase.connectionPool;
     return Collections.singletonMap(connectionPool.getFactory(), connectionPool);
   }

-  public Cluster getCluster() {
-    return activeCluster;
+  public Database getDatabase() {
+    return activeDatabase;
   }

   @VisibleForTesting
-  public Cluster getCluster(Endpoint endpoint) {
-    return multiClusterMap.get(endpoint);
+  public Database getDatabase(Endpoint endpoint) {
+    return databaseMap.get(endpoint);
   }

   /**
@@ -710,10 +707,10 @@
    *
    * Active endpoint is the one which is currently being used for all operations. It can change at
    * any time due to health checks, failover, failback, etc.
-   * @return the active cluster endpoint
+   * @return the active database endpoint
    */
   public Endpoint getActiveEndpoint() {
-    return activeCluster.getEndpoint();
+    return activeDatabase.getEndpoint();
   }

   /**
@@ -722,51 +719,51 @@ public Endpoint getActiveEndpoint() {
    * @return the health status of the endpoint
    */
   public boolean isHealthy(Endpoint endpoint) {
-    Cluster cluster = getCluster(endpoint);
-    if (cluster == null) {
+    Database database = getDatabase(endpoint);
+    if (database == null) {
       throw new JedisValidationException(
           "Endpoint " + endpoint + " does not exist in the provider");
     }
-    return cluster.isHealthy();
+    return database.isHealthy();
   }

-  public CircuitBreaker getClusterCircuitBreaker() {
-    return activeCluster.getCircuitBreaker();
+  public CircuitBreaker getDatabaseCircuitBreaker() {
+    return activeDatabase.getCircuitBreaker();
   }

   /**
-   * Indicates the final cluster/database endpoint (connection pool), according to the
-   * pre-configured list provided at startup via the MultiClusterClientConfig, is unavailable and
-   * therefore no further failover is possible. Users can manually failback to an available cluster
+   * Indicates the final database endpoint (connection pool), according to the pre-configured list
+   * provided at startup via the MultiDbConfig, is unavailable and therefore no further failover is
+   * possible. Users can manually failback to an available database
    */
-  public boolean canIterateFrom(Cluster iterateFrom) {
-    Map.Entry<Endpoint, Cluster> e = findWeightedHealthyClusterToIterate(iterateFrom);
+  public boolean canIterateFrom(Database iterateFrom) {
+    Map.Entry<Endpoint, Database> e = findWeightedHealthyClusterToIterate(iterateFrom);
     return e != null;
   }

-  public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Cluster cluster) {
-    if (clusterSwitchListener != null) {
-      ClusterSwitchEventArgs eventArgs = new ClusterSwitchEventArgs(reason, endpoint, cluster);
-      clusterSwitchListener.accept(eventArgs);
+  public void onDatabaseSwitch(SwitchReason reason, Endpoint endpoint, Database database) {
+    if (databaseSwitchListener != null) {
+      DatabaseSwitchEvent eventArgs = new DatabaseSwitchEvent(reason, endpoint, database);
+      databaseSwitchListener.accept(eventArgs);
     }
   }

-  public void setClusterSwitchListener(Consumer<ClusterSwitchEventArgs> clusterSwitchListener) {
-    this.clusterSwitchListener = clusterSwitchListener;
+  public void setDatabaseSwitchListener(Consumer<DatabaseSwitchEvent> databaseSwitchListener) {
+    this.databaseSwitchListener = databaseSwitchListener;
   }

   public List<Class<? extends Throwable>> getFallbackExceptionList() {
     return fallbackExceptionList;
   }

-  public static class Cluster {
+  public static class Database {

     private TrackingConnectionPool connectionPool;
     private final Retry retry;
     private final CircuitBreaker circuitBreaker;
     private final float weight;
     private final HealthCheck healthCheck;
-    private final MultiClusterClientConfig multiClusterClientConfig;
+    private final MultiDbConfig multiDbConfig;
     private boolean disabled = false;
     private final Endpoint endpoint;

@@ -774,29 +771,27 @@ public static class Cluster {
     private volatile long gracePeriodEndsAt = 0;
     private final Logger log = LoggerFactory.getLogger(getClass());

-    private Cluster(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
-        CircuitBreaker circuitBreaker, float weight,
-        MultiClusterClientConfig multiClusterClientConfig) {
+    private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
+        CircuitBreaker circuitBreaker, float weight, MultiDbConfig multiDbConfig) {
       this.endpoint = endpoint;
       this.connectionPool = connectionPool;
       this.retry = retry;
       this.circuitBreaker = circuitBreaker;
       this.weight = weight;
-      this.multiClusterClientConfig = multiClusterClientConfig;
+      this.multiDbConfig = multiDbConfig;
       this.healthCheck = null;
     }

-    private Cluster(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
-        HealthCheck hc, CircuitBreaker circuitBreaker, float weight,
-        MultiClusterClientConfig multiClusterClientConfig) {
+    private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry,
+        HealthCheck hc, CircuitBreaker circuitBreaker, float weight, MultiDbConfig multiDbConfig) {
       this.endpoint = endpoint;
       this.connectionPool = connectionPool;
       this.retry = retry;
       this.circuitBreaker = circuitBreaker;
       this.weight = weight;
-      this.multiClusterClientConfig = multiClusterClientConfig;
+      this.multiDbConfig = multiDbConfig;
       this.healthCheck = hc;
     }

@@ -805,7 +800,7 @@ public Endpoint getEndpoint() {
     }

     public Connection getConnection() {
-      if (!isHealthy()) throw new JedisConnectionException("Cluster is not healthy");
+      if (!isHealthy()) throw new JedisConnectionException("Database is not healthy");
       if (connectionPool.isClosed()) {
         connectionPool = TrackingConnectionPool.from(connectionPool);
       }
@@ -830,7 +825,7 @@ public HealthStatus getHealthStatus() {
     }

     /**
-     * Assigned weight for this cluster
+     * Assigned weight for this database
      */
     public float getWeight() {
       return weight;
@@ -850,15 +845,15 @@ public boolean isHealthy() {
     }

     public boolean retryOnFailover() {
-      return multiClusterClientConfig.isRetryOnFailover();
+      return multiDbConfig.isRetryOnFailover();
     }

     public int getCircuitBreakerMinNumOfFailures() {
-      return multiClusterClientConfig.getCircuitBreakerMinNumOfFailures();
+      return multiDbConfig.getCircuitBreakerMinNumOfFailures();
     }

     public float getCircuitBreakerFailureRateThreshold() {
-      return multiClusterClientConfig.getCircuitBreakerFailureRateThreshold();
+      return multiDbConfig.getCircuitBreakerFailureRateThreshold();
     }

     public boolean isDisabled() {
@@ -870,17 +865,17 @@ public void setDisabled(boolean disabled) {
     }

     /**
-     * Checks if the cluster is currently in grace period
+     * Checks if the database is currently in grace period
      */
     public boolean isInGracePeriod() {
       return System.currentTimeMillis() < gracePeriodEndsAt;
     }

     /**
-     * Sets the grace period for this cluster
+     * Sets the grace period for this database
      */
     public void setGracePeriod() {
-      setGracePeriod(multiClusterClientConfig.getGracePeriod());
+      setGracePeriod(multiDbConfig.getGracePeriod());
     }

     public void setGracePeriod(long gracePeriod) {
@@ -897,7 +892,7 @@ public void clearGracePeriod() {
     * Whether failback is supported by client
     */
     public boolean isFailbackSupported() {
-      return multiClusterClientConfig.isFailbackSupported();
+      return multiDbConfig.isFailbackSupported();
     }

     public void forceDisconnect() {
@@ -915,15 +910,15 @@ && isThresholdsExceeded(this, lastFailRecorded)) {
       }
     }

-    private static boolean isThresholdsExceeded(Cluster cluster, boolean lastFailRecorded) {
-      Metrics metrics = cluster.getCircuitBreaker().getMetrics();
+    private static boolean isThresholdsExceeded(Database database, boolean lastFailRecorded) {
+      Metrics metrics = database.getCircuitBreaker().getMetrics();
       // ATTENTION: this is to increment fails in regard to the current call that is failing,
       // DO NOT remove the increment, it will change the behaviour in case of initial requests to
-      // cluster fail
+      // database fail
       int fails = metrics.getNumberOfFailedCalls() + (lastFailRecorded ? 0 : 1);
       int succ = metrics.getNumberOfSuccessfulCalls();
-      if (fails >= cluster.getCircuitBreakerMinNumOfFailures()) {
-        float ratePercentThreshold = cluster.getCircuitBreakerFailureRateThreshold();// 0..100
+      if (fails >= database.getCircuitBreakerMinNumOfFailures()) {
+        float ratePercentThreshold = database.getCircuitBreakerFailureRateThreshold();// 0..100
         int total = fails + succ;
         if (total == 0) return false;
         float failureRatePercent = (fails * 100.0f) / total;
@@ -936,7 +931,7 @@ private static boolean isThresholdsExceeded(Cluster cluster, boolean lastFailRec
     public String toString() {
       return circuitBreaker.getName() + "{" + "connectionPool=" + connectionPool + ", retry="
           + retry + ", circuitBreaker=" + circuitBreaker + ", weight=" + weight + ", healthStatus="
-          + getHealthStatus() + ", multiClusterClientConfig=" + multiClusterClientConfig + '}';
+          + getHealthStatus() + ", multiDbConfig=" + multiDbConfig + '}';
     }
   }
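A small sketch of the renamed introspection accessors used above, all of which appear in this patch:

```java
for (Endpoint endpoint : provider.getEndpoints()) {
  System.out.printf("%s healthy=%b%n", endpoint, provider.isHealthy(endpoint));
}
System.out.println("active endpoint: " + provider.getActiveEndpoint());
System.out.println("active breaker state: " + provider.getDatabaseCircuitBreaker().getState());
```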
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java similarity index 72% rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java index 51a5d35788..9bd1f35440 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java @@ -6,23 +6,22 @@ import redis.clients.jedis.Connection; import redis.clients.jedis.annots.Experimental; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; /** * ConnectionProvider with built-in retry, circuit-breaker, and failover to another cluster/database * endpoint. With this executor users can seamlessly failover to Disaster Recovery (DR), Backup, and - * Active-Active cluster(s) by using simple configuration which is passed through from Resilience4j - - https://resilience4j.readme.io/docs + * Active-Active cluster(s) by using simple configuration. */ @Experimental -public class CircuitBreakerFailoverConnectionProvider extends CircuitBreakerFailoverBase { +public class MultiDbConnectionSupplier extends MultiDbFailoverBase { - public CircuitBreakerFailoverConnectionProvider(MultiClusterPooledConnectionProvider provider) { + public MultiDbConnectionSupplier(MultiDbConnectionProvider provider) { super(provider); } public Connection getConnection() { - Cluster cluster = provider.getCluster(); // Pass this by reference for thread safety + Database cluster = provider.getDatabase(); // Pass this by reference for thread safety DecorateSupplier<Connection> supplier = Decorators .ofSupplier(() -> this.handleGetConnection(cluster)); @@ -35,7 +34,7 @@ public Connection getConnection() { try { return supplier.decorate().get(); } catch (Exception e) { - if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveCluster(cluster)) { + if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(cluster)) { clusterFailover(cluster); } throw e; @@ -45,7 +44,7 @@ public Connection getConnection() { /** * Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios */ - private Connection handleGetConnection(Cluster cluster) { + private Connection handleGetConnection(Database cluster) { Connection connection = cluster.getConnection(); connection.ping(); return connection; @@ -55,7 +54,7 @@ private Connection handleGetConnection(Cluster cluster) { * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker * failure scenarios */ - private Connection handleClusterFailover(Cluster cluster) { clusterFailover(cluster); diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java similarity index 57% rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java index 40141fb009..e9fc874a2d 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java @@ -6,7 +6,7 @@ import java.util.concurrent.locks.ReentrantLock; import redis.clients.jedis.annots.Experimental; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; import redis.clients.jedis.util.IOUtils; /** @@ -20,12 +20,12 @@ *

*/ @Experimental -public class CircuitBreakerFailoverBase implements AutoCloseable { +public class MultiDbFailoverBase implements AutoCloseable { private final Lock lock = new ReentrantLock(true); - protected final MultiClusterPooledConnectionProvider provider; + protected final MultiDbConnectionProvider provider; - public CircuitBreakerFailoverBase(MultiClusterPooledConnectionProvider provider) { + public MultiDbFailoverBase(MultiDbConnectionProvider provider) { this.provider = provider; } @@ -38,12 +38,12 @@ public void close() { * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker * failure scenarios */ - protected void clusterFailover(Cluster cluster) { + protected void clusterFailover(Database database) { lock.lock(); - CircuitBreaker circuitBreaker = cluster.getCircuitBreaker(); + CircuitBreaker circuitBreaker = database.getCircuitBreaker(); try { - // Check state to handle race conditions since iterateActiveCluster() is + // Check state to handle race conditions since switchToHealthyDatabase() is // non-idempotent if (!CircuitBreaker.State.FORCED_OPEN.equals(circuitBreaker.getState())) { @@ -51,29 +51,29 @@ protected void clusterFailover(Cluster cluster) { // event publishing. // To recover/transition from this forced state the user will need to manually failback - Cluster activeCluster = provider.getCluster(); - // This should be possible only if active cluster is switched from by other reasons than + Database activeDatabase = provider.getDatabase(); + // This should be possible only if the active database was switched away for reasons other than // circuit breaker, just before circuit breaker triggers - if (activeCluster != cluster) { + if (activeDatabase != database) { return; } - cluster.setGracePeriod(); + database.setGracePeriod(); circuitBreaker.transitionToForcedOpenState(); - // Iterating the active cluster will allow subsequent calls to the executeCommand() to use + // Switching the active database will allow subsequent calls to the executeCommand() to use // the next - // cluster's connection pool - according to the configuration's prioritization/order/weight - provider.switchToHealthyCluster(SwitchReason.CIRCUIT_BREAKER, cluster); + // database's connection pool - according to the configuration's prioritization/order/weight + provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database); } // this check relies on the fact that many failover attempts can hit with the same CB, // only the first one will trigger a failover, and make the CB FORCED_OPEN. - // when the rest reaches here, the active cluster is already the next one, and should be + // when the rest reaches here, the active database is already the next one, and should be // different than // active CB.
If it's the same one and there are no more clusters to failover to, then throw an // exception - else if (cluster == provider.getCluster()) { - provider.switchToHealthyCluster(SwitchReason.CIRCUIT_BREAKER, cluster); + else if (database == provider.getDatabase()) { + provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database); } // Ignore exceptions since we are already in a failure state } finally { @@ -81,13 +81,13 @@ else if (cluster == provider.getCluster()) { } } - boolean isActiveCluster(Cluster cluster) { - Cluster activeCluster = provider.getCluster(); - return activeCluster != null && activeCluster.equals(cluster); + boolean isActiveDatabase(Database database) { + Database activeDatabase = provider.getDatabase(); + return activeDatabase != null && activeDatabase.equals(database); } - static boolean isCircuitBreakerTrackedException(Exception e, Cluster cluster) { - return cluster.getCircuitBreaker().getCircuitBreakerConfig().getRecordExceptionPredicate() + static boolean isCircuitBreakerTrackedException(Exception e, Database database) { + return database.getCircuitBreaker().getCircuitBreakerConfig().getRecordExceptionPredicate() .test(e); } }
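Condensed, the guarded section above is a double-checked switch: a fair lock serializes failover attempts, and re-reading the active database ensures that only the first caller which still sees the failing database as active performs the switch. A sketch under those assumptions, using the PR's names (the `failoverOnce` wrapper itself is hypothetical):

```java
// Sketch: double-checked failover under a fair lock. Later callers observe
// that the active database already changed and return without switching again.
void failoverOnce(MultiDbConnectionProvider provider, Database database, Lock lock) {
  lock.lock();
  try {
    if (provider.getDatabase() != database) {
      return; // another thread (or a health check) already switched away
    }
    database.setGracePeriod();
    database.getCircuitBreaker().transitionToForcedOpenState();
    provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database);
  } finally {
    lock.unlock();
  }
}
```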
diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java similarity index 77% rename from src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java index c227b27e99..bc0d950a6a 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java @@ -11,20 +11,19 @@ /** * This is a memory-intensive solution, as all appended commands are held in memory until - {@link MultiClusterPipeline#sync() SYNC} (or {@link MultiClusterPipeline#close() CLOSE}) gets - called. + {@link MultiDbPipeline#sync() SYNC} (or {@link MultiDbPipeline#close() CLOSE}) gets called. */ @Experimental -public class MultiClusterPipeline extends PipelineBase implements Closeable { +public class MultiDbPipeline extends PipelineBase implements Closeable { - private final CircuitBreakerFailoverConnectionProvider failoverProvider; + private final MultiDbConnectionSupplier failoverProvider; private final Queue>> commands = new LinkedList<>(); @Deprecated - public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider) { + public MultiDbPipeline(MultiDbConnectionProvider pooledProvider) { super(new CommandObjects()); - this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); + this.failoverProvider = new MultiDbConnectionSupplier(pooledProvider); try (Connection connection = failoverProvider.getConnection()) { RedisProtocol proto = connection.getRedisProtocol(); @@ -32,10 +31,9 @@ public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider) } } - public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider, - CommandObjects commandObjects) { + public MultiDbPipeline(MultiDbConnectionProvider pooledProvider, CommandObjects commandObjects) { super(commandObjects); - this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); + this.failoverProvider = new MultiDbConnectionSupplier(pooledProvider); } @Override diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java similarity index 90% rename from src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java index 2de927826c..1688a2c635 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java @@ -20,13 +20,13 @@ * This is a memory-intensive solution, as all appended commands are held in memory.
*/ @Experimental -public class MultiClusterTransaction extends TransactionBase { +public class MultiDbTransaction extends TransactionBase { private static final Builder NO_OP_BUILDER = BuilderFactory.RAW_OBJECT; private static final String GRAPH_COMMANDS_NOT_SUPPORTED_MESSAGE = "Graph commands are not supported."; - private final CircuitBreakerFailoverConnectionProvider failoverProvider; + private final MultiDbConnectionSupplier failoverProvider; private final AtomicInteger extraCommandCount = new AtomicInteger(); private final Queue>> commands = new LinkedList<>(); @@ -39,7 +39,7 @@ public class MultiClusterTransaction extends TransactionBase { * @param provider */ @Deprecated - public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider) { + public MultiDbTransaction(MultiDbConnectionProvider provider) { this(provider, true); } @@ -50,8 +50,8 @@ public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider) { * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI */ @Deprecated - public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, boolean doMulti) { - this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); + public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti) { + this.failoverProvider = new MultiDbConnectionSupplier(provider); try (Connection connection = failoverProvider.getConnection()) { RedisProtocol proto = connection.getRedisProtocol(); @@ -68,10 +68,10 @@ public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, bo * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI * @param commandObjects command objects */ - public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, boolean doMulti, + public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti, CommandObjects commandObjects) { super(commandObjects); - this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); + this.failoverProvider = new MultiDbConnectionSupplier(provider); if (doMulti) multi(); } diff --git a/src/main/java/redis/clients/jedis/providers/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/providers/MultiClusterPooledConnectionProvider.java deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java index 9c0126a5af..43673da1ed 100644 --- a/src/test/java/redis/clients/jedis/MultiDbClientTest.java +++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java @@ -15,9 +15,9 @@ import static org.hamcrest.Matchers.not; import static org.junit.jupiter.api.Assertions.*; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisValidationException; -import redis.clients.jedis.mcf.ClusterSwitchEventArgs; +import redis.clients.jedis.mcf.DatabaseSwitchEvent; import redis.clients.jedis.mcf.SwitchReason; import java.io.IOException; @@ -56,7 +56,7 @@ public static void setupAdminClients() throws IOException { @BeforeEach void setUp() { // Create a simple resilient client with mock endpoints for testing - MultiClusterClientConfig clientConfig = MultiClusterClientConfig.builder() + MultiDbConfig clientConfig = MultiDbConfig.builder() .endpoint(endpoint1.getHostAndPort(), 100.0f, endpoint1.getClientConfigBuilder().build()) 
.endpoint(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build()) .build(); @@ -86,11 +86,11 @@ void testAddRemoveEndpointWithEndpointInterface() { } @Test - void testAddRemoveEndpointWithClusterConfig() { + void testAddRemoveEndpointWithDatabaseConfig() { // todo : (@ggivo) Replace HostAndPort with Endpoint HostAndPort newEndpoint = new HostAndPort("unavailable", 6381); - ClusterConfig newConfig = ClusterConfig + DatabaseConfig newConfig = DatabaseConfig .builder(newEndpoint, DefaultJedisClientConfig.builder().build()).weight(25.0f).build(); assertDoesNotThrow(() -> client.addEndpoint(newConfig)); @@ -121,9 +121,9 @@ void testSetActiveDatabase() { @Test void testBuilderWithMultipleEndpointTypes() { - MultiClusterClientConfig clientConfig = MultiClusterClientConfig.builder() + MultiDbConfig clientConfig = MultiDbConfig.builder() .endpoint(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build()) - .endpoint(ClusterConfig + .endpoint(DatabaseConfig .builder(endpoint2.getHostAndPort(), DefaultJedisClientConfig.builder().build()) .weight(50.0f).build()) .build(); @@ -172,17 +172,17 @@ public void testForceActiveEndpointWithNonExistingEndpoint() { @Test public void testWithDatabaseSwitchListener() { - MultiClusterClientConfig endpointsConfig = MultiClusterClientConfig.builder() - .endpoint(ClusterConfig + MultiDbConfig endpointsConfig = MultiDbConfig.builder() + .endpoint(DatabaseConfig .builder(endpoint1.getHostAndPort(), endpoint1.getClientConfigBuilder().build()) .weight(100.0f).build()) - .endpoint(ClusterConfig + .endpoint(DatabaseConfig .builder(endpoint2.getHostAndPort(), endpoint2.getClientConfigBuilder().build()) .weight(50.0f).build()) .build(); - Consumer<ClusterSwitchEventArgs> eventConsumer; - List<ClusterSwitchEventArgs> events = new ArrayList<>(); + Consumer<DatabaseSwitchEvent> eventConsumer; + List<DatabaseSwitchEvent> events = new ArrayList<>(); eventConsumer = events::add; try (MultiDbClient testClient = MultiDbClient.builder().databaseSwitchListener(eventConsumer) diff --git a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java index f2b2f56e14..591c3ed942 100644 --- a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java +++ b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java @@ -44,7 +44,7 @@ void testConstructorParameterCoverageReport() { for (Constructor<?> ctor : ctors) { if (isUnsafeConstructor(ctor) || clusterConstructorThatShouldBeDeprecatedAndRemoved(ctor) || retriesConstructorThatShouldBeIncorporatedIntoBuilderAsDefault(ctor) - || multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient(ctor)) { + || multiDbConnectionProviderShouldBeReplacedWithMultiDbClient(ctor)) { // Exclude unsafe constructors from analysis as requested continue; } @@ -181,11 +181,10 @@ private static boolean clusterConstructorThatShouldBeDeprecatedAndRemoved(Constr } // FIXME: Remove this when we add convenience class and builder for ResilientClient - private static boolean multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient( + private static boolean multiDbConnectionProviderShouldBeReplacedWithMultiDbClient( Constructor<?> ctor) { Class<?>[] types = ctor.getParameterTypes(); - return types.length == 1 - && types[0].getSimpleName().equals("MultiClusterPooledConnectionProvider"); + return types.length == 1 && types[0].getSimpleName().equals("MultiDbConnectionProvider"); } private static String prettySignature(Constructor<?> ctor) {
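The listener test above shows the intended application-facing pattern. A condensed sketch (only `databaseSwitchListener(...)` and the event accessors appear in these hunks, so the endpoint wiring and the command call are assumptions):

```java
// Sketch: observe database switches on a MultiDbClient.
// Endpoint/config wiring as in setUp() above is assumed, as is the usual
// UnifiedJedis command surface on MultiDbClient.
List<DatabaseSwitchEvent> events = new ArrayList<>();
try (MultiDbClient client = MultiDbClient.builder()
    .databaseSwitchListener(events::add)
    // ... endpoint configuration ...
    .build()) {
  client.set("greeting", "hello");
  events.forEach(e -> System.out.println(e.getDatabaseName() + " <- " + e.getReason()));
}
```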
diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java index 6eb047f6c7..b3c19fdda5 100644 --- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java @@ -16,10 +16,10 @@ import redis.clients.jedis.EndpointConfig; import redis.clients.jedis.HostAndPorts; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.UnifiedJedis; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.scenario.RecommendedSettings; import java.io.IOException; @@ -57,7 +57,7 @@ public class FailoverIntegrationTest { private static UnifiedJedis jedis2; private static String JEDIS1_ID = ""; private static String JEDIS2_ID = ""; - private MultiClusterPooledConnectionProvider provider; + private MultiDbConnectionProvider provider; private UnifiedJedis failoverClient; @BeforeAll @@ -138,7 +138,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) - .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy()); + .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy()); // Disable redisProxy1 redisProxy1.disable(); @@ -149,7 +149,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception // 3. Subsequent calls should be routed to Endpoint 2 assertThrows(JedisConnectionException.class, () -> failoverClient.info("server")); - assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // Check that the failoverClient is now using Endpoint 2 @@ -160,7 +160,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception // Endpoint1 and Endpoint2 are NOT available, assertThrows(JedisConnectionException.class, () -> failoverClient.info("server")); - assertThat(provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // and since no other nodes are available, it should propagate the errors to the caller @@ -173,20 +173,20 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) - .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy()); + .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy()); - provider.setActiveCluster(endpoint2.getHostAndPort()); + provider.setActiveDatabase(endpoint2.getHostAndPort()); assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID)); } - private List getClusterConfigs( - JedisClientConfig clientConfig, EndpointConfig... endpoints) { + private List getDatabaseConfigs(JedisClientConfig clientConfig, + EndpointConfig... 
endpoints) { int weight = endpoints.length; AtomicInteger weightCounter = new AtomicInteger(weight); return Arrays.stream(endpoints) - .map(e -> MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig) + .map(e -> MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig) .weight(1.0f / weightCounter.getAndIncrement()).healthCheckEnabled(false).build()) .collect(Collectors.toList()); } @@ -197,21 +197,21 @@ public void testManualFailoverInflightCommandsCompleteGracefully() throws ExecutionException, InterruptedException { await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) - .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy()); + .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy()); assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); // We will trigger failover while this command is in-flight Future> blpop = executor.submit(() -> failoverClient.blpop(1000, "test-list")); - provider.setActiveCluster(endpoint2.getHostAndPort()); + provider.setActiveDatabase(endpoint2.getHostAndPort()); // After the manual failover, commands should be executed against Endpoint 2 assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID)); // Failover was manually triggered, and there were no errors // previous endpoint CB should still be in CLOSED state - assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.CLOSED)); jedis1.rpush("test-list", "somevalue"); @@ -228,12 +228,12 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) - .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy()); + .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy()); Future> blpop = executor.submit(() -> failoverClient.blpop(10000, "test-list-1")); // trigger failover manually - provider.setActiveCluster(endpoint2.getHostAndPort()); + provider.setActiveDatabase(endpoint2.getHostAndPort()); Future infoCmd = executor.submit(() -> failoverClient.info("server")); // After the manual failover, commands should be executed against Endpoint 2 @@ -247,7 +247,7 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws assertThat(exception.getCause(), instanceOf(JedisConnectionException.class)); // Check that the circuit breaker for Endpoint 1 is open after the error - assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.OPEN)); // Ensure that the active cluster is still Endpoint 2 @@ -261,18 +261,15 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws */ @Test public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOException { - MultiClusterClientConfig failoverConfig = new MultiClusterClientConfig.Builder( - getClusterConfigs( - DefaultJedisClientConfig.builder() - .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) - .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(), - endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1) - 
.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2) - .circuitBreakerFailureRateThreshold(50f) // %50 failure rate - .build(); - - MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - failoverConfig); + MultiDbConfig failoverConfig = new MultiDbConfig.Builder(getDatabaseConfigs( + DefaultJedisClientConfig.builder().socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) + .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(), + endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1) + .circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2) + .circuitBreakerFailureRateThreshold(50f) // 50% failure rate + .build(); + + MultiDbConnectionProvider provider = new MultiDbConnectionProvider(failoverConfig); try (UnifiedJedis client = new UnifiedJedis(provider)) { // Verify initial connection to first endpoint assertThat(getNodeId(client.info("server")), equalTo(JEDIS1_ID)); @@ -298,7 +295,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc assertThrows(JedisConnectionException.class, () -> client.info("server")); // Circuit breaker should be open after just one command with retries - assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // Next command should be routed to the second endpoint @@ -318,7 +315,7 @@ @Test public void testInflightCommandsAreRetriedAfterFailover() throws Exception { - MultiClusterPooledConnectionProvider customProvider = createProvider( + MultiDbConnectionProvider customProvider = createProvider( builder -> builder.retryOnFailover(true)); // Create a custom client with retryOnFailover enabled for this specific test @@ -342,7 +339,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception { assertThat(getNodeId(customClient.info("server")), equalTo(JEDIS2_ID)); // Check that the circuit breaker for Endpoint 1 is open assertThat( - customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + customProvider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // Disable redisProxy1 to enforce connection drop for the in-flight (blpop) command @@ -360,7 +357,7 @@ @Test public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception { // Create a custom provider and client with retry disabled for this specific test - MultiClusterPooledConnectionProvider customProvider = createProvider( + MultiDbConnectionProvider customProvider = createProvider( builder -> builder.retryOnFailover(false)); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { @@ -376,7 +373,7 @@ public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception { // Check that the circuit breaker for Endpoint 1 is open assertThat( - customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + customProvider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // Disable redisProxy1 to enforce the current blpop command failure @@ -417,34 +414,34 @@ private static String generateTestValue(int byteSize) { } /** - *
Creates a MultiClusterPooledConnectionProvider with standard configuration + * Creates a MultiDbConnectionProvider with standard configuration * @return A configured provider */ - private MultiClusterPooledConnectionProvider createProvider() { + private MultiDbConnectionProvider createProvider() { JedisClientConfig clientConfig = DefaultJedisClientConfig.builder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - MultiClusterClientConfig failoverConfig = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) + MultiDbConfig failoverConfig = new MultiDbConfig.Builder( + getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build(); - return new MultiClusterPooledConnectionProvider(failoverConfig); + return new MultiDbConnectionProvider(failoverConfig); } /** - * Creates a MultiClusterPooledConnectionProvider with standard configuration + * Creates a MultiDbConnectionProvider with standard configuration * @return A configured provider */ - private MultiClusterPooledConnectionProvider createProvider( - Function<MultiClusterClientConfig.Builder, MultiClusterClientConfig.Builder> configCustomizer) { + private MultiDbConnectionProvider createProvider( + Function<MultiDbConfig.Builder, MultiDbConfig.Builder> configCustomizer) { JedisClientConfig clientConfig = DefaultJedisClientConfig.builder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( + getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f); @@ -452,6 +449,6 @@ private MultiClusterPooledConnectionProvider createProvider( builder = configCustomizer.apply(builder); } - return new MultiClusterPooledConnectionProvider(builder.build()); + return new MultiDbConnectionProvider(builder.build()); } }
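Within the test class, the `Function`-based overload above allows per-test tweaks without duplicating the base configuration, for example (illustrative values):

```java
// Sketch: customize the shared provider configuration for one test.
// retryOnFailover(...) and gracePeriod(...) are builder methods seen in this PR.
MultiDbConnectionProvider retryingProvider = createProvider(
    builder -> builder.retryOnFailover(true).gracePeriod(500));
```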
diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java index bc00caf8ed..a5aae5e9bf 100644 --- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java +++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java @@ -1,7 +1,5 @@ package redis.clients.jedis.mcf; -import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig; - import io.github.resilience4j.ratelimiter.RateLimiterConfig; import org.hamcrest.Matchers; import org.junit.jupiter.api.AfterAll; @@ -18,7 +16,7 @@ import eu.rekawek.toxiproxy.ToxiproxyClient; import eu.rekawek.toxiproxy.model.Toxic; import redis.clients.jedis.*; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.scenario.ActiveActiveFailoverTest; import redis.clients.jedis.scenario.MultiThreadedFakeApp; import redis.clients.jedis.scenario.RecommendedSettings; @@ -96,18 +94,18 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio "TESTING WITH PARAMETERS: fastFailover: {} numberOfThreads: {} minFailoverCompletionDuration: {} maxFailoverCompletionDuration: {} ", fastFailover, numberOfThreads, minFailoverCompletionDuration, maxFailoverCompletionDuration); - MultiClusterClientConfig.ClusterConfig[] clusterConfig = new MultiClusterClientConfig.ClusterConfig[2]; + MultiDbConfig.DatabaseConfig[] clusterConfig = new MultiDbConfig.DatabaseConfig[2]; JedisClientConfig config = endpoint1.getClientConfigBuilder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - clusterConfig[0] = ClusterConfig.builder(endpoint1.getHostAndPort(), config) + clusterConfig[0] = DatabaseConfig.builder(endpoint1.getHostAndPort(), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build(); - clusterConfig[1] = ClusterConfig.builder(endpoint2.getHostAndPort(), config) + clusterConfig[1] = DatabaseConfig.builder(endpoint2.getHostAndPort(), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build(); - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfig); + MultiDbConfig.Builder builder = new MultiDbConfig.Builder(clusterConfig); builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit @@ -124,7 +122,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio // Use the parameterized fastFailover setting builder.fastFailover(fastFailover); - class FailoverReporter implements Consumer<ClusterSwitchEventArgs> { + class FailoverReporter implements Consumer<DatabaseSwitchEvent> { String currentClusterName = "not set"; @@ -141,10 +139,10 @@ public String getCurrentClusterName() { } @Override - public void accept(ClusterSwitchEventArgs e) { - this.currentClusterName = e.getClusterName(); + public void accept(DatabaseSwitchEvent e) { + this.currentClusterName = e.getDatabaseName(); log.info("\n\n===={}=== \nJedis switching to cluster: {}\n====End of log===\n", - e.getReason(), e.getClusterName()); + e.getReason(), e.getDatabaseName()); if ((e.getReason() == SwitchReason.CIRCUIT_BREAKER || e.getReason() == SwitchReason.HEALTH_CHECK)) { failoverHappened = true; @@ -164,11 +162,10 @@ public void accept(ClusterSwitchEventArgs e) { ensureEndpointAvailability(endpoint2.getHostAndPort(), config); // Create the connection provider - MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - builder.build()); + MultiDbConnectionProvider provider = new MultiDbConnectionProvider(builder.build()); FailoverReporter reporter = new FailoverReporter(); - provider.setClusterSwitchListener(reporter); - provider.setActiveCluster(endpoint1.getHostAndPort()); + provider.setDatabaseSwitchListener(reporter); + provider.setActiveDatabase(endpoint1.getHostAndPort()); UnifiedJedis client = new UnifiedJedis(provider); @@ -180,7 +177,7 @@ public void accept(ClusterSwitchEventArgs e) { AtomicBoolean unexpectedErrors = new AtomicBoolean(false); AtomicReference lastException = new AtomicReference(); AtomicLong stopRunningAt = new AtomicLong(); - String cluster2Id = provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker() + String cluster2Id = provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker() .getName(); // Start thread that imitates an application that uses the client @@ -198,7 +195,7 @@ public void accept(ClusterSwitchEventArgs e) { while (true) { try { if (System.currentTimeMillis() > stopRunningAt.get()) break; - currentClusterId =
provider.getCluster().getCircuitBreaker().getName(); + currentClusterId = provider.getDatabase().getCircuitBreaker().getName(); Map executionInfo = new HashMap() { { put("threadId", String.valueOf(threadId)); @@ -287,7 +284,7 @@ public boolean isCompleted(Duration checkInterval, Duration delayAfter, Duration } log.info("Fake app completed"); - ConnectionPool pool = provider.getCluster(endpoint1.getHostAndPort()).getConnectionPool(); + ConnectionPool pool = provider.getDatabase(endpoint1.getHostAndPort()).getConnectionPool(); log.info("First connection pool state: active: {}, idle: {}", pool.getNumActive(), pool.getNumIdle()); diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java similarity index 81% rename from src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java rename to src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java index c603509f32..2892005cb4 100644 --- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java @@ -11,35 +11,35 @@ import org.junit.jupiter.params.provider.CsvSource; import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; /** * Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures * must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the - * provider and cluster wiring to avoid network I/O. + * provider and {@link Database} wiring to avoid network I/O. 
*/ -public class ClusterEvaluateThresholdsTest { +public class DatabaseEvaluateThresholdsTest { - private MultiClusterPooledConnectionProvider provider; - private Cluster cluster; + private MultiDbConnectionProvider provider; + private Database database; private CircuitBreaker circuitBreaker; private CircuitBreaker.Metrics metrics; @BeforeEach public void setup() { - provider = mock(MultiClusterPooledConnectionProvider.class); - cluster = mock(Cluster.class); + provider = mock(MultiDbConnectionProvider.class); + database = mock(Database.class); circuitBreaker = mock(CircuitBreaker.class); metrics = mock(CircuitBreaker.Metrics.class); - when(cluster.getCircuitBreaker()).thenReturn(circuitBreaker); + when(database.getCircuitBreaker()).thenReturn(circuitBreaker); when(circuitBreaker.getMetrics()).thenReturn(metrics); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); // Configure the mock to call the real evaluateThresholds method - doCallRealMethod().when(cluster).evaluateThresholds(anyBoolean()); + doCallRealMethod().when(database).evaluateThresholds(anyBoolean()); } @@ -50,15 +50,15 @@ public void setup() { */ @Test public void belowMinFailures_doesNotFailover() { - when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3); + when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3); when(metrics.getNumberOfFailedCalls()).thenReturn(1); // +1 becomes 2, still < 3 when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0); - when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f); + when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); - cluster.evaluateThresholds(false); + database.evaluateThresholds(false); verify(circuitBreaker, never()).transitionToOpenState(); - verify(provider, never()).switchToHealthyCluster(any(), any()); + verify(provider, never()).switchToHealthyDatabase(any(), any()); } /** @@ -68,13 +68,13 @@ public void belowMinFailures_doesNotFailover() { */ @Test public void minFailuresAndRateExceeded_triggersOpenState() { - when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3); + when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3); when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, reaching minFailures when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0); - when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f); + when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); - cluster.evaluateThresholds(false); + database.evaluateThresholds(false); verify(circuitBreaker, times(1)).transitionToOpenState(); } @@ -86,27 +86,27 @@ public void minFailuresAndRateExceeded_triggersOpenState() { */ @Test public void rateBelowThreshold_doesNotFailover() { - when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3); + when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3); when(metrics.getNumberOfSuccessfulCalls()).thenReturn(3); when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, rate = 3/(3+3) = 50% - when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(80.0f); + when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(80.0f); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); - cluster.evaluateThresholds(false); + database.evaluateThresholds(false); verify(circuitBreaker, never()).transitionToOpenState(); - 
verify(provider, never()).switchToHealthyCluster(any(), any()); + verify(provider, never()).switchToHealthyDatabase(any(), any()); } @Test public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig - .builder(java.util.Arrays.asList(MultiClusterClientConfig.ClusterConfig + MultiDbConfig.Builder cfgBuilder = MultiDbConfig + .builder(java.util.Arrays.asList(MultiDbConfig.DatabaseConfig .builder(new HostAndPort("localhost", 6379), DefaultJedisClientConfig.builder().build()) .healthCheckEnabled(false).build())); cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10); - MultiClusterClientConfig mcc = cfgBuilder.build(); + MultiDbConfig mcc = cfgBuilder.build(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc); @@ -165,13 +165,13 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures, boolean lastFailRecorded, boolean expectOpenState) { - when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(minFailures); + when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(minFailures); when(metrics.getNumberOfSuccessfulCalls()).thenReturn(successes); when(metrics.getNumberOfFailedCalls()).thenReturn(failures); - when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(ratePercent); + when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(ratePercent); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); - cluster.evaluateThresholds(lastFailRecorded); + database.evaluateThresholds(lastFailRecorded); if (expectOpenState) { verify(circuitBreaker, times(1)).transitionToOpenState(); diff --git a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java index 6939ef7069..e3e5f3f05e 100644 --- a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java +++ b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDbConfig; public class DefaultValuesTest { @@ -19,16 +19,16 @@ public class DefaultValuesTest { @Test void testDefaultValuesInConfig() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(fakeEndpoint, config).build(); - MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + MultiDbConfig multiConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); // check for grace period assertEquals(60000, multiConfig.getGracePeriod()); // check for cluster config - assertEquals(clusterConfig, multiConfig.getClusterConfigs()[0]); + assertEquals(clusterConfig, multiConfig.getDatabaseConfigs()[0]); // check healthchecks enabled assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java index afedc66f4d..34e521683e 100644 --- 
a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java @@ -17,7 +17,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDbConfig; @ExtendWith(MockitoExtension.class) class FailbackMechanismIntegrationTest { @@ -49,40 +49,38 @@ private MockedConstruction mockPool() { void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with different weights - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(false) // Disabled + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled .failbackCheckInterval(100) // Short interval for testing .build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 (only healthy option) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster2 healthy again (higher weight - would normally trigger failback) - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait longer than failback interval // Should still be on cluster1 since failback is disabled await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint1) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); } } } @@ -91,39 +89,38 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { void testFailbackToHigherWeightCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with different weights - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = 
MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f) // Lower weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(100) // Short interval for testing + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(100) // Short interval for testing .gracePeriod(100).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to force failover to cluster2 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (lower weight, but only healthy option) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check interval + some buffer // Should have failed back to cluster1 (higher weight) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint1) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); } } } @@ -132,43 +129,42 @@ void testFailbackToHigherWeightCluster() throws InterruptedException { void testNoFailbackToLowerWeightCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { // Create three clusters with different weights to properly test no failback to lower weight - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f) // Lowest weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) // Medium weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }) 
+ MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(100).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight) - assertEquals(provider.getCluster(endpoint3), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); // Make cluster3 unhealthy to force failover to cluster2 (medium weight) - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (highest weight among healthy clusters) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 (lowest weight) healthy - this should NOT trigger failback // since we don't failback to lower weight clusters - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check interval // Should still be on cluster2 (no failback to lower weight cluster1) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint2) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } } } @@ -176,39 +172,38 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException { @Test void testFailbackToHigherWeightClusterImmediately() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(100).gracePeriod(50).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(100).gracePeriod(50).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to force failover to cluster2 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, 
HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (only healthy option) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check // Should have failed back to cluster1 immediately (higher weight, no stability period // required) await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint1) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); } } } @@ -216,45 +211,44 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException @Test void testUnhealthyClusterCancelsFailback() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(200).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(200).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to force failover to cluster2 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (only healthy option) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again (should trigger failback attempt) - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait a bit Thread.sleep(100); // Make cluster1 unhealthy again before failback completes - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Wait past the original failback interval // Should still be on cluster2 (failback was cancelled due to cluster1 becoming 
unhealthy) await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint2) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } } } @@ -262,42 +256,41 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException { @Test void testMultipleClusterFailbackPriority() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lowest // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Medium // weight - MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight) - assertEquals(provider.getCluster(endpoint3), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); // Make cluster3 unhealthy to force failover to cluster2 (next highest weight) - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (highest weight among healthy clusters) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster3 healthy again - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback // Should fail back to cluster3 (highest weight) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint3) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint3) == provider.getDatabase()); } } } @@ -305,34 +298,33 @@ void testMultipleClusterFailbackPriority() throws InterruptedException { @Test void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower 
// weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(100).gracePeriod(200) // 200ms grace - // period + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(100).gracePeriod(200) // 200ms grace + // period .build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Now make cluster2 unhealthy - it should be disabled for grace period - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should failover to cluster1 - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Cluster2 should be in grace period - assertTrue(provider.getCluster(endpoint2).isInGracePeriod()); + assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); } } } @@ -340,51 +332,50 @@ void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException { @Test void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(50) // Short interval for testing + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(50) // Short interval for testing .gracePeriod(100) // Short grace period for testing .build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to start grace period and force failover - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, 
endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should failover to cluster1 - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Cluster2 should be in grace period - assertTrue(provider.getCluster(endpoint2).isInGracePeriod()); + assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); // Make cluster2 healthy again while it's still in grace period - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Should still be on cluster1 because cluster2 is in grace period - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Wait for grace period to expire // Cluster2 should no longer be in grace period await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> !provider.getCluster(endpoint2).isInGracePeriod()); + .until(() -> !provider.getDatabase(endpoint2).isInGracePeriod()); // Wait for failback check to run // Should now failback to cluster2 (higher weight) since grace period has expired await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint2) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } } } diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java index fee216f2be..ad251975c2 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDbConfig; @ExtendWith(MockitoExtension.class) class FailbackMechanismUnitTest { @@ -26,69 +26,65 @@ void setUp() { @Test void testFailbackCheckIntervalConfiguration() { // Test default value - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); - MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + MultiDbConfig defaultConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); assertEquals(120000, defaultConfig.getFailbackCheckInterval()); // Test custom value - MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(3000) - .build(); + MultiDbConfig customConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000).build(); assertEquals(3000, customConfig.getFailbackCheckInterval()); } @Test void testFailbackSupportedConfiguration() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + 
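The grace-period tests encode two rules: a database that turns unhealthy is excluded from selection for `gracePeriod` milliseconds, and failback to it is considered only once that window has elapsed, even if a health check already reports it healthy again. A sketch of that bookkeeping; the class and field names here are the editor's, not from this patch:

```java
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;

public class GracePeriodSketch {

  private final Duration gracePeriod;
  private final Clock clock;
  private Instant disabledUntil = Instant.MIN;

  GracePeriodSketch(Duration gracePeriod, Clock clock) {
    this.gracePeriod = gracePeriod;
    this.clock = clock;
  }

  // Invoked when a health check flips the endpoint to UNHEALTHY: the endpoint
  // leaves the failback candidate set for the full window.
  void onUnhealthy() {
    disabledUntil = clock.instant().plus(gracePeriod);
  }

  // Mirrors the isInGracePeriod() assertions: the endpoint stays ineligible
  // until the window expires, even if it is already reported HEALTHY again.
  boolean isInGracePeriod() {
    return clock.instant().isBefore(disabledUntil);
  }

  public static void main(String[] args) {
    GracePeriodSketch endpoint = new GracePeriodSketch(Duration.ofMillis(200), Clock.systemUTC());
    endpoint.onUnhealthy();
    System.out.println(endpoint.isInGracePeriod()); // true for the next ~200ms
  }
}
```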
MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test default (should be true) - MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + MultiDbConfig defaultConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); assertTrue(defaultConfig.isFailbackSupported()); // Test disabled - MultiClusterClientConfig disabledConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(false) - .build(); + MultiDbConfig disabledConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false).build(); assertFalse(disabledConfig.isFailbackSupported()); } @Test void testFailbackCheckIntervalValidation() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test zero interval (should be allowed) - MultiClusterClientConfig zeroConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(0) - .build(); + MultiDbConfig zeroConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0).build(); assertEquals(0, zeroConfig.getFailbackCheckInterval()); // Test negative interval (should be allowed - implementation decision) - MultiClusterClientConfig negativeConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(-1000) - .build(); + MultiDbConfig negativeConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000).build(); assertEquals(-1000, negativeConfig.getFailbackCheckInterval()); } @Test void testBuilderChaining() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test that builder methods can be chained - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) .failbackCheckInterval(2000).retryOnFailover(true).build(); assertTrue(config.isFailbackSupported()); @@ -99,47 +95,47 @@ void testBuilderChaining() { @Test void testGracePeriodConfiguration() { // Test default value - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); - MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + MultiDbConfig defaultConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); assertEquals(60000, defaultConfig.getGracePeriod()); // Test custom value - 
MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(5000).build(); + MultiDbConfig customConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build(); assertEquals(5000, customConfig.getGracePeriod()); } @Test void testGracePeriodValidation() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test zero grace period (should be allowed) - MultiClusterClientConfig zeroConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(0).build(); + MultiDbConfig zeroConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build(); assertEquals(0, zeroConfig.getGracePeriod()); // Test negative grace period (should be allowed - implementation decision) - MultiClusterClientConfig negativeConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(-1000).build(); + MultiDbConfig negativeConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(-1000).build(); assertEquals(-1000, negativeConfig.getGracePeriod()); } @Test void testGracePeriodBuilderChaining() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test that builder methods can be chained - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) .failbackCheckInterval(2000).gracePeriod(8000).retryOnFailover(true).build(); assertTrue(config.isFailbackSupported()); diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java index d1cb8b90e9..ce12cde8a7 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java @@ -15,10 +15,10 @@ import redis.clients.jedis.EndpointConfig; import redis.clients.jedis.HostAndPorts; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.UnifiedJedis; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; -import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.StrategySupplier; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; import redis.clients.jedis.scenario.RecommendedSettings; @@ -32,7 +32,7 @@ public class HealthCheckIntegrationTest { @Test public void testDisableHealthCheck() { // No health check strategy supplier means health check is disabled - MultiClusterPooledConnectionProvider customProvider = getMCCF(null); + MultiDbConnectionProvider customProvider = getMCCF(null); try 
(UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -43,11 +43,10 @@ public void testDisableHealthCheck() { @Test public void testDefaultStrategySupplier() { // Create a default strategy supplier that creates EchoStrategy instances - MultiClusterClientConfig.StrategySupplier defaultSupplier = (hostAndPort, - jedisClientConfig) -> { + MultiDbConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> { return new EchoStrategy(hostAndPort, jedisClientConfig); }; - MultiClusterPooledConnectionProvider customProvider = getMCCF(defaultSupplier); + MultiDbConnectionProvider customProvider = getMCCF(defaultSupplier); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -58,8 +57,7 @@ public void testDefaultStrategySupplier() { @Test public void testCustomStrategySupplier() { // Create a StrategySupplier that uses the JedisClientConfig when available - MultiClusterClientConfig.StrategySupplier strategySupplier = (hostAndPort, - jedisClientConfig) -> { + MultiDbConfig.StrategySupplier strategySupplier = (hostAndPort, jedisClientConfig) -> { return new TestHealthCheckStrategy(HealthCheckStrategy.Config.builder().interval(500) .timeout(500).numProbes(1).policy(BuiltIn.ANY_SUCCESS).build(), (endpoint) -> { // Create connection per health check to avoid resource leak @@ -72,7 +70,7 @@ public void testCustomStrategySupplier() { }); }; - MultiClusterPooledConnectionProvider customProvider = getMCCF(strategySupplier); + MultiDbConnectionProvider customProvider = getMCCF(strategySupplier); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -80,23 +78,21 @@ public void testCustomStrategySupplier() { } } - private MultiClusterPooledConnectionProvider getMCCF( - MultiClusterClientConfig.StrategySupplier strategySupplier) { - Function modifier = builder -> strategySupplier == null + private MultiDbConnectionProvider getMCCF(MultiDbConfig.StrategySupplier strategySupplier) { + Function modifier = builder -> strategySupplier == null ? 
builder.healthCheckEnabled(false) : builder.healthCheckStrategySupplier(strategySupplier); - List clusterConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 }) + List databaseConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 }) .map(e -> modifier - .apply(MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig)) - .build()) + .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)).build()) .collect(Collectors.toList()); - MultiClusterClientConfig mccf = new MultiClusterClientConfig.Builder(clusterConfigs) - .retryMaxAttempts(1).retryWaitDuration(1).circuitBreakerSlidingWindowSize(1) + MultiDbConfig mccf = new MultiDbConfig.Builder(databaseConfigs).retryMaxAttempts(1) + .retryWaitDuration(1).circuitBreakerSlidingWindowSize(1) .circuitBreakerFailureRateThreshold(100).build(); - return new MultiClusterPooledConnectionProvider(mccf); + return new MultiDbConnectionProvider(mccf); } // ========== Probe Logic Integration Tests ========== diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java index b7205fd808..b83ecb8981 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.Endpoint; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.UnifiedJedis; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; @@ -338,7 +338,7 @@ void testEchoStrategyCustomIntervalTimeout() { @Test void testEchoStrategyDefaultSupplier() { - MultiClusterClientConfig.StrategySupplier supplier = EchoStrategy.DEFAULT; + MultiDbConfig.StrategySupplier supplier = EchoStrategy.DEFAULT; HealthCheckStrategy strategy = supplier.get(testEndpoint, testConfig); assertInstanceOf(EchoStrategy.class, strategy); @@ -348,12 +348,12 @@ void testEchoStrategyDefaultSupplier() { @Test void testNewFieldLocations() { - // Test new field locations in ClusterConfig and MultiClusterClientConfig - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + // Test new field locations in DatabaseConfig and MultiDbConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).weight(2.5f).build(); - MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).retryOnFailover(true) + MultiDbConfig multiConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true) .failbackSupported(false).build(); assertEquals(2.5f, clusterConfig.getWeight()); @@ -363,8 +363,8 @@ void testNewFieldLocations() { @Test void testDefaultValues() { - // Test default values in ClusterConfig - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + // Test default values in DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).build(); assertEquals(1.0f, clusterConfig.getWeight()); // Default weight @@ -374,22 +374,21 @@ void testDefaultValues() { // health // check) - // Test default values in MultiClusterClientConfig - MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder( - new 
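For reference, wiring a custom health check after the rename stays a two-step affair: define a `MultiDbConfig.StrategySupplier`, then hand it to the `DatabaseConfig` builder. A sketch composed only of calls that appear in this patch, assuming the `redis.clients.jedis.mcf` package location these tests sit in; the endpoint and tuning values are placeholders:

```java
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.mcf.EchoStrategy;
import redis.clients.jedis.mcf.HealthCheckStrategy;

public class StrategySupplierSketch {
  public static void main(String[] args) {
    JedisClientConfig clientConfig = DefaultJedisClientConfig.builder().build();
    HostAndPort endpoint = new HostAndPort("redis-east.example.com", 14000); // placeholder

    // ECHO-based probing, tuned via the Config builder shown in these tests.
    MultiDbConfig.StrategySupplier supplier = (hostAndPort,
        jedisClientConfig) -> new EchoStrategy(hostAndPort, jedisClientConfig,
            HealthCheckStrategy.Config.builder().interval(500).timeout(250).numProbes(1).build());

    // Omitting healthCheckStrategySupplier keeps the EchoStrategy default;
    // healthCheckEnabled(false) would disable probing for this database entirely.
    MultiDbConfig.DatabaseConfig db = MultiDbConfig.DatabaseConfig.builder(endpoint, clientConfig)
        .healthCheckStrategySupplier(supplier).build();

    MultiDbConfig config = new MultiDbConfig.Builder(new MultiDbConfig.DatabaseConfig[] { db })
        .build();
    System.out.println(config.isFailbackSupported()); // true by default, per these tests
  }
}
```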
MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + // Test default values in MultiDbConfig + MultiDbConfig multiConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); assertFalse(multiConfig.isRetryOnFailover()); // Default is false assertTrue(multiConfig.isFailbackSupported()); // Default is true } @Test - void testClusterConfigWithHealthCheckStrategy() { + void testDatabaseConfigWithHealthCheckStrategy() { HealthCheckStrategy customStrategy = mock(HealthCheckStrategy.class); - MultiClusterClientConfig.StrategySupplier supplier = (hostAndPort, - jedisClientConfig) -> customStrategy; + MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> customStrategy; - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(supplier).build(); assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -399,35 +398,34 @@ void testClusterConfigWithHealthCheckStrategy() { } @Test - void testClusterConfigWithStrategySupplier() { - MultiClusterClientConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> { + void testDatabaseConfigWithStrategySupplier() { + MultiDbConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> { return mock(HealthCheckStrategy.class); }; - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(customSupplier).build(); assertEquals(customSupplier, clusterConfig.getHealthCheckStrategySupplier()); } @Test - void testClusterConfigWithEchoStrategy() { - MultiClusterClientConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> { + void testDatabaseConfigWithEchoStrategy() { + MultiDbConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> { return new EchoStrategy(hostAndPort, jedisClientConfig); }; - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(echoSupplier).build(); - MultiClusterClientConfig.StrategySupplier supplier = clusterConfig - .getHealthCheckStrategySupplier(); + MultiDbConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier(); assertNotNull(supplier); assertInstanceOf(EchoStrategy.class, supplier.get(testEndpoint, testConfig)); } @Test - void testClusterConfigWithDefaultHealthCheck() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + void testDatabaseConfigWithDefaultHealthCheck() { + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).build(); // Should use default EchoStrategy assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -435,16 +433,16 @@ void testClusterConfigWithDefaultHealthCheck() { } @Test - void testClusterConfigWithDisabledHealthCheck() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + void testDatabaseConfigWithDisabledHealthCheck() { + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, 
testConfig).healthCheckEnabled(false).build(); assertNull(clusterConfig.getHealthCheckStrategySupplier()); } @Test - void testClusterConfigHealthCheckEnabledExplicitly() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + void testDatabaseConfigHealthCheckEnabledExplicitly() { + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckEnabled(true).build(); assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -516,7 +514,7 @@ void testHealthCheckIntegration() throws InterruptedException { @Test void testStrategySupplierPolymorphism() { // Test that the polymorphic design works correctly - MultiClusterClientConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> { + MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> { if (jedisClientConfig != null) { return new EchoStrategy(hostAndPort, jedisClientConfig, HealthCheckStrategy.Config.builder().interval(500).timeout(250).numProbes(1).build()); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java deleted file mode 100644 index 90ff443794..0000000000 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java +++ /dev/null @@ -1,205 +0,0 @@ -package redis.clients.jedis.mcf; - -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.MockedConstruction; - -import redis.clients.jedis.Connection; -import redis.clients.jedis.ConnectionPool; -import redis.clients.jedis.DefaultJedisClientConfig; -import redis.clients.jedis.EndpointConfig; -import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.HostAndPorts; -import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; -import redis.clients.jedis.exceptions.JedisValidationException; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.mockConstruction; -import static org.mockito.Mockito.when; - -public class MultiClusterDynamicEndpointUnitTest { - - private MultiClusterPooledConnectionProvider provider; - private JedisClientConfig clientConfig; - private final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("standalone0"); - private final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("standalone1"); - - @BeforeEach - void setUp() { - clientConfig = DefaultJedisClientConfig.builder().build(); - - // Create initial provider with endpoint1 - ClusterConfig initialConfig = createClusterConfig(endpoint1.getHostAndPort(), 1.0f); - - MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder( - new ClusterConfig[] { initialConfig }).build(); - - provider = new MultiClusterPooledConnectionProvider(multiConfig); - } - - // Helper method to create cluster configurations - private ClusterConfig createClusterConfig(HostAndPort hostAndPort, float weight) { - // Disable health check for unit tests to avoid real connections - return ClusterConfig.builder(hostAndPort, clientConfig).weight(weight).healthCheckEnabled(false) - .build(); - } - - @Test - void testAddNewCluster() { - ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f); - - // Should not throw exception - 
assertDoesNotThrow(() -> provider.add(newConfig)); - - // Verify the cluster was added by checking it can be retrieved - assertNotNull(provider.getCluster(endpoint2.getHostAndPort())); - } - - @Test - void testAddDuplicateCluster() { - ClusterConfig duplicateConfig = createClusterConfig(endpoint1.getHostAndPort(), 2.0f); - - // Should throw validation exception for duplicate endpoint - assertThrows(JedisValidationException.class, () -> provider.add(duplicateConfig)); - } - - @Test - void testAddNullClusterConfig() { - // Should throw validation exception for null config - assertThrows(JedisValidationException.class, () -> provider.add(null)); - } - - @Test - void testRemoveExistingCluster() { - Connection mockConnection = mock(Connection.class); - when(mockConnection.ping()).thenReturn(true); - - try (MockedConstruction mockedPool = mockPool(mockConnection)) { - // Create initial provider with endpoint1 - ClusterConfig clusterConfig1 = createClusterConfig(endpoint1.getHostAndPort(), 1.0f); - - MultiClusterClientConfig multiConfig = MultiClusterClientConfig - .builder(new ClusterConfig[] { clusterConfig1 }).build(); - - try ( - MultiClusterPooledConnectionProvider providerWithMockedPool = new MultiClusterPooledConnectionProvider( - multiConfig)) { - - // Add endpoint2 as second cluster - ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f); - providerWithMockedPool.add(newConfig); - - // Now remove endpoint1 (original cluster) - assertDoesNotThrow(() -> providerWithMockedPool.remove(endpoint1.getHostAndPort())); - - // Verify endpoint1 was removed - assertNull(providerWithMockedPool.getCluster(endpoint1.getHostAndPort())); - // Verify endpoint2 still exists - assertNotNull(providerWithMockedPool.getCluster(endpoint2.getHostAndPort())); - } - } - } - - private MockedConstruction mockPool(Connection mockConnection) { - return mockConstruction(TrackingConnectionPool.class, (mock, context) -> { - when(mock.getResource()).thenReturn(mockConnection); - doNothing().when(mock).close(); - }); - } - - @Test - void testRemoveNonExistentCluster() { - HostAndPort nonExistentEndpoint = new HostAndPort("localhost", 9999); - - // Should throw validation exception for non-existent endpoint - assertThrows(JedisValidationException.class, () -> provider.remove(nonExistentEndpoint)); - } - - @Test - void testRemoveLastRemainingCluster() { - // Should throw validation exception when trying to remove the last cluster - assertThrows(JedisValidationException.class, () -> provider.remove(endpoint1.getHostAndPort())); - } - - @Test - void testRemoveNullEndpoint() { - // Should throw validation exception for null endpoint - assertThrows(JedisValidationException.class, () -> provider.remove(null)); - } - - @Test - void testAddAndRemoveMultipleClusters() { - // Add endpoint2 as second cluster - ClusterConfig config2 = createClusterConfig(endpoint2.getHostAndPort(), 2.0f); - - // Create a third endpoint for this test - HostAndPort endpoint3 = new HostAndPort("localhost", 6381); - ClusterConfig config3 = createClusterConfig(endpoint3, 3.0f); - - provider.add(config2); - provider.add(config3); - - // Verify all clusters exist - assertNotNull(provider.getCluster(endpoint1.getHostAndPort())); - assertNotNull(provider.getCluster(endpoint2.getHostAndPort())); - assertNotNull(provider.getCluster(endpoint3)); - - // Remove endpoint2 - provider.remove(endpoint2.getHostAndPort()); - - // Verify correct cluster was removed - assertNull(provider.getCluster(endpoint2.getHostAndPort())); - 
assertNotNull(provider.getCluster(endpoint1.getHostAndPort())); - assertNotNull(provider.getCluster(endpoint3)); - } - - @Test - void testActiveClusterHandlingOnAdd() { - // The initial cluster should be active - assertNotNull(provider.getCluster()); - - // Add endpoint2 with higher weight - ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 5.0f); - provider.add(newConfig); - - // Active cluster should still be valid (implementation may or may not switch) - assertNotNull(provider.getCluster()); - } - - @Test - void testActiveClusterHandlingOnRemove() { - Connection mockConnection = mock(Connection.class); - when(mockConnection.ping()).thenReturn(true); - - try (MockedConstruction mockedPool = mockPool(mockConnection)) { - // Create initial provider with endpoint1 - ClusterConfig clusterConfig1 = createClusterConfig(endpoint1.getHostAndPort(), 1.0f); - - MultiClusterClientConfig multiConfig = MultiClusterClientConfig - .builder(new ClusterConfig[] { clusterConfig1 }).build(); - - try ( - MultiClusterPooledConnectionProvider providerWithMockedPool = new MultiClusterPooledConnectionProvider( - multiConfig)) { - - // Add endpoint2 as second cluster - ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f); - providerWithMockedPool.add(newConfig); - - // Get current active cluster - Object initialActiveCluster = providerWithMockedPool.getCluster(); - assertNotNull(initialActiveCluster); - - // Remove endpoint1 (original cluster, might be active) - providerWithMockedPool.remove(endpoint1.getHostAndPort()); - - // Should still have an active cluster - assertNotNull(providerWithMockedPool.getCluster()); - } - } - } -} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java deleted file mode 100644 index 7b580042dc..0000000000 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java +++ /dev/null @@ -1,162 +0,0 @@ -package redis.clients.jedis.mcf; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.*; - -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.MockedConstruction; -import org.mockito.junit.jupiter.MockitoExtension; - -import redis.clients.jedis.Connection; -import redis.clients.jedis.ConnectionPool; -import redis.clients.jedis.DefaultJedisClientConfig; -import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.exceptions.JedisValidationException; - -/** - * Tests for MultiClusterPooledConnectionProvider initialization edge cases - */ -@ExtendWith(MockitoExtension.class) -public class MultiClusterInitializationTest { - - private HostAndPort endpoint1; - private HostAndPort endpoint2; - private HostAndPort endpoint3; - private JedisClientConfig clientConfig; - - @BeforeEach - void setUp() { - endpoint1 = new HostAndPort("localhost", 6379); - endpoint2 = new HostAndPort("localhost", 6380); - endpoint3 = new HostAndPort("localhost", 6381); - clientConfig = DefaultJedisClientConfig.builder().build(); - } - - private MockedConstruction mockPool() { - Connection mockConnection = mock(Connection.class); - lenient().when(mockConnection.ping()).thenReturn(true); - return mockConstruction(ConnectionPool.class, (mock, context) -> { - when(mock.getResource()).thenReturn(mockConnection); 
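The dynamic-endpoint tests (this deleted file and its renamed successor later in the patch) document the runtime add/remove contract: duplicate adds, null arguments, and removing the last remaining database all throw `JedisValidationException`. In application code the renamed API reads roughly as below; the hostnames are placeholders and, unlike the tests, no connection pool is mocked, so this shape sketch assumes reachable endpoints:

```java
import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;

public class DynamicEndpointSketch {
  public static void main(String[] args) {
    JedisClientConfig clientConfig = DefaultJedisClientConfig.builder().build();
    HostAndPort east = new HostAndPort("redis-east.example.com", 14000); // placeholder
    HostAndPort west = new HostAndPort("redis-west.example.com", 14000); // placeholder

    DatabaseConfig initial = DatabaseConfig.builder(east, clientConfig).weight(1.0f)
        .healthCheckEnabled(false).build();
    MultiDbConfig config = MultiDbConfig.builder(new DatabaseConfig[] { initial }).build();

    try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
      // Grow the topology at runtime, then retire the original endpoint.
      provider.add(DatabaseConfig.builder(west, clientConfig).weight(2.0f)
          .healthCheckEnabled(false).build());
      provider.remove(east);
      // provider.remove(west) would now throw JedisValidationException:
      // the provider refuses to drop its last remaining database.
    }
  }
}
```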
- doNothing().when(mock).close(); - }); - } - - @Test - void testInitializationWithMixedHealthCheckConfiguration() { - try (MockedConstruction mockedPool = mockPool()) { - // Create clusters with mixed health check configuration - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig - .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false) // No health - // check - .build(); - - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig - .builder(endpoint2, clientConfig).weight(2.0f) - .healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With - // health - // check - .build(); - - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); - - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { - // Should initialize successfully - assertNotNull(provider.getCluster()); - - // Should select cluster1 (no health check, assumed healthy) or cluster2 based on weight - // Since cluster2 has higher weight and health checks, it should be selected if healthy - assertTrue(provider.getCluster() == provider.getCluster(endpoint1) - || provider.getCluster() == provider.getCluster(endpoint2)); - } - } - } - - @Test - void testInitializationWithAllHealthChecksDisabled() { - try (MockedConstruction mockedPool = mockPool()) { - // Create clusters with no health checks - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig - .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig - .builder(endpoint2, clientConfig).weight(3.0f) // Higher weight - .healthCheckEnabled(false).build(); - - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); - - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { - // Should select cluster2 (highest weight, no health checks) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); - } - } - } - - @Test - void testInitializationWithSingleCluster() { - try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster = MultiClusterClientConfig.ClusterConfig - .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster }).build(); - - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { - // Should select the only available cluster - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); - } - } - } - - @Test - void testErrorHandlingWithNullConfiguration() { - assertThrows(JedisValidationException.class, () -> { - new MultiClusterPooledConnectionProvider(null); - }); - } - - @Test - void testErrorHandlingWithEmptyClusterArray() { - assertThrows(JedisValidationException.class, () -> { - new MultiClusterClientConfig.Builder(new MultiClusterClientConfig.ClusterConfig[0]).build(); - }); - } - - @Test - void testErrorHandlingWithNullClusterConfig() { - assertThrows(IllegalArgumentException.class, () -> { - new MultiClusterClientConfig.Builder(new MultiClusterClientConfig.ClusterConfig[] { 
null }) - .build(); - }); - } - - @Test - void testInitializationWithZeroWeights() { - try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig - .builder(endpoint1, clientConfig).weight(0.0f) // Zero weight - .healthCheckEnabled(false).build(); - - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig - .builder(endpoint2, clientConfig).weight(0.0f) // Zero weight - .healthCheckEnabled(false).build(); - - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); - - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { - // Should still initialize and select one of the clusters - assertNotNull(provider.getCluster()); - } - } - } -} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java deleted file mode 100644 index f5076694c8..0000000000 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java +++ /dev/null @@ -1,20 +0,0 @@ -package redis.clients.jedis.mcf; - -import redis.clients.jedis.Endpoint; - -public class MultiClusterPooledConnectionProviderHelper { - - public static void onHealthStatusChange(MultiClusterPooledConnectionProvider provider, - Endpoint endpoint, HealthStatus oldStatus, HealthStatus newStatus) { - provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus)); - } - - public static void periodicFailbackCheck(MultiClusterPooledConnectionProvider provider) { - provider.periodicFailbackCheck(); - } - - public static Endpoint switchToHealthyCluster(MultiClusterPooledConnectionProvider provider, - SwitchReason reason, MultiClusterPooledConnectionProvider.Cluster iterateFrom) { - return provider.switchToHealthyCluster(reason, iterateFrom); - } -} diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java similarity index 73% rename from src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java index 755325c705..7a0f4319c6 100644 --- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java @@ -16,11 +16,11 @@ import redis.clients.jedis.Connection; import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.Protocol; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; import redis.clients.jedis.util.ReflectionTestUtil; /** @@ -28,40 +28,40 @@ * must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the * provider and cluster wiring to avoid network I/O. 
*/ -public class CircuitBreakerThresholdsTest { +public class MultiDbCircuitBreakerThresholdsTest { - private MultiClusterPooledConnectionProvider realProvider; - private MultiClusterPooledConnectionProvider spyProvider; - private Cluster cluster; - private CircuitBreakerCommandExecutor executor; + private MultiDbConnectionProvider realProvider; + private MultiDbConnectionProvider spyProvider; + private Database cluster; + private MultiDbCommandExecutor executor; private CommandObject dummyCommand; private TrackingConnectionPool poolMock; private HostAndPort fakeEndpoint = new HostAndPort("fake", 6379); private HostAndPort fakeEndpoint2 = new HostAndPort("fake2", 6379); - private ClusterConfig[] fakeClusterConfigs; + private DatabaseConfig[] fakeDatabaseConfigs; @BeforeEach public void setup() throws Exception { - ClusterConfig[] clusterConfigs = new ClusterConfig[] { - ClusterConfig.builder(fakeEndpoint, DefaultJedisClientConfig.builder().build()) + DatabaseConfig[] databaseConfigs = new DatabaseConfig[] { + DatabaseConfig.builder(fakeEndpoint, DefaultJedisClientConfig.builder().build()) .healthCheckEnabled(false).weight(1.0f).build(), - ClusterConfig.builder(fakeEndpoint2, DefaultJedisClientConfig.builder().build()) + DatabaseConfig.builder(fakeEndpoint2, DefaultJedisClientConfig.builder().build()) .healthCheckEnabled(false).weight(0.5f).build() }; - fakeClusterConfigs = clusterConfigs; + fakeDatabaseConfigs = databaseConfigs; - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig.builder(clusterConfigs) + MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(databaseConfigs) .circuitBreakerFailureRateThreshold(50.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); - MultiClusterClientConfig mcc = cfgBuilder.build(); + MultiDbConfig mcc = cfgBuilder.build(); - realProvider = new MultiClusterPooledConnectionProvider(mcc); + realProvider = new MultiDbConnectionProvider(mcc); spyProvider = spy(realProvider); - cluster = spyProvider.getCluster(); + cluster = spyProvider.getDatabase(); - executor = new CircuitBreakerCommandExecutor(spyProvider); + executor = new MultiDbCommandExecutor(spyProvider); dummyCommand = new CommandObject<>(new CommandArguments(Protocol.Command.PING), BuilderFactory.STRING); @@ -88,7 +88,7 @@ public void belowMinFailures_doesNotFailover() { } // Below min failures; CB remains CLOSED - assertEquals(CircuitBreaker.State.CLOSED, spyProvider.getClusterCircuitBreaker().getState()); + assertEquals(CircuitBreaker.State.CLOSED, spyProvider.getDatabaseCircuitBreaker().getState()); } /** @@ -111,10 +111,10 @@ public void minFailuresAndRateExceeded_triggersFailover() { // Next call should hit open CB (CallNotPermitted) and trigger failover assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand)); - verify(spyProvider, atLeastOnce()).switchToHealthyCluster(eq(SwitchReason.CIRCUIT_BREAKER), + verify(spyProvider, atLeastOnce()).switchToHealthyDatabase(eq(SwitchReason.CIRCUIT_BREAKER), any()); assertEquals(CircuitBreaker.State.FORCED_OPEN, - spyProvider.getCluster(fakeEndpoint).getCircuitBreaker().getState()); + spyProvider.getDatabase(fakeEndpoint).getCircuitBreaker().getState()); } /** @@ -123,15 +123,13 @@ public void minFailuresAndRateExceeded_triggersFailover() { @Test public void rateBelowThreshold_doesNotFailover() throws Exception { // Use local provider with higher threshold (80%) and no retries - MultiClusterClientConfig.Builder cfgBuilder = 
MultiClusterClientConfig - .builder(fakeClusterConfigs).circuitBreakerFailureRateThreshold(80.0f) - .circuitBreakerMinNumOfFailures(3).circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1) - .retryOnFailover(false); - MultiClusterPooledConnectionProvider rp = new MultiClusterPooledConnectionProvider( - cfgBuilder.build()); - MultiClusterPooledConnectionProvider sp = spy(rp); - Cluster c = sp.getCluster(); - try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(sp)) { + MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs) + .circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3) + .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); + MultiDbConnectionProvider rp = new MultiDbConnectionProvider(cfgBuilder.build()); + MultiDbConnectionProvider sp = spy(rp); + Database c = sp.getDatabase(); + try (MultiDbCommandExecutor ex = new MultiDbCommandExecutor(sp)) { CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING), BuilderFactory.STRING); @@ -158,17 +156,16 @@ public void rateBelowThreshold_doesNotFailover() throws Exception { assertThrows(JedisConnectionException.class, () -> ex.executeCommand(cmd)); } - assertEquals(CircuitBreaker.State.CLOSED, sp.getClusterCircuitBreaker().getState()); + assertEquals(CircuitBreaker.State.CLOSED, sp.getDatabaseCircuitBreaker().getState()); } } @Test public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig - .builder(fakeClusterConfigs); + MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs); cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10); - MultiClusterClientConfig mcc = cfgBuilder.build(); + MultiDbConfig mcc = cfgBuilder.build(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc); @@ -192,17 +189,15 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures, boolean expectFailoverOnNext) throws Exception { - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig - .builder(fakeClusterConfigs).circuitBreakerFailureRateThreshold(ratePercent) - .circuitBreakerMinNumOfFailures(minFailures) + MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs) + .circuitBreakerFailureRateThreshold(ratePercent).circuitBreakerMinNumOfFailures(minFailures) .circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1) .retryOnFailover(false); - MultiClusterPooledConnectionProvider real = new MultiClusterPooledConnectionProvider( - cfgBuilder.build()); - MultiClusterPooledConnectionProvider spy = spy(real); - Cluster c = spy.getCluster(); - try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(spy)) { + MultiDbConnectionProvider real = new MultiDbConnectionProvider(cfgBuilder.build()); + MultiDbConnectionProvider spy = spy(real); + Database c = spy.getDatabase(); + try (MultiDbCommandExecutor ex = new MultiDbCommandExecutor(spy)) { CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING), BuilderFactory.STRING); @@ -237,7 +232,7 @@ public void thresholdMatrix(int minFailures, float ratePercent, int successes, i if (expectFailoverOnNext) { assertThrows(Exception.class, () -> ex.executeCommand(cmd)); - verify(spy, 
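The threshold tests assert a two-part gate: failover triggers only once both the minimum failure count and the failure-rate threshold over the sliding window are exceeded (and a configured rate of 0.0f is mapped to 100% with an effectively unreachable minimum-calls value). The editor's distillation of that gate as plain arithmetic; the boundary semantics (`>=` versus `>`) here are illustrative, not confirmed by the patch:

```java
public class ThresholdGateSketch {

  // Failover requires BOTH gates: an absolute minimum number of failures and
  // a failure rate over the sliding window at or above the threshold.
  static boolean shouldFailover(int failures, int totalCalls, int minNumOfFailures,
      float failureRateThresholdPercent) {
    if (failures < minNumOfFailures) {
      return false; // below the minimum, the rate is never even consulted
    }
    float ratePercent = totalCalls == 0 ? 0f : (100f * failures) / totalCalls;
    return ratePercent >= failureRateThresholdPercent;
  }

  public static void main(String[] args) {
    // minNumOfFailures=3, threshold=50%: two failures alone are not enough...
    System.out.println(shouldFailover(2, 2, 3, 50.0f)); // false
    // ...while three failures out of four calls (75%) cross both gates.
    System.out.println(shouldFailover(3, 4, 3, 50.0f)); // true
    // Three failures out of ten calls (30%) stay below an 80% threshold.
    System.out.println(shouldFailover(3, 10, 3, 80.0f)); // false
  }
}
```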
atLeastOnce()).switchToHealthyCluster(eq(SwitchReason.CIRCUIT_BREAKER), any()); + verify(spy, atLeastOnce()).switchToHealthyDatabase(eq(SwitchReason.CIRCUIT_BREAKER), any()); assertEquals(CircuitBreaker.State.FORCED_OPEN, c.getCircuitBreaker().getState()); } else { CircuitBreaker.State st = c.getCircuitBreaker().getState(); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java new file mode 100644 index 0000000000..663f33529e --- /dev/null +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java @@ -0,0 +1,200 @@ +package redis.clients.jedis.mcf; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.MockedConstruction; + +import redis.clients.jedis.Connection; +import redis.clients.jedis.DefaultJedisClientConfig; +import redis.clients.jedis.EndpointConfig; +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.HostAndPorts; +import redis.clients.jedis.JedisClientConfig; +import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; +import redis.clients.jedis.exceptions.JedisValidationException; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; +import static org.mockito.Mockito.when; + +public class MultiDbConnectionProviderDynamicEndpointUnitTest { + + private MultiDbConnectionProvider provider; + private JedisClientConfig clientConfig; + private final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("standalone0"); + private final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("standalone1"); + + @BeforeEach + void setUp() { + clientConfig = DefaultJedisClientConfig.builder().build(); + + // Create initial provider with endpoint1 + DatabaseConfig initialConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); + + MultiDbConfig multiConfig = new MultiDbConfig.Builder(new DatabaseConfig[] { initialConfig }) + .build(); + + provider = new MultiDbConnectionProvider(multiConfig); + } + + // Helper method to create database configurations + private DatabaseConfig createDatabaseConfig(HostAndPort hostAndPort, float weight) { + // Disable health check for unit tests to avoid real connections + return DatabaseConfig.builder(hostAndPort, clientConfig).weight(weight) + .healthCheckEnabled(false).build(); + } + + @Test + void testAddNewDatabase() { + DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); + + // Should not throw exception + assertDoesNotThrow(() -> provider.add(newConfig)); + + // Verify the database was added by checking it can be retrieved + assertNotNull(provider.getDatabase(endpoint2.getHostAndPort())); + } + + @Test + void testAddDuplicateDatabase() { + DatabaseConfig duplicateConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 2.0f); + + // Should throw validation exception for duplicate endpoint + assertThrows(JedisValidationException.class, () -> provider.add(duplicateConfig)); + } + + @Test + void testAddNullDatabaseConfig() { + // Should throw validation exception for null config + assertThrows(JedisValidationException.class, () -> provider.add(null)); + } + + @Test + void testRemoveExistingDatabase() { + Connection mockConnection = mock(Connection.class); + 
when(mockConnection.ping()).thenReturn(true); + + try (MockedConstruction<TrackingConnectionPool> mockedPool = mockPool(mockConnection)) { + // Create initial provider with endpoint1 + DatabaseConfig dbConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); + + MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { dbConfig1 }).build(); + + try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider( + multiConfig)) { + + // Add endpoint2 as second database + DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); + providerWithMockedPool.add(newConfig); + + // Now remove endpoint1 (original database) + assertDoesNotThrow(() -> providerWithMockedPool.remove(endpoint1.getHostAndPort())); + + // Verify endpoint1 was removed + assertNull(providerWithMockedPool.getDatabase(endpoint1.getHostAndPort())); + // Verify endpoint2 still exists + assertNotNull(providerWithMockedPool.getDatabase(endpoint2.getHostAndPort())); + } + } + } + + private MockedConstruction<TrackingConnectionPool> mockPool(Connection mockConnection) { + return mockConstruction(TrackingConnectionPool.class, (mock, context) -> { + when(mock.getResource()).thenReturn(mockConnection); + doNothing().when(mock).close(); + }); + } + + @Test + void testRemoveNonExistentDatabase() { + HostAndPort nonExistentEndpoint = new HostAndPort("localhost", 9999); + + // Should throw validation exception for non-existent endpoint + assertThrows(JedisValidationException.class, () -> provider.remove(nonExistentEndpoint)); + } + + @Test + void testRemoveLastRemainingDatabase() { + // Should throw validation exception when trying to remove the last database + assertThrows(JedisValidationException.class, () -> provider.remove(endpoint1.getHostAndPort())); + } + + @Test + void testRemoveNullEndpoint() { + // Should throw validation exception for null endpoint + assertThrows(JedisValidationException.class, () -> provider.remove(null)); + } + + @Test + void testAddAndRemoveMultipleDatabases() { + // Add endpoint2 as second database + DatabaseConfig config2 = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); + + // Create a third endpoint for this test + HostAndPort endpoint3 = new HostAndPort("localhost", 6381); + DatabaseConfig config3 = createDatabaseConfig(endpoint3, 3.0f); + + provider.add(config2); + provider.add(config3); + + // Verify all databases exist + assertNotNull(provider.getDatabase(endpoint1.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint2.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint3)); + + // Remove endpoint2 + provider.remove(endpoint2.getHostAndPort()); + + // Verify correct database was removed + assertNull(provider.getDatabase(endpoint2.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint1.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint3)); + } + + @Test + void testActiveDatabaseHandlingOnAdd() { + // The initial database should be active + assertNotNull(provider.getDatabase()); + + // Add endpoint2 with higher weight + DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 5.0f); + provider.add(newConfig); + + // Active database should still be valid (implementation may or may not switch) + assertNotNull(provider.getDatabase()); + } + + @Test + void testActiveDatabaseHandlingOnRemove() { + Connection mockConnection = mock(Connection.class); + when(mockConnection.ping()).thenReturn(true); + + try (MockedConstruction<TrackingConnectionPool> mockedPool = mockPool(mockConnection)) { + // Create initial provider with endpoint1 +
DatabaseConfig dbConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); + + MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { dbConfig1 }).build(); + + try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider( + multiConfig)) { + + // Add endpoint2 as second database + DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); + providerWithMockedPool.add(newConfig); + + // Get current active database + Object initialActiveDb = providerWithMockedPool.getDatabase(); + assertNotNull(initialActiveDb); + + // Remove endpoint1 (original database, might be active) + providerWithMockedPool.remove(endpoint1.getHostAndPort()); + + // Should still have an active database + assertNotNull(providerWithMockedPool.getDatabase()); + } + } + } +} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java similarity index 67% rename from src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java index e4992fb92b..0b062e4298 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java @@ -8,8 +8,8 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException; import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException; import redis.clients.jedis.util.ReflectionTestUtil; @@ -22,34 +22,34 @@ /** * Tests for how getMaxNumFailoverAttempts and getDelayInBetweenFailoverAttempts impact - * MultiClusterPooledConnectionProvider behaviour when no healthy clusters are available. + * MultiDbConnectionProvider behaviour when no healthy databases are available. 
*/ -public class MultiClusterFailoverAttemptsConfigTest { +public class MultiDbConnectionProviderFailoverAttemptsConfigTest { private HostAndPort endpoint0 = new HostAndPort("purposefully-incorrect", 0000); private HostAndPort endpoint1 = new HostAndPort("purposefully-incorrect", 0001); - private MultiClusterPooledConnectionProvider provider; + private MultiDbConnectionProvider provider; @BeforeEach void setUp() throws Exception { JedisClientConfig clientCfg = DefaultJedisClientConfig.builder().build(); - ClusterConfig[] clusterConfigs = new ClusterConfig[] { - ClusterConfig.builder(endpoint0, clientCfg).weight(1.0f).healthCheckEnabled(false).build(), - ClusterConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false) + DatabaseConfig[] databaseConfigs = new DatabaseConfig[] { + DatabaseConfig.builder(endpoint0, clientCfg).weight(1.0f).healthCheckEnabled(false).build(), + DatabaseConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false) .build() }; - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs); + MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs); // Use small values by default for tests unless overridden per-test via reflection setBuilderFailoverConfig(builder, /* maxAttempts */ 10, /* delayMs */ 12000); - provider = new MultiClusterPooledConnectionProvider(builder.build()); + provider = new MultiDbConnectionProvider(builder.build()); - // Disable both clusters to force handleNoHealthyCluster path - provider.getCluster(endpoint0).setDisabled(true); - provider.getCluster(endpoint1).setDisabled(true); + // Disable both databases to force handleNoHealthyCluster path + provider.getDatabase(endpoint0).setDisabled(true); + provider.getDatabase(endpoint1).setDisabled(true); } @AfterEach @@ -69,9 +69,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex // First call: should throw temporary and start the freeze window, incrementing attempt count to // 1 - assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster())); + assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper + .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase())); int afterFirst = getProviderAttemptCount(); assertEquals(1, afterFirst); @@ -79,8 +78,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex // and should NOT increment the attempt count beyond 1 for (int i = 0; i < 50; i++) { assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster())); + () -> MultiDbConnectionProviderHelper.switchToHealthyCluster(provider, + SwitchReason.HEALTH_CHECK, provider.getDatabase())); assertEquals(1, getProviderAttemptCount()); } } @@ -97,9 +96,8 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr // First call: should throw temporary and start the freeze window, incrementing attempt count to // 1 - assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster())); + assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper + 
.switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase())); int afterFirst = getProviderAttemptCount(); assertEquals(1, afterFirst); @@ -107,14 +105,14 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr // and should NOT increment the attempt count beyond 1 for (int i = 0; i < 50; i++) { assertThrows(JedisTemporarilyNotAvailableException.class, - () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); assertEquals(1, getProviderAttemptCount()); } await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(Duration.ofMillis(10)) .until(() -> { Exception e = assertThrows(JedisFailoverException.class, () -> provider - .switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); + .switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); return e instanceof JedisPermanentlyNotAvailableException; }); } @@ -130,15 +128,15 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex // Expect exactly 'maxAttempts' temporary exceptions, then a permanent one assertThrows(JedisTemporarilyNotAvailableException.class, - () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt // 1 assertThrows(JedisTemporarilyNotAvailableException.class, - () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt // 2 // Next should exceed max and become permanent assertThrows(JedisPermanentlyNotAvailableException.class, - () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt // 3 // -> // permanent @@ -146,17 +144,17 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex // ======== Test helper methods (reflection) ======== - private static void setBuilderFailoverConfig(MultiClusterClientConfig.Builder builder, - int maxAttempts, int delayMs) throws Exception { + private static void setBuilderFailoverConfig(MultiDbConfig.Builder builder, int maxAttempts, + int delayMs) throws Exception { ReflectionTestUtil.setField(builder, "maxNumFailoverAttempts", maxAttempts); ReflectionTestUtil.setField(builder, "delayInBetweenFailoverAttempts", delayMs); } private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception { - // Access the underlying MultiClusterClientConfig inside provider and adjust fields for this + // Access the underlying MultiDbConfig inside provider and adjust fields for this // test - Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig"); ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts); @@ -164,13 +162,13 @@ private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exce } private int getProviderMaxAttempts() throws Exception { - Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig"); return ReflectionTestUtil.getField(cfg, 
"maxNumFailoverAttempts"); } private int getProviderDelayMs() throws Exception { - Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig"); return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts"); } diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java new file mode 100644 index 0000000000..4ae061c9f5 --- /dev/null +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java @@ -0,0 +1,20 @@ +package redis.clients.jedis.mcf; + +import redis.clients.jedis.Endpoint; + +public class MultiDbConnectionProviderHelper { + + public static void onHealthStatusChange(MultiDbConnectionProvider provider, Endpoint endpoint, + HealthStatus oldStatus, HealthStatus newStatus) { + provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus)); + } + + public static void periodicFailbackCheck(MultiDbConnectionProvider provider) { + provider.periodicFailbackCheck(); + } + + public static Endpoint switchToHealthyCluster(MultiDbConnectionProvider provider, + SwitchReason reason, MultiDbConnectionProvider.Database iterateFrom) { + return provider.switchToHealthyDatabase(reason, iterateFrom); + } +} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java new file mode 100644 index 0000000000..1935647d46 --- /dev/null +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java @@ -0,0 +1,153 @@ +package redis.clients.jedis.mcf; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.MockedConstruction; +import org.mockito.junit.jupiter.MockitoExtension; + +import redis.clients.jedis.Connection; +import redis.clients.jedis.ConnectionPool; +import redis.clients.jedis.DefaultJedisClientConfig; +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.JedisClientConfig; +import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; +import redis.clients.jedis.exceptions.JedisValidationException; + +/** + * Tests for MultiDbConnectionProvider initialization edge cases + */ +@ExtendWith(MockitoExtension.class) +public class MultiDbConnectionProviderInitializationTest { + + private HostAndPort endpoint1; + private HostAndPort endpoint2; + private HostAndPort endpoint3; + private JedisClientConfig clientConfig; + + @BeforeEach + void setUp() { + endpoint1 = new HostAndPort("localhost", 6379); + endpoint2 = new HostAndPort("localhost", 6380); + endpoint3 = new HostAndPort("localhost", 6381); + clientConfig = DefaultJedisClientConfig.builder().build(); + } + + private MockedConstruction mockPool() { + Connection mockConnection = mock(Connection.class); + lenient().when(mockConnection.ping()).thenReturn(true); + return mockConstruction(ConnectionPool.class, (mock, context) -> { + when(mock.getResource()).thenReturn(mockConnection); + doNothing().when(mock).close(); + }); + } + + @Test + void testInitializationWithMixedHealthCheckConfiguration() { + try (MockedConstruction mockedPool = mockPool()) { + // Create databases with mixed health check configuration + 
DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f) + .healthCheckEnabled(false) // No health + // check + .build(); + + DatabaseConfig db2 = DatabaseConfig.builder(endpoint2, clientConfig).weight(2.0f) + .healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With + // health + // check + .build(); + + MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db2 }).build(); + + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { + // Should initialize successfully + assertNotNull(provider.getDatabase()); + + // Should select db1 (no health check, assumed healthy) or db2 based on weight + // Since db2 has higher weight and health checks, it should be selected if healthy + assertTrue(provider.getDatabase() == provider.getDatabase(endpoint1) + || provider.getDatabase() == provider.getDatabase(endpoint2)); + } + } + } + + @Test + void testInitializationWithAllHealthChecksDisabled() { + try (MockedConstruction mockedPool = mockPool()) { + // Create databases with no health checks + DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f) + .healthCheckEnabled(false).build(); + + DatabaseConfig db22 = DatabaseConfig.builder(endpoint2, clientConfig).weight(3.0f) // Higher + // weight + .healthCheckEnabled(false).build(); + + MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db22 }).build(); + + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { + // Should select db22 (highest weight, no health checks) + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); + } + } + } + + @Test + void testInitializationWithSingleCluster() { + try (MockedConstruction mockedPool = mockPool()) { + DatabaseConfig db = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f) + .healthCheckEnabled(false).build(); + + MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db }).build(); + + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { + // Should select the only available db + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); + } + } + } + + @Test + void testErrorHandlingWithNullConfiguration() { + assertThrows(JedisValidationException.class, () -> { + new MultiDbConnectionProvider(null); + }); + } + + @Test + void testErrorHandlingWithEmptyClusterArray() { + assertThrows(JedisValidationException.class, () -> { + new MultiDbConfig.Builder(new DatabaseConfig[0]).build(); + }); + } + + @Test + void testErrorHandlingWithNullDatabaseConfig() { + assertThrows(IllegalArgumentException.class, () -> { + new MultiDbConfig.Builder(new DatabaseConfig[] { null }).build(); + }); + } + + @Test + void testInitializationWithZeroWeights() { + try (MockedConstruction mockedPool = mockPool()) { + DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(0.0f) // Zero + // weight + .healthCheckEnabled(false).build(); + + DatabaseConfig db2 = DatabaseConfig.builder(endpoint2, clientConfig).weight(0.0f) // Zero + // weight + .healthCheckEnabled(false).build(); + + MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db2 }).build(); + + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { + // Should still initialize and select one of the databases + assertNotNull(provider.getDatabase()); + } + } + } +} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java 
b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java similarity index 64% rename from src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java index 88b2948016..841896aa34 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java @@ -5,10 +5,10 @@ import org.awaitility.Durations; import org.junit.jupiter.api.*; import redis.clients.jedis.*; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.exceptions.JedisValidationException; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException; import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException; @@ -23,27 +23,26 @@ import static org.junit.jupiter.api.Assertions.*; /** - * @see MultiClusterPooledConnectionProvider + * @see MultiDbConnectionProvider */ @Tag("integration") -public class MultiClusterPooledConnectionProviderTest { +public class MultiDbConnectionProviderTest { private final EndpointConfig endpointStandalone0 = HostAndPorts.getRedisEndpoint("standalone0"); private final EndpointConfig endpointStandalone1 = HostAndPorts.getRedisEndpoint("standalone1"); - private MultiClusterPooledConnectionProvider provider; + private MultiDbConnectionProvider provider; @BeforeEach public void setUp() { - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = ClusterConfig.builder(endpointStandalone0.getHostAndPort(), + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = DatabaseConfig.builder(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build()).weight(0.5f).build(); - clusterConfigs[1] = ClusterConfig.builder(endpointStandalone1.getHostAndPort(), + databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(), endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build(); - provider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(clusterConfigs).build()); + provider = new MultiDbConnectionProvider(new MultiDbConfig.Builder(databaseConfigs).build()); } @AfterEach @@ -55,7 +54,7 @@ public void destroy() { @Test public void testCircuitBreakerForcedTransitions() { - CircuitBreaker circuitBreaker = provider.getClusterCircuitBreaker(); + CircuitBreaker circuitBreaker = provider.getDatabaseCircuitBreaker(); circuitBreaker.getState(); if (CircuitBreaker.State.FORCED_OPEN.equals(circuitBreaker.getState())) @@ -69,46 +68,47 @@ public void testCircuitBreakerForcedTransitions() { } @Test - public void testIterateActiveCluster() throws InterruptedException { - waitForClustersToGetHealthy(provider.getCluster(endpointStandalone0.getHostAndPort()), - provider.getCluster(endpointStandalone1.getHostAndPort())); + public void testSwitchToHealthyDatabase() throws InterruptedException { + waitForDatabaseToGetHealthy(provider.getDatabase(endpointStandalone0.getHostAndPort()), + provider.getDatabase(endpointStandalone1.getHostAndPort())); - 
Endpoint e2 = provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster()); + Endpoint e2 = provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, + provider.getDatabase()); assertEquals(endpointStandalone1.getHostAndPort(), e2); } @Test public void testCanIterateOnceMore() { Endpoint endpoint0 = endpointStandalone0.getHostAndPort(); - waitForClustersToGetHealthy(provider.getCluster(endpoint0), - provider.getCluster(endpointStandalone1.getHostAndPort())); + waitForDatabaseToGetHealthy(provider.getDatabase(endpoint0), + provider.getDatabase(endpointStandalone1.getHostAndPort())); - provider.setActiveCluster(endpoint0); - provider.getCluster().setDisabled(true); - provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster(endpoint0)); + provider.setActiveDatabase(endpoint0); + provider.getDatabase().setDisabled(true); + provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase(endpoint0)); - assertFalse(provider.canIterateFrom(provider.getCluster())); + assertFalse(provider.canIterateFrom(provider.getDatabase())); } - private void waitForClustersToGetHealthy(Cluster... clusters) { + private void waitForDatabaseToGetHealthy(Database... databases) { Awaitility.await().pollInterval(Durations.ONE_HUNDRED_MILLISECONDS) .atMost(Durations.TWO_SECONDS) - .until(() -> Arrays.stream(clusters).allMatch(Cluster::isHealthy)); + .until(() -> Arrays.stream(databases).allMatch(Database::isHealthy)); } @Test - public void testRunClusterFailoverPostProcessor() { - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = ClusterConfig + public void testDatabaseSwitchListener() { + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = DatabaseConfig .builder(new HostAndPort("purposefully-incorrect", 0000), DefaultJedisClientConfig.builder().build()) .weight(0.5f).healthCheckEnabled(false).build(); - clusterConfigs[1] = ClusterConfig + databaseConfigs[1] = DatabaseConfig .builder(new HostAndPort("purposefully-incorrect", 0001), DefaultJedisClientConfig.builder().build()) .weight(0.4f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs); + MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs); // Configures a single failed command to trigger an open circuit on the next subsequent failure builder.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(1) @@ -116,9 +116,8 @@ public void testRunClusterFailoverPostProcessor() { AtomicBoolean isValidTest = new AtomicBoolean(false); - MultiClusterPooledConnectionProvider localProvider = new MultiClusterPooledConnectionProvider( - builder.build()); - localProvider.setClusterSwitchListener(a -> { + MultiDbConnectionProvider localProvider = new MultiDbConnectionProvider(builder.build()); + localProvider.setDatabaseSwitchListener(a -> { isValidTest.set(true); }); @@ -137,22 +136,13 @@ public void testRunClusterFailoverPostProcessor() { } @Test - public void testSetActiveMultiClusterIndexEqualsZero() { - assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(null)); // Should - // throw an - // exception + public void testSetActiveDatabaseNull() { + assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); } @Test - public void testSetActiveMultiClusterIndexLessThanZero() { - assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(null)); // Should - // throw an - // exception - } 
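+  // Editorial sketch, not part of the original change: the index-based
+  // setActiveMultiClusterIndex tests removed here are superseded by
+  // endpoint-based selection. Assuming both databases from setUp() have
+  // passed their health checks, a manual switch now takes the target
+  // endpoint directly:
+  @Test
+  public void testSetActiveDatabaseByEndpoint() {
+    provider.setActiveDatabase(endpointStandalone1.getHostAndPort());
+    assertEquals(provider.getDatabase(endpointStandalone1.getHostAndPort()),
+        provider.getDatabase());
+  }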
- - @Test - public void testSetActiveMultiClusterIndexOutOfRange() { - assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(new Endpoint() { + public void testSetActiveDatabaseByMissingEndpoint() { + assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(new Endpoint() { @Override public String getHost() { return "purposefully-incorrect"; @@ -171,15 +161,14 @@ public void testConnectionPoolConfigApplied() { poolConfig.setMaxTotal(8); poolConfig.setMaxIdle(4); poolConfig.setMinIdle(1); - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = new ClusterConfig(endpointStandalone0.getHostAndPort(), + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = new DatabaseConfig(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build(), poolConfig); - clusterConfigs[1] = new ClusterConfig(endpointStandalone1.getHostAndPort(), + databaseConfigs[1] = new DatabaseConfig(endpointStandalone1.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build(), poolConfig); - try ( - MultiClusterPooledConnectionProvider customProvider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(clusterConfigs).build())) { - MultiClusterPooledConnectionProvider.Cluster activeCluster = customProvider.getCluster(); + try (MultiDbConnectionProvider customProvider = new MultiDbConnectionProvider( + new MultiDbConfig.Builder(databaseConfigs).build())) { + MultiDbConnectionProvider.Database activeCluster = customProvider.getDatabase(); ConnectionPool connectionPool = activeCluster.getConnectionPool(); assertEquals(8, connectionPool.getMaxTotal()); assertEquals(4, connectionPool.getMaxIdle()); @@ -202,13 +191,13 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException { }); // Create new provider with health check strategy (don't use the setUp() provider) - ClusterConfig config = ClusterConfig + DatabaseConfig config = DatabaseConfig .builder(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build()) .healthCheckStrategy(countingStrategy).build(); - MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(Collections.singletonList(config)).build()); + MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider( + new MultiDbConfig.Builder(Collections.singletonList(config)).build()); try { // Wait for some health checks to occur @@ -236,22 +225,22 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException { @Test public void userCommand_firstTemporary_thenPermanent_inOrder() { - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = ClusterConfig.builder(endpointStandalone0.getHostAndPort(), + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = DatabaseConfig.builder(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build()).weight(0.5f).build(); - clusterConfigs[1] = ClusterConfig.builder(endpointStandalone1.getHostAndPort(), + databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(), endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build(); - MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100) + MultiDbConnectionProvider testProvider = new 
MultiDbConnectionProvider(
+        new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
             .maxNumFailoverAttempts(2).retryMaxAttempts(1).build());
 
     try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) {
       jedis.get("foo");
 
-      // Disable both clusters so any attempt to switch results in 'no healthy cluster' path
-      testProvider.getCluster(endpointStandalone0.getHostAndPort()).setDisabled(true);
-      testProvider.getCluster(endpointStandalone1.getHostAndPort()).setDisabled(true);
+      // Disable both databases so any attempt to switch results in 'no healthy database' path
+      testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true);
+      testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true);
 
       // Simulate user running a command that fails and triggers failover iteration
       assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo"));
@@ -266,12 +255,12 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() {
 
   @Test
   public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent_inOrder() {
-    ClusterConfig[] clusterConfigs = new ClusterConfig[2];
-    clusterConfigs[0] = ClusterConfig
+    DatabaseConfig[] databaseConfigs = new DatabaseConfig[2];
+    databaseConfigs[0] = DatabaseConfig
         .builder(endpointStandalone0.getHostAndPort(),
           endpointStandalone0.getClientConfigBuilder().build())
         .weight(0.5f).healthCheckEnabled(false).build();
-    clusterConfigs[1] = ClusterConfig
+    databaseConfigs[1] = DatabaseConfig
         .builder(endpointStandalone1.getHostAndPort(),
           endpointStandalone1.getClientConfigBuilder().build())
         .weight(0.3f).healthCheckEnabled(false).build();
@@ -279,8 +268,8 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
     // ATTENTION: these configuration settings are not random and
     // adjusted to get exact numbers of failures with exact exception types
     // and open to impact from other defaulted values within the components in use.
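+    // (Editorial note, inferred from the builder calls below: with
+    // retryMaxAttempts(1) each failed command is recorded once in the 5-call
+    // sliding window, so the 60% failure-rate threshold is what opens the
+    // circuit and drives the temporary-then-permanent exceptions asserted
+    // further down.)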
- MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100) + MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider( + new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5) .circuitBreakerFailureRateThreshold(60).build()) { }; @@ -288,8 +277,8 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) { jedis.get("foo"); - // disable most weighted cluster so that it will fail on initial requests - testProvider.getCluster(endpointStandalone0.getHostAndPort()).setDisabled(true); + // disable most weighted database so that it will fail on initial requests + testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true); Exception e = assertThrows(JedisConnectionException.class, () -> jedis.get("foo")); assertEquals(JedisConnectionException.class, e.getClass()); @@ -298,7 +287,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent assertEquals(JedisConnectionException.class, e.getClass()); // then disable the second ones - testProvider.getCluster(endpointStandalone1.getHostAndPort()).setDisabled(true); + testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true); assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo")); assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo")); diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java index d657e75829..f58df34e0c 100644 --- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java +++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java @@ -2,7 +2,7 @@ import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.*; -import static redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper.onHealthStatusChange; +import static redis.clients.jedis.mcf.MultiDbConnectionProviderHelper.onHealthStatusChange; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -14,7 +14,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDbConfig; @ExtendWith(MockitoExtension.class) class PeriodicFailbackTest { @@ -42,33 +42,32 @@ private MockedConstruction mockPool() { @Test void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - 
.failbackSupported(true).failbackCheckInterval(100).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(100).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Start grace period for cluster2 manually - provider.getCluster(endpoint2).setGracePeriod(); - provider.getCluster(endpoint2).setDisabled(true); + provider.getDatabase(endpoint2).setGracePeriod(); + provider.getDatabase(endpoint2).setDisabled(true); // Force failover to cluster1 since cluster2 is disabled - provider.switchToHealthyCluster(SwitchReason.FORCED, provider.getCluster(endpoint2)); + provider.switchToHealthyDatabase(SwitchReason.FORCED, provider.getDatabase(endpoint2)); // Manually trigger periodic check - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); // Should still be on cluster1 (cluster2 is in grace period) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); } } } @@ -76,47 +75,46 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException @Test void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add - // grace - // period + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(50).gracePeriod(100).build(); // Add + // grace + // period - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 (cluster2 is in grace period) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Verify cluster2 is in grace period - 
assertTrue(provider.getCluster(endpoint2).isInGracePeriod()); + assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); // Make cluster2 healthy again (but it's still in grace period) onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Trigger periodic check immediately - should still be on cluster1 - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Wait for grace period to expire Thread.sleep(150); // Trigger periodic check after grace period expires - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); // Should have failed back to cluster2 (higher weight, grace period expired) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); } } } @@ -124,27 +122,25 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException { @Test void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(false) // Disabled + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled .failbackCheckInterval(50).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster2 healthy again onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); @@ -153,10 +149,10 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException Thread.sleep(100); // Trigger periodic check - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); // Should still be on cluster1 (failback disabled) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); 
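+        // (Editorial note: failback is weight-driven: once the grace period
+        // of the higher-weight database expires, periodicFailbackCheck
+        // promotes it again; failbackCheckInterval(50) and gracePeriod(100)
+        // are what make this sequence deterministic.)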
} } } @@ -166,38 +162,37 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx try (MockedConstruction mockedPool = mockPool()) { HostAndPort endpoint3 = new HostAndPort("localhost", 6381); - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add // grace // period - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight: 3.0f vs 2.0f vs 1.0f) - assertEquals(provider.getCluster(endpoint3), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); // Make cluster3 unhealthy to force failover to cluster2 (next highest weight) onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (weight 2.0f, higher than cluster1's 1.0f) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 (only healthy cluster left) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster2 and cluster3 healthy again onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); @@ -207,10 +202,10 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx Thread.sleep(150); // Trigger periodic check - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); // Should have failed back to cluster3 (highest weight, grace period expired) - assertEquals(provider.getCluster(endpoint3), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); } } } diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java index 3be7d29656..ac74738226 100644 --- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java +++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java @@ -17,9 +17,9 @@ import redis.clients.jedis.*; import 
redis.clients.jedis.exceptions.JedisAccessControlException; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.ClusterSwitchEventArgs; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper; +import redis.clients.jedis.mcf.DatabaseSwitchEvent; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper; import redis.clients.jedis.mcf.SwitchReason; import redis.clients.jedis.util.IOUtils; @@ -47,10 +47,10 @@ public class AutomaticFailoverTest { private Jedis jedis2; - private List getClusterConfigs( + private List getDatabaseConfigs( JedisClientConfig clientConfig, HostAndPort... hostPorts) { return Arrays.stream(hostPorts) - .map(hp -> new MultiClusterClientConfig.ClusterConfig(hp, clientConfig)) + .map(hp -> new MultiDbConfig.DatabaseConfig(hp, clientConfig)) .collect(Collectors.toList()); } @@ -68,17 +68,17 @@ public void cleanUp() { @Test public void pipelineWithSwitch() { - MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) + MultiDbConnectionProvider provider = new MultiDbConnectionProvider( + new MultiDbConfig.Builder( + getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .build()); try (UnifiedJedis client = new UnifiedJedis(provider)) { AbstractPipeline pipe = client.pipelined(); pipe.set("pstr", "foobar"); pipe.hset("phash", "foo", "bar"); - MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster()); + MultiDbConnectionProviderHelper.switchToHealthyCluster(provider, + SwitchReason.HEALTH_CHECK, provider.getDatabase()); pipe.sync(); } @@ -88,17 +88,17 @@ public void pipelineWithSwitch() { @Test public void transactionWithSwitch() { - MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) + MultiDbConnectionProvider provider = new MultiDbConnectionProvider( + new MultiDbConfig.Builder( + getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .build()); try (UnifiedJedis client = new UnifiedJedis(provider)) { AbstractTransaction tx = client.multi(); tx.set("tstr", "foobar"); tx.hset("thash", "foo", "bar"); - MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster()); + MultiDbConnectionProviderHelper.switchToHealthyCluster(provider, + SwitchReason.HEALTH_CHECK, provider.getDatabase()); assertEquals(Arrays.asList("OK", 1L), tx.exec()); } @@ -112,16 +112,16 @@ public void commandFailoverUnresolvableHost() { int slidingWindowSize = 2; HostAndPort unresolvableHostAndPort = new HostAndPort("unresolvable", 6379); - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort())) + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( + getDatabaseConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort())) .retryWaitDuration(1).retryMaxAttempts(1) .circuitBreakerSlidingWindowSize(slidingWindowSize) 
.circuitBreakerMinNumOfFailures(slidingWindowMinFails); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider( + MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider( builder.build()); - connectionProvider.setClusterSwitchListener(failoverReporter); + connectionProvider.setDatabaseSwitchListener(failoverReporter); UnifiedJedis jedis = new UnifiedJedis(connectionProvider); @@ -152,8 +152,8 @@ public void commandFailover() { int slidingWindowSize = 6; int retryMaxAttempts = 3; - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( + getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .retryMaxAttempts(retryMaxAttempts) // Default // is // 3 @@ -162,9 +162,9 @@ public void commandFailover() { .circuitBreakerSlidingWindowSize(slidingWindowSize); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider( + MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider( builder.build()); - connectionProvider.setClusterSwitchListener(failoverReporter); + connectionProvider.setDatabaseSwitchListener(failoverReporter); UnifiedJedis jedis = new UnifiedJedis(connectionProvider); @@ -194,15 +194,15 @@ public void commandFailover() { public void pipelineFailover() { int slidingWindowSize = 10; - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( + getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .circuitBreakerSlidingWindowSize(slidingWindowSize) .fallbackExceptionList(Collections.singletonList(JedisConnectionException.class)); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiClusterPooledConnectionProvider cacheProvider = new MultiClusterPooledConnectionProvider( + MultiDbConnectionProvider cacheProvider = new MultiDbConnectionProvider( builder.build()); - cacheProvider.setClusterSwitchListener(failoverReporter); + cacheProvider.setDatabaseSwitchListener(failoverReporter); UnifiedJedis jedis = new UnifiedJedis(cacheProvider); @@ -226,15 +226,15 @@ public void pipelineFailover() { public void failoverFromAuthError() { int slidingWindowSize = 10; - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(), + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( + getDatabaseConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(), workingEndpoint.getHostAndPort())).circuitBreakerSlidingWindowSize(slidingWindowSize) .fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class)); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiClusterPooledConnectionProvider cacheProvider = new MultiClusterPooledConnectionProvider( + MultiDbConnectionProvider cacheProvider = new MultiDbConnectionProvider( builder.build()); - cacheProvider.setClusterSwitchListener(failoverReporter); + cacheProvider.setDatabaseSwitchListener(failoverReporter); 
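+    // (Editorial sketch of the renamed hook; a lambda works as well as the
+    // RedisFailoverReporter class used below, and getDatabaseName() is the
+    // rename of getClusterName():
+    //   cacheProvider.setDatabaseSwitchListener(
+    //       e -> log.info("Switched to database: " + e.getDatabaseName()));
+    // )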
UnifiedJedis jedis = new UnifiedJedis(cacheProvider);
@@ -250,13 +250,13 @@ public void failoverFromAuthError() {
     jedis.close();
   }
 
-  static class RedisFailoverReporter implements Consumer<ClusterSwitchEventArgs> {
+  static class RedisFailoverReporter implements Consumer<DatabaseSwitchEvent> {
 
     boolean failedOver = false;
 
     @Override
-    public void accept(ClusterSwitchEventArgs e) {
-      log.info("Jedis fail over to cluster: " + e.getClusterName());
+    public void accept(DatabaseSwitchEvent e) {
+      log.info("Jedis fail over to database: " + e.getDatabaseName());
       failedOver = true;
     }
   }
diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java
similarity index 52%
rename from src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
rename to src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java
index bde6ab7fc6..ed874c816c 100644
--- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java
+++ b/src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java
@@ -14,17 +14,17 @@
 import redis.clients.jedis.DefaultJedisClientConfig;
 import redis.clients.jedis.HostAndPort;
 import redis.clients.jedis.JedisClientConfig;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDbConfig;
 import redis.clients.jedis.mcf.HealthStatus;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
+import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper;
 
 /**
- * Tests for MultiClusterPooledConnectionProvider event handling behavior during initialization and
- * throughout its lifecycle with HealthStatusChangeEvents.
+ * Tests for MultiDbConnectionProvider event handling behavior during initialization and throughout
+ * its lifecycle with HealthStatusChangeEvents.
*/ @ExtendWith(MockitoExtension.class) -public class MultiClusterProviderHealthStatusChangeEventTest { +public class MultiDbProviderHealthStatusChangeTest { private HostAndPort endpoint1; private HostAndPort endpoint2; @@ -52,30 +52,29 @@ private MockedConstruction mockConnectionPool() { void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { // Create clusters without health checks - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - assertFalse(provider.getCluster(endpoint1).isInGracePeriod()); - assertEquals(provider.getCluster(), provider.getCluster(endpoint1)); + assertFalse(provider.getDatabase(endpoint1).isInGracePeriod()); + assertEquals(provider.getDatabase(), provider.getDatabase(endpoint1)); // This should process immediately since initialization is complete assertDoesNotThrow(() -> { - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); }, "Post-initialization events should be processed immediately"); // Verify the cluster has changed according to the UNHEALTHY status - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "UNHEALTHY status on active cluster should cause a grace period"); - assertNotEquals(provider.getCluster(), provider.getCluster(endpoint1), + assertNotEquals(provider.getDatabase(), provider.getDatabase(endpoint1), "UNHEALTHY status on active cluster should cause a failover"); } } @@ -84,46 +83,45 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { @Test void postInit_nonActive_changes_do_not_switch_active() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); 
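+      // (Editorial note: with health checks disabled both databases start out
+      // assumed-healthy, so the 1.0f vs 0.5f weights alone make endpoint1 the
+      // initial active database that the assertions below rely on.)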
- try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Verify initial state - assertEquals(provider.getCluster(endpoint1), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(), "Should start with endpoint1 active"); // Simulate multiple rapid events for the same endpoint (post-init behavior) - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // After first UNHEALTHY on active cluster: it enters grace period and provider fails over - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Active cluster should enter grace period"); - assertEquals(provider.getCluster(endpoint2), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(), "Should fail over to endpoint2"); - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Healthy event for non-active cluster should not immediately revert active cluster - assertEquals(provider.getCluster(endpoint2), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(), "Active cluster should remain endpoint2"); - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Grace period should still be in effect"); - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Further UNHEALTHY for non-active cluster is a no-op - assertEquals(provider.getCluster(endpoint2), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(), "Active cluster unchanged"); - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), "Still in grace period"); + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Still in grace period"); } } } @@ -131,26 +129,25 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception { @Test void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDbConnectionProvider 
         // This test verifies that multiple endpoints are properly initialized
         // Verify both clusters are initialized properly
-        assertNotNull(provider.getCluster(endpoint1), "Cluster 1 should be available");
-        assertNotNull(provider.getCluster(endpoint2), "Cluster 2 should be available");
+        assertNotNull(provider.getDatabase(endpoint1), "Database 1 should be available");
+        assertNotNull(provider.getDatabase(endpoint2), "Database 2 should be available");

         // Both should be healthy (no health checks = assumed healthy)
-        assertTrue(provider.getCluster(endpoint1).isHealthy(), "Cluster 1 should be healthy");
-        assertTrue(provider.getCluster(endpoint2).isHealthy(), "Cluster 2 should be healthy");
+        assertTrue(provider.getDatabase(endpoint1).isHealthy(), "Database 1 should be healthy");
+        assertTrue(provider.getDatabase(endpoint2).isHealthy(), "Database 2 should be healthy");
       }
     }
   }
@@ -158,22 +155,21 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception

   @Test
   void init_single_cluster_initializes_and_is_healthy() throws Exception {
     try (MockedConstruction<ConnectionPool> mockedPool = mockConnectionPool()) {
-      MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
           .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();

-      MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
-          new MultiClusterClientConfig.ClusterConfig[] { cluster1 }).build();
+      MultiDbConfig config = new MultiDbConfig.Builder(
+          new MultiDbConfig.DatabaseConfig[] { cluster1 }).build();

       // This test verifies that the provider initializes correctly and doesn't lose events
       // In practice, with health checks disabled, no events should be generated during init
-      try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
-          config)) {
+      try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
         // Verify successful initialization
-        assertNotNull(provider.getCluster(), "Provider should have initialized successfully");
-        assertEquals(provider.getCluster(endpoint1), provider.getCluster(),
+        assertNotNull(provider.getDatabase(), "Provider should have initialized successfully");
+        assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(),
             "Should have selected the configured cluster");
-        assertTrue(provider.getCluster().isHealthy(),
-            "Cluster should be healthy (assumed healthy with no health checks)");
+        assertTrue(provider.getDatabase().isHealthy(),
+            "Database should be healthy (assumed healthy with no health checks)");
       }
     }
   }
@@ -183,42 +179,41 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception {

   @Test
   void postInit_two_hop_failover_chain_respected() throws Exception {
     try (MockedConstruction<ConnectionPool> mockedPool = mockConnectionPool()) {
-      MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
           .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-      MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
           .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
-      MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig
+      MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
           .builder(endpoint3, clientConfig).weight(0.2f).healthCheckEnabled(false).build();

-      MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
-          new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }).build();
+      MultiDbConfig config = new MultiDbConfig.Builder(
+          new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build();

-      try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
-          config)) {
+      try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
         // First event: endpoint1 (active) becomes UNHEALTHY -> failover to endpoint2, endpoint1
         // enters grace
-        MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+        MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
             HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
-        assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+        assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
             "Endpoint1 should be in grace after unhealthy");
-        assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+        assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
             "Should have failed over to endpoint2");

         // Second event: endpoint2 (now active) becomes UNHEALTHY -> failover to endpoint3
-        MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
+        MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2,
             HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
-        assertTrue(provider.getCluster(endpoint2).isInGracePeriod(),
+        assertTrue(provider.getDatabase(endpoint2).isInGracePeriod(),
             "Endpoint2 should be in grace after unhealthy");
-        assertEquals(provider.getCluster(endpoint3), provider.getCluster(),
+        assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(),
             "Should have failed over to endpoint3");

         // Third event: endpoint1 becomes HEALTHY again -> no immediate switch due to grace period
         // behavior
-        MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+        MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
             HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
-        assertEquals(provider.getCluster(endpoint3), provider.getCluster(),
+        assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(),
             "Active cluster should remain endpoint3");
       }
     }
@@ -227,33 +222,32 @@ void postInit_two_hop_failover_chain_respected() throws Exception {

   @Test
   void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Exception {
     try (MockedConstruction<ConnectionPool> mockedPool = mockConnectionPool()) {
-      MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig
+      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
           .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-      MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig
+      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
           .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();

-      MultiClusterClientConfig config = new MultiClusterClientConfig.Builder(
-          new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build();
+      MultiDbConfig config = new MultiDbConfig.Builder(
+          new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();

-      try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
-          config)) {
+      try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
         // Verify initial state
-        assertEquals(HealthStatus.HEALTHY, provider.getCluster(endpoint1).getHealthStatus(),
+        assertEquals(HealthStatus.HEALTHY, provider.getDatabase(endpoint1).getHealthStatus(),
             "Should start as HEALTHY");

         // Send rapid sequence of events post-init
-        MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+        MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
             HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // triggers failover and grace
-        MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+        MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
             HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // non-active cluster becomes healthy
-        MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
+        MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
             HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // still non-active and in grace; no change

         // Final expectations: endpoint1 is in grace, provider remains on endpoint2
-        assertTrue(provider.getCluster(endpoint1).isInGracePeriod(),
+        assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
             "Endpoint1 should be in grace period");
-        assertEquals(provider.getCluster(endpoint2), provider.getCluster(),
+        assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
             "Active cluster should remain endpoint2");
       }
     }
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index a6deb256eb..e6ebc42b8d 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -9,10 +9,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import redis.clients.jedis.*;
-import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig;
+import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
 import redis.clients.jedis.exceptions.JedisConnectionException;
-import redis.clients.jedis.mcf.ClusterSwitchEventArgs;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider;
+import redis.clients.jedis.mcf.DatabaseSwitchEvent;
+import redis.clients.jedis.mcf.MultiDbConnectionProvider;
 import redis.clients.jedis.util.ClientTestUtil;

 import java.io.IOException;
@@ -62,13 +62,13 @@ public void testFailover() {
         .socketTimeoutMillis(SOCKET_TIMEOUT_MS)
         .connectionTimeoutMillis(CONNECTION_TIMEOUT_MS).build();

-    ClusterConfig primary = ClusterConfig.builder(endpoint.getHostAndPort(0), config)
+    DatabaseConfig primary = DatabaseConfig.builder(endpoint.getHostAndPort(0), config)
         .connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build();

-    ClusterConfig secondary = ClusterConfig.builder(endpoint.getHostAndPort(1), config)
+    DatabaseConfig secondary = DatabaseConfig.builder(endpoint.getHostAndPort(1), config)
         .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();

-    MultiClusterClientConfig multiConfig = MultiClusterClientConfig.builder()
+    MultiDbConfig multiConfig = MultiDbConfig.builder()
       .endpoint(primary)
       .endpoint(secondary)
       .circuitBreakerSlidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS
@@ -82,7 +82,7 @@ public void testFailover() {
       .fastFailover(true)
       .retryOnFailover(false)
       .build();

-    class FailoverReporter implements Consumer<ClusterSwitchEventArgs> {
+    class FailoverReporter implements Consumer<DatabaseSwitchEvent> {
       String currentClusterName = "not set";
@@ -99,10 +99,10 @@ public String getCurrentClusterName() {
       }

       @Override
-      public void accept(ClusterSwitchEventArgs e) {
-        this.currentClusterName = e.getClusterName();
+      public void accept(DatabaseSwitchEvent e) {
+        this.currentClusterName = e.getDatabaseName();
         log.info("\n\n====FailoverEvent=== \nJedis failover to cluster: {}\n====FailoverEvent===\n\n",
-            e.getClusterName());
+            e.getDatabaseName());

         if (failoverHappened) {
           failbackHappened = true;
@@ -208,9 +208,9 @@ public void accept(ClusterSwitchEventArgs e) {
       throw new RuntimeException(e);
     }

-    MultiClusterPooledConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
-    ConnectionPool pool1 = provider.getCluster(endpoint.getHostAndPort(0)).getConnectionPool();
-    ConnectionPool pool2 = provider.getCluster(endpoint.getHostAndPort(1)).getConnectionPool();
+    MultiDbConnectionProvider provider = ClientTestUtil.getConnectionProvider(client);
+    ConnectionPool pool1 = provider.getDatabase(endpoint.getHostAndPort(0)).getConnectionPool();
+    ConnectionPool pool2 = provider.getDatabase(endpoint.getHostAndPort(1)).getConnectionPool();

     await().atMost(Duration.ofSeconds(1)).until(() -> pool1.getNumActive() == 0);
     await().atMost(Duration.ofSeconds(1)).until(() -> pool2.getNumActive() == 0);
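For reference, a minimal standalone sketch of how the renamed surface composes, using only calls that appear in this patch (`MultiDbConfig.builder()`, `DatabaseConfig.builder(...)`, `weight(...)`, `new MultiDbConnectionProvider(config)`, `getDatabase()`, `isHealthy()`, `DatabaseSwitchEvent.getDatabaseName()`). The hostnames are placeholders, and the call that registers the consumer with the provider is deliberately omitted because it is not shown in this diff.

```java
import java.util.function.Consumer;

import redis.clients.jedis.DefaultJedisClientConfig;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisClientConfig;
import redis.clients.jedis.MultiDbConfig;
import redis.clients.jedis.MultiDbConfig.DatabaseConfig;
import redis.clients.jedis.mcf.DatabaseSwitchEvent;
import redis.clients.jedis.mcf.MultiDbConnectionProvider;

class DatabaseSwitchSketch {

  public static void main(String[] args) throws Exception {
    JedisClientConfig clientConfig = DefaultJedisClientConfig.builder().build();

    // Placeholder endpoints; weights mirror the tests above: the heavier endpoint
    // is preferred while healthy, the lighter one is the failover target.
    HostAndPort endpoint1 = new HostAndPort("db1.example.com", 6379);
    HostAndPort endpoint2 = new HostAndPort("db2.example.com", 6379);

    MultiDbConfig config = MultiDbConfig.builder()
        .endpoint(DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f).build())
        .endpoint(DatabaseConfig.builder(endpoint2, clientConfig).weight(0.5f).build())
        .build();

    // Same shape as FailoverReporter above; how it is wired to the provider is not
    // part of this diff, so registration is left out of the sketch.
    Consumer<DatabaseSwitchEvent> reporter =
        e -> System.out.println("Switched to database: " + e.getDatabaseName());

    try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
      // The provider starts on the highest-weight healthy endpoint.
      System.out.println("Active database healthy: " + provider.getDatabase().isHealthy());
    }
  }
}
```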