From 01bce89eab79bc9cfca663379f1cccc11309663b Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 15:14:06 +0300 Subject: [PATCH 01/18] [churn] Fix test-on-docker should run integration tests accidentally running only unit test locally after merge conflict resolved in commit f8de2fe5f280e16295deb44347887be48ad19861 --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 13cb801f34..603328df1c 100644 --- a/Makefile +++ b/Makefile @@ -522,10 +522,10 @@ stop: test: | start mvn-test-local stop mvn-test-local: - @TEST_ENV_PROVIDER=local mvn -Dwith-param-names=true -Dtest=${TEST} clean compile test + @TEST_ENV_PROVIDER=local mvn -Dwith-param-names=true -Dtest=${TEST} clean verify mvn-test: - mvn -Dwith-param-names=true -Dtest=${TEST} clean compile test + mvn -Dwith-param-names=true -Dtest=${TEST} clean verify package: | start mvn-package stop From 838a281def2db67e760461d47781817e2d5c46b9 Mon Sep 17 00:00:00 2001 From: ggivo Date: Fri, 3 Oct 2025 19:43:24 +0300 Subject: [PATCH 02/18] Rename MultiDb classes - MultiClusterPooledConnectionProvider -> MultiDatabaseConnectionProvider - Cluster -> Database - MultiClusterClientConfig -> MultiDatabaseConfig - ClusterConfig -> DatabaseConfig --- pom.xml | 2 +- ...ntConfig.java => MultiDatabaseConfig.java} | 215 +++++---- .../redis/clients/jedis/MultiDbClient.java | 56 +-- .../redis/clients/jedis/UnifiedJedis.java | 12 +- .../jedis/builders/MultiDbClientBuilder.java | 25 +- .../mcf/CircuitBreakerCommandExecutor.java | 24 +- .../jedis/mcf/CircuitBreakerFailoverBase.java | 40 +- ...cuitBreakerFailoverConnectionProvider.java | 15 +- .../mcf/CircuitBreakerThresholdsAdapter.java | 14 +- .../jedis/mcf/ClusterSwitchEventArgs.java | 6 +- .../redis/clients/jedis/mcf/EchoStrategy.java | 2 +- .../jedis/mcf/JedisFailoverException.java | 10 +- .../jedis/mcf/MultiClusterPipeline.java | 4 +- .../jedis/mcf/MultiClusterTransaction.java | 6 +- ...a => MultiDatabaseConnectionProvider.java} | 
415 +++++++++--------- .../clients/jedis/MultiDbClientTest.java | 18 +- ...UnifiedJedisConstructorReflectionTest.java | 3 +- .../failover/FailoverIntegrationTest.java | 85 ++-- .../mcf/ActiveActiveLocalFailoverTest.java | 25 +- .../mcf/CircuitBreakerThresholdsTest.java | 71 ++- .../mcf/ClusterEvaluateThresholdsTest.java | 22 +- .../clients/jedis/mcf/DefaultValuesTest.java | 10 +- .../mcf/FailbackMechanismIntegrationTest.java | 193 ++++---- .../jedis/mcf/FailbackMechanismUnitTest.java | 64 +-- .../jedis/mcf/HealthCheckIntegrationTest.java | 34 +- .../clients/jedis/mcf/HealthCheckTest.java | 57 ++- .../MultiClusterDynamicEndpointUnitTest.java | 78 ++-- ...ultiClusterFailoverAttemptsConfigTest.java | 56 +-- .../mcf/MultiClusterInitializationTest.java | 67 ++- ...ClusterPooledConnectionProviderHelper.java | 20 - ...MultiDatabaseConnectionProviderHelper.java | 20 + ... MultiDatabaseConnectionProviderTest.java} | 120 ++--- .../jedis/mcf/PeriodicFailbackTest.java | 103 +++-- .../jedis/misc/AutomaticFailoverTest.java | 60 +-- ...erProviderHealthStatusChangeEventTest.java | 142 +++--- .../scenario/ActiveActiveFailoverTest.java | 16 +- 36 files changed, 1036 insertions(+), 1074 deletions(-) rename src/main/java/redis/clients/jedis/{MultiClusterClientConfig.java => MultiDatabaseConfig.java} (88%) rename src/main/java/redis/clients/jedis/mcf/{MultiClusterPooledConnectionProvider.java => MultiDatabaseConnectionProvider.java} (65%) delete mode 100644 src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java create mode 100644 src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java rename src/test/java/redis/clients/jedis/mcf/{MultiClusterPooledConnectionProviderTest.java => MultiDatabaseConnectionProviderTest.java} (69%) diff --git a/pom.xml b/pom.xml index fd98539365..957561819d 100644 --- a/pom.xml +++ b/pom.xml @@ -488,7 +488,7 @@ **/Health*.java **/*IT.java **/scenario/RestEndpointUtil.java - 
src/main/java/redis/clients/jedis/MultiClusterClientConfig.java + src/main/java/redis/clients/jedis/MultiDatabaseConfig.java src/main/java/redis/clients/jedis/HostAndPort.java **/builders/*.java **/MultiDb*.java diff --git a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java b/src/main/java/redis/clients/jedis/MultiDatabaseConfig.java similarity index 88% rename from src/main/java/redis/clients/jedis/MultiClusterClientConfig.java rename to src/main/java/redis/clients/jedis/MultiDatabaseConfig.java index 35e51ee600..96ab7d7971 100644 --- a/src/main/java/redis/clients/jedis/MultiClusterClientConfig.java +++ b/src/main/java/redis/clients/jedis/MultiDatabaseConfig.java @@ -21,8 +21,8 @@ * This configuration enables seamless failover between multiple Redis clusters, databases, or * endpoints by providing comprehensive settings for retry logic, circuit breaker behavior, health * checks, and failback mechanisms. It is designed to work with - * {@link redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider} to provide high availability - * and disaster recovery capabilities. + * {@link redis.clients.jedis.mcf.MultiDatabaseConnectionProvider} to provide high availability and + * disaster recovery capabilities. *

*

* Key Features: @@ -49,27 +49,26 @@ * { * @code * // Configure individual clusters - * ClusterConfig primary = ClusterConfig.builder(primaryEndpoint, clientConfig).weight(1.0f) + * DatabaseConfig primary = DatabaseConfig.builder(primaryEndpoint, clientConfig).weight(1.0f) * .build(); * - * ClusterConfig secondary = ClusterConfig.builder(secondaryEndpoint, clientConfig).weight(0.5f) + * DatabaseConfig secondary = DatabaseConfig.builder(secondaryEndpoint, clientConfig).weight(0.5f) * .healthCheckEnabled(true).build(); * * // Build multi-cluster configuration - * MultiClusterClientConfig config = MultiClusterClientConfig.builder(primary, secondary) + * MultiDatabaseConfig config = MultiDatabaseConfig.builder(primary, secondary) * .circuitBreakerFailureRateThreshold(10.0f).retryMaxAttempts(3).failbackSupported(true) * .gracePeriod(10000).build(); * * // Use with connection provider - * MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - * config); + * MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config); * } * *

* The configuration leverages Resilience4j for * circuit breaker and retry implementations, providing battle-tested fault tolerance patterns. *

- * @see redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider + * @see redis.clients.jedis.mcf.MultiDatabaseConnectionProvider * @see redis.clients.jedis.mcf.HealthCheckStrategy * @see redis.clients.jedis.mcf.EchoStrategy * @see redis.clients.jedis.mcf.LagAwareStrategy @@ -77,7 +76,7 @@ */ // TODO: move @Experimental -public final class MultiClusterClientConfig { +public final class MultiDatabaseConfig { /** * Functional interface for creating {@link HealthCheckStrategy} instances for specific Redis @@ -158,7 +157,7 @@ public static interface StrategySupplier { private static final int DELAY_IN_BETWEEN_FAILOVER_ATTEMPTS_DEFAULT = 12000; /** Array of cluster configurations defining the available Redis endpoints and their settings. */ - private final ClusterConfig[] clusterConfigs; + private final DatabaseConfig[] databaseConfigs; // ============ Retry Configuration ============ // Based on Resilience4j Retry: https://resilience4j.readme.io/docs/retry @@ -438,35 +437,35 @@ public static interface StrategySupplier { private int delayInBetweenFailoverAttempts; /** - * Constructs a new MultiClusterClientConfig with the specified cluster configurations. + * Constructs a new MultiDatabaseConfig with the specified cluster configurations. *

* This constructor validates that at least one cluster configuration is provided and that all * configurations are non-null. Use the {@link Builder} class for more convenient configuration * with default values. *

- * @param clusterConfigs array of cluster configurations defining the available Redis endpoints - * @throws JedisValidationException if clusterConfigs is null or empty + * @param databaseConfigs array of cluster configurations defining the available Redis endpoints + * @throws JedisValidationException if databaseConfigs is null or empty * @throws IllegalArgumentException if any cluster configuration is null - * @see Builder#Builder(ClusterConfig[]) + * @see Builder#Builder(DatabaseConfig[]) */ - public MultiClusterClientConfig(ClusterConfig[] clusterConfigs) { + public MultiDatabaseConfig(DatabaseConfig[] databaseConfigs) { - if (clusterConfigs == null || clusterConfigs.length < 1) throw new JedisValidationException( - "ClusterClientConfigs are required for MultiClusterPooledConnectionProvider"); + if (databaseConfigs == null || databaseConfigs.length < 1) throw new JedisValidationException( + "DatabaseClientConfigs are required for MultiDatabaseConnectionProvider"); - for (ClusterConfig clusterConfig : clusterConfigs) { - if (clusterConfig == null) - throw new IllegalArgumentException("ClusterClientConfigs must not contain null elements"); + for (DatabaseConfig databaseConfig : databaseConfigs) { + if (databaseConfig == null) + throw new IllegalArgumentException("DatabaseClientConfigs must not contain null elements"); } - this.clusterConfigs = clusterConfigs; + this.databaseConfigs = databaseConfigs; } /** * Returns the array of cluster configurations defining available Redis endpoints. * @return array of cluster configurations, never null or empty */ - public ClusterConfig[] getClusterConfigs() { - return clusterConfigs; + public DatabaseConfig[] getDatabaseConfigs() { + return databaseConfigs; } /** @@ -637,79 +636,79 @@ public boolean isFastFailover() { } /** - * Creates a new Builder instance for configuring MultiClusterClientConfig. + * Creates a new Builder instance for configuring MultiDatabaseConfig. *

* At least one cluster configuration must be added to the builder before calling build(). Use the * endpoint() methods to add cluster configurations. *

* @return new Builder instance - * @throws JedisValidationException if clusterConfigs is null or empty - * @see Builder#Builder(ClusterConfig[]) + * @throws JedisValidationException if databaseConfigs is null or empty + * @see Builder#Builder(DatabaseConfig[]) */ public static Builder builder() { return new Builder(); } /** - * Creates a new Builder instance for configuring MultiClusterClientConfig. - * @param clusterConfigs array of cluster configurations defining available Redis endpoints + * Creates a new Builder instance for configuring MultiDatabaseConfig. + * @param databaseConfigs array of cluster configurations defining available Redis endpoints * @return new Builder instance - * @throws JedisValidationException if clusterConfigs is null or empty - * @see Builder#Builder(ClusterConfig[]) + * @throws JedisValidationException if databaseConfigs is null or empty + * @see Builder#Builder(DatabaseConfig[]) */ - public static Builder builder(ClusterConfig[] clusterConfigs) { - return new Builder(clusterConfigs); + public static Builder builder(DatabaseConfig[] databaseConfigs) { + return new Builder(databaseConfigs); } /** - * Creates a new Builder instance for configuring MultiClusterClientConfig. - * @param clusterConfigs list of cluster configurations defining available Redis endpoints + * Creates a new Builder instance for configuring MultiDatabaseConfig. + * @param databaseConfigs list of cluster configurations defining available Redis endpoints * @return new Builder instance - * @throws JedisValidationException if clusterConfigs is null or empty + * @throws JedisValidationException if databaseConfigs is null or empty * @see Builder#Builder(List) */ - public static Builder builder(List clusterConfigs) { - return new Builder(clusterConfigs); + public static Builder builder(List databaseConfigs) { + return new Builder(databaseConfigs); } /** * Configuration class for individual Redis cluster endpoints within a multi-cluster setup. *

- * Each ClusterConfig represents a single Redis endpoint that can participate in the multi-cluster - * failover system. It encapsulates the connection details, weight for priority-based selection, - * and health check configuration for that endpoint. + * Each DatabaseConfig represents a single Redis endpoint that can participate in the + * multi-cluster failover system. It encapsulates the connection details, weight for + * priority-based selection, and health check configuration for that endpoint. *

* @see Builder * @see StrategySupplier * @see redis.clients.jedis.mcf.HealthCheckStrategy */ - public static class ClusterConfig { + public static class DatabaseConfig { - /** The Redis endpoint (host and port) for this cluster. */ + /** The Redis endpoint (host and port) for this database. */ private final Endpoint endpoint; /** Jedis client configuration containing connection settings and authentication. */ private final JedisClientConfig jedisClientConfig; - /** Optional connection pool configuration for managing connections to this cluster. */ + /** Optional connection pool configuration for managing connections to this database. */ private GenericObjectPoolConfig connectionPoolConfig; /** - * Weight value for cluster selection priority. Higher weights indicate higher priority. Default - * value is 1.0f. + * Weight value for database selection priority. Higher weights indicate higher priority. + * Default value is 1.0f. */ private float weight = 1.0f; /** - * Strategy supplier for creating health check instances for this cluster. Default is + * Strategy supplier for creating health check instances for this database. Default is * EchoStrategy.DEFAULT. */ private StrategySupplier healthCheckStrategySupplier; /** - * Constructs a ClusterConfig with basic endpoint and client configuration. + * Constructs a DatabaseConfig with basic endpoint and client configuration. *

- * This constructor creates a cluster configuration with default settings: weight of 1.0f and + * This constructor creates a database configuration with default settings: weight of 1.0f and * EchoStrategy for health checks. Use the {@link Builder} for more advanced configuration * options. *

@@ -717,13 +716,13 @@ public static class ClusterConfig { * @param clientConfig the Jedis client configuration * @throws IllegalArgumentException if endpoint or clientConfig is null */ - public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig) { + public DatabaseConfig(Endpoint endpoint, JedisClientConfig clientConfig) { this.endpoint = endpoint; this.jedisClientConfig = clientConfig; } /** - * Constructs a ClusterConfig with endpoint, client, and connection pool configuration. + * Constructs a DatabaseConfig with endpoint, client, and connection pool configuration. *

* This constructor allows specification of connection pool settings in addition to basic * endpoint configuration. Default weight of 1.0f and EchoStrategy for health checks are used. @@ -733,7 +732,7 @@ public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig) { * @param connectionPoolConfig the connection pool configuration * @throws IllegalArgumentException if endpoint or clientConfig is null */ - public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig, + public DatabaseConfig(Endpoint endpoint, JedisClientConfig clientConfig, GenericObjectPoolConfig connectionPoolConfig) { this.endpoint = endpoint; this.jedisClientConfig = clientConfig; @@ -744,7 +743,7 @@ public ClusterConfig(Endpoint endpoint, JedisClientConfig clientConfig, * Private constructor used by the Builder to create configured instances. * @param builder the builder containing configuration values */ - private ClusterConfig(Builder builder) { + private DatabaseConfig(Builder builder) { this.endpoint = builder.endpoint; this.jedisClientConfig = builder.jedisClientConfig; this.connectionPoolConfig = builder.connectionPoolConfig; @@ -753,7 +752,7 @@ private ClusterConfig(Builder builder) { } /** - * Returns the Redis endpoint (host and port) for this cluster. + * Returns the Redis endpoint (host and port) for this database. * @return the host and port information */ public Endpoint getEndpoint() { @@ -761,7 +760,7 @@ public Endpoint getEndpoint() { } /** - * Creates a new Builder instance for configuring a ClusterConfig. + * Creates a new Builder instance for configuring a DatabaseConfig. * @param endpoint the Redis endpoint (host and port) * @param clientConfig the Jedis client configuration * @return new Builder instance @@ -773,7 +772,7 @@ public static Builder builder(Endpoint endpoint, JedisClientConfig clientConfig) } /** - * Returns the Jedis client configuration for this cluster. + * Returns the Jedis client configuration for this database. 
* @return the client configuration containing connection settings and authentication */ public JedisClientConfig getJedisClientConfig() { @@ -781,7 +780,7 @@ public JedisClientConfig getJedisClientConfig() { } /** - * Returns the connection pool configuration for this cluster. + * Returns the connection pool configuration for this database. * @return the connection pool configuration, may be null if not specified */ public GenericObjectPoolConfig getConnectionPoolConfig() { @@ -789,9 +788,9 @@ public GenericObjectPoolConfig getConnectionPoolConfig() { } /** - * Returns the weight value used for cluster selection priority. + * Returns the weight value used for database selection priority. *

- * Higher weight values indicate higher priority. During failover, clusters are selected in + * Higher weight values indicate higher priority. During failover, databases are selected in * descending order of weight (highest weight first). *

* @return the weight value, default is 1.0f @@ -801,9 +800,9 @@ public float getWeight() { } /** - * Returns the health check strategy supplier for this cluster. + * Returns the health check strategy supplier for this database. *

- * The strategy supplier is used to create health check instances that monitor this cluster's + * The strategy supplier is used to create health check instances that monitor this database's * availability. Returns null if health checks are disabled. *

* @return the health check strategy supplier, or null if health checks are disabled @@ -815,9 +814,9 @@ public StrategySupplier getHealthCheckStrategySupplier() { } /** - * Builder class for creating ClusterConfig instances with fluent configuration API. + * Builder class for creating DatabaseConfig instances with fluent configuration API. *

- * The Builder provides a convenient way to configure cluster settings including connection + * The Builder provides a convenient way to configure database settings including connection * pooling, weight-based priority, and health check strategies. All configuration methods return * the builder instance for method chaining. *

@@ -831,7 +830,7 @@ public StrategySupplier getHealthCheckStrategySupplier() { * */ public static class Builder { - /** The Redis endpoint for this cluster configuration. */ + /** The Redis endpoint for this database configuration. */ private Endpoint endpoint; /** The Jedis client configuration. */ @@ -840,7 +839,7 @@ public static class Builder { /** Optional connection pool configuration. */ private GenericObjectPoolConfig connectionPoolConfig; - /** Weight for cluster selection priority. Default: 1.0f */ + /** Weight for database selection priority. Default: 1.0f */ private float weight = 1.0f; /** Health check strategy supplier. Default: EchoStrategy.DEFAULT */ @@ -858,7 +857,7 @@ public Builder(Endpoint endpoint, JedisClientConfig clientConfig) { } /** - * Sets the connection pool configuration for this cluster. + * Sets the connection pool configuration for this database. *

* Connection pooling helps manage connections efficiently and provides better performance * under load. If not specified, default pooling behavior will be used. @@ -873,19 +872,19 @@ public Builder connectionPoolConfig( } /** - * Sets the weight value for cluster selection priority. + * Sets the weight value for database selection priority. *

- * Weight determines the priority order for cluster selection during failover. Clusters with + * Weight determines the priority order for database selection during failover. Databases with * higher weights are preferred over those with lower weights. The system will attempt to use - * the highest-weight healthy cluster available. + * the highest-weight healthy database available. *

*

* Examples: *

*
    *
  • 1.0f: Standard priority (default)
  • - *
  • 0.8f: Lower priority (secondary cluster)
  • - *
  • 0.1f: Lowest priority (backup cluster)
  • + *
  • 0.8f: Lower priority (secondary database)
  • + *
  • 0.1f: Lowest priority (backup database)
  • *
* @param weight the weight value for priority-based selection * @return this builder instance for method chaining @@ -896,10 +895,10 @@ public Builder weight(float weight) { } /** - * Sets a custom health check strategy supplier for this cluster. + * Sets a custom health check strategy supplier for this database. *

- * The strategy supplier creates health check instances that monitor this cluster's - * availability. Different clusters can use different health check strategies based on their + * The strategy supplier creates health check instances that monitor this database's + * availability. Different databases can use different health check strategies based on their * specific requirements. *

* @param healthCheckStrategySupplier the health check strategy supplier @@ -917,14 +916,14 @@ public Builder healthCheckStrategySupplier(StrategySupplier healthCheckStrategyS } /** - * Sets a specific health check strategy instance for this cluster. + * Sets a specific health check strategy instance for this database. *

* This is a convenience method that wraps the provided strategy in a supplier that always * returns the same instance. Use this when you have a pre-configured strategy instance. *

*

* Note: The same strategy instance will be reused, so ensure it's - * thread-safe if multiple clusters might use it. + * thread-safe if multiple databases might use it. *

* @param healthCheckStrategy the health check strategy instance * @return this builder instance for method chaining @@ -940,15 +939,15 @@ public Builder healthCheckStrategy(HealthCheckStrategy healthCheckStrategy) { } /** - * Enables or disables health checks for this cluster. + * Enables or disables health checks for this database. *

- * When health checks are disabled (false), the cluster will not be proactively monitored for + * When health checks are disabled (false), the database will not be proactively monitored for * availability. This means: *

*
    *
  • No background health check threads will be created
  • - *
  • Failback to this cluster must be triggered manually
  • - *
  • The cluster is assumed to be healthy unless circuit breaker opens
  • + *
  • Failback to this database must be triggered manually
  • + *
  • The database is assumed to be healthy unless circuit breaker opens
  • *
*

* When health checks are enabled (true) and no strategy supplier was previously set, the @@ -967,17 +966,17 @@ public Builder healthCheckEnabled(boolean healthCheckEnabled) { } /** - * Builds and returns a new ClusterConfig instance with the configured settings. - * @return a new ClusterConfig instance + * Builds and returns a new DatabaseConfig instance with the configured settings. + * @return a new DatabaseConfig instance */ - public ClusterConfig build() { - return new ClusterConfig(this); + public DatabaseConfig build() { + return new DatabaseConfig(this); } } } /** - * Builder class for creating MultiClusterClientConfig instances with comprehensive configuration + * Builder class for creating MultiDatabaseConfig instances with comprehensive configuration * options. *

* The Builder provides a fluent API for configuring all aspects of multi-cluster failover @@ -985,13 +984,13 @@ public ClusterConfig build() { * sensible defaults based on production best practices while allowing fine-tuning for specific * requirements. *

- * @see MultiClusterClientConfig - * @see ClusterConfig + * @see MultiDatabaseConfig + * @see DatabaseConfig */ public static class Builder { - /** Array of cluster configurations defining available Redis endpoints. */ - private final List clusterConfigs = new ArrayList<>(); + /** Array of database configurations defining available Redis endpoints. */ + private final List databaseConfigs = new ArrayList<>(); // ============ Retry Configuration Fields ============ /** Maximum number of retry attempts including the initial call. */ @@ -1058,35 +1057,35 @@ public Builder() { /** * Constructs a new Builder with the specified cluster configurations. - * @param clusterConfigs array of cluster configurations defining available Redis endpoints - * @throws JedisValidationException if clusterConfigs is null or empty + * @param databaseConfigs array of cluster configurations defining available Redis endpoints + * @throws JedisValidationException if databaseConfigs is null or empty */ - public Builder(ClusterConfig[] clusterConfigs) { + public Builder(DatabaseConfig[] databaseConfigs) { - this(Arrays.asList(clusterConfigs)); + this(Arrays.asList(databaseConfigs)); } /** - * Constructs a new Builder with the specified cluster configurations. - * @param clusterConfigs list of cluster configurations defining available Redis endpoints - * @throws JedisValidationException if clusterConfigs is null or empty + * Constructs a new Builder with the specified database configurations. + * @param databaseConfigs list of database configurations defining available Redis endpoints + * @throws JedisValidationException if databaseConfigs is null or empty */ - public Builder(List clusterConfigs) { - this.clusterConfigs.addAll(clusterConfigs); + public Builder(List databaseConfigs) { + this.databaseConfigs.addAll(databaseConfigs); } /** * Adds a pre-configured endpoint configuration. *

- * This method allows adding a fully configured ClusterConfig instance, providing maximum + * This method allows adding a fully configured DatabaseConfig instance, providing maximum * flexibility for advanced configurations including custom health check strategies, connection * pool settings, etc. *

- * @param clusterConfig the pre-configured cluster configuration + * @param databaseConfig the pre-configured database configuration * @return this builder */ - public Builder endpoint(ClusterConfig clusterConfig) { - this.clusterConfigs.add(clusterConfig); + public Builder endpoint(DatabaseConfig databaseConfig) { + this.databaseConfigs.add(databaseConfig); return this; } @@ -1104,10 +1103,10 @@ public Builder endpoint(ClusterConfig clusterConfig) { */ public Builder endpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) { - ClusterConfig clusterConfig = ClusterConfig.builder(endpoint, clientConfig).weight(weight) + DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight) .build(); - this.clusterConfigs.add(clusterConfig); + this.databaseConfigs.add(databaseConfig); return this; } @@ -1500,18 +1499,18 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts } /** - * Builds and returns a new MultiClusterClientConfig instance with all configured settings. + * Builds and returns a new MultiDatabaseConfig instance with all configured settings. *

* This method creates the final configuration object by copying all builder settings to the * configuration instance. The builder can be reused after calling build() to create additional * configurations with different settings. *

- * @return a new MultiClusterClientConfig instance with the configured settings + * @return a new MultiDatabaseConfig instance with the configured settings */ - public MultiClusterClientConfig build() { + public MultiDatabaseConfig build() { - MultiClusterClientConfig config = new MultiClusterClientConfig( - this.clusterConfigs.toArray(new ClusterConfig[0])); + MultiDatabaseConfig config = new MultiDatabaseConfig( + this.databaseConfigs.toArray(new DatabaseConfig[0])); // Copy retry configuration config.retryMaxAttempts = this.retryMaxAttempts; diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java index 9df888651c..9224307a56 100644 --- a/src/main/java/redis/clients/jedis/MultiDbClient.java +++ b/src/main/java/redis/clients/jedis/MultiDbClient.java @@ -1,6 +1,6 @@ package redis.clients.jedis; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.builders.MultiDbClientBuilder; import redis.clients.jedis.csc.Cache; @@ -9,7 +9,7 @@ import redis.clients.jedis.mcf.MultiClusterPipeline; import redis.clients.jedis.mcf.MultiClusterTransaction; import redis.clients.jedis.providers.ConnectionProvider; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; import java.util.Set; @@ -43,14 +43,14 @@ * * MultiDbClient client = MultiDbClient.builder() * .multiDbConfig( - * MultiClusterClientConfig.builder() + * MultiDatabaseConfig.builder() * .endpoint( - * ClusterConfig.builder( + * DatabaseConfig.builder( * primary, * DefaultJedisClientConfig.builder().build()) * .weight(100.0f) * .build()) - * .endpoint(ClusterConfig.builder( + * .endpoint(DatabaseConfig.builder( * secondary, * DefaultJedisClientConfig.builder().build()) * .weight(50.0f).build()) @@ -76,9 +76,9 @@ *

* @author Ivo Gaydazhiev * @since 5.2.0 - * @see MultiClusterPooledConnectionProvider + * @see MultiDatabaseConnectionProvider * @see CircuitBreakerCommandExecutor - * @see MultiClusterClientConfig + * @see MultiDatabaseConfig */ @Experimental public class MultiDbClient extends UnifiedJedis { @@ -91,8 +91,7 @@ public class MultiDbClient extends UnifiedJedis { * {@link #builder()} to create instances. *

* @param commandExecutor the command executor (typically CircuitBreakerCommandExecutor) - * @param connectionProvider the connection provider (typically - * MultiClusterPooledConnectionProvider) + * @param connectionProvider the connection provider (typically MultiDatabaseConnectionProvider) * @param commandObjects the command objects * @param redisProtocol the Redis protocol version * @param cache the client-side cache (may be null) @@ -103,16 +102,16 @@ public class MultiDbClient extends UnifiedJedis { } /** - * Returns the underlying MultiClusterPooledConnectionProvider. + * Returns the underlying MultiDatabaseConnectionProvider. *

* This provides access to multi-cluster specific operations like manual failover, health status * monitoring, and cluster switch event handling. *

* @return the multi-cluster connection provider - * @throws ClassCastException if the provider is not a MultiClusterPooledConnectionProvider + * @throws ClassCastException if the provider is not a MultiDatabaseConnectionProvider */ - private MultiClusterPooledConnectionProvider getMultiClusterProvider() { - return (MultiClusterPooledConnectionProvider) this.provider; + private MultiDatabaseConnectionProvider getMultiDatabaseConnectionProvider() { + return (MultiDatabaseConnectionProvider) this.provider; } /** @@ -124,20 +123,20 @@ private MultiClusterPooledConnectionProvider getMultiClusterProvider() { * @param endpoint the endpoint to switch to */ public void setActiveDatabase(Endpoint endpoint) { - getMultiClusterProvider().setActiveCluster(endpoint); + getMultiDatabaseConnectionProvider().setActiveDatabase(endpoint); } /** * Adds a pre-configured cluster configuration. *

- * This method allows adding a fully configured ClusterConfig instance, providing maximum + * This method allows adding a fully configured DatabaseConfig instance, providing maximum * flexibility for advanced configurations including custom health check strategies, connection * pool settings, etc. *

- * @param clusterConfig the pre-configured cluster configuration + * @param databaseConfig the pre-configured database configuration */ - public void addEndpoint(ClusterConfig clusterConfig) { - getMultiClusterProvider().add(clusterConfig); + public void addEndpoint(DatabaseConfig databaseConfig) { + getMultiDatabaseConnectionProvider().add(databaseConfig); } /** @@ -153,10 +152,10 @@ public void addEndpoint(ClusterConfig clusterConfig) { * @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint already exists */ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) { - ClusterConfig clusterConfig = ClusterConfig.builder(endpoint, clientConfig).weight(weight) + DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight) .build(); - getMultiClusterProvider().add(clusterConfig); + getMultiDatabaseConnectionProvider().add(databaseConfig); } /** @@ -167,7 +166,7 @@ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clien * @return the set of all configured endpoints */ public Set getEndpoints() { - return getMultiClusterProvider().getEndpoints(); + return getMultiDatabaseConnectionProvider().getEndpoints(); } /** @@ -179,7 +178,7 @@ public Set getEndpoints() { * @return the health status of the endpoint */ public boolean isHealthy(Endpoint endpoint) { - return getMultiClusterProvider().isHealthy(endpoint); + return getMultiDatabaseConnectionProvider().isHealthy(endpoint); } /** @@ -195,7 +194,7 @@ public boolean isHealthy(Endpoint endpoint) { * healthy clusters available */ public void removeEndpoint(Endpoint endpoint) { - getMultiClusterProvider().remove(endpoint); + getMultiDatabaseConnectionProvider().remove(endpoint); } /** @@ -211,7 +210,7 @@ public void removeEndpoint(Endpoint endpoint) { * or doesn't exist */ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) { - getMultiClusterProvider().forceActiveCluster(endpoint, 
forcedActiveDurationMs); + getMultiDatabaseConnectionProvider().forceActiveDatabase(endpoint, forcedActiveDurationMs); } /** @@ -224,7 +223,7 @@ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) */ @Override public MultiClusterPipeline pipelined() { - return new MultiClusterPipeline(getMultiClusterProvider(), commandObjects); + return new MultiClusterPipeline(getMultiDatabaseConnectionProvider(), commandObjects); } /** @@ -237,7 +236,7 @@ public MultiClusterPipeline pipelined() { */ @Override public MultiClusterTransaction multi() { - return new MultiClusterTransaction((MultiClusterPooledConnectionProvider) provider, true, + return new MultiClusterTransaction((MultiDatabaseConnectionProvider) provider, true, commandObjects); } @@ -252,11 +251,12 @@ public MultiClusterTransaction transaction(boolean doMulti) { "It is not allowed to create Transaction from this " + getClass()); } - return new MultiClusterTransaction(getMultiClusterProvider(), doMulti, commandObjects); + return new MultiClusterTransaction(getMultiDatabaseConnectionProvider(), doMulti, + commandObjects); } public Endpoint getActiveEndpoint() { - return getMultiClusterProvider().getCluster().getEndpoint(); + return getMultiDatabaseConnectionProvider().getDatabase().getEndpoint(); } /** diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java index b175dbd319..ebe5a55f54 100644 --- a/src/main/java/redis/clients/jedis/UnifiedJedis.java +++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java @@ -34,7 +34,7 @@ import redis.clients.jedis.json.JsonObjectMapper; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; import redis.clients.jedis.mcf.MultiClusterPipeline; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; import redis.clients.jedis.mcf.MultiClusterTransaction; import redis.clients.jedis.params.*; import 
redis.clients.jedis.providers.*; @@ -240,7 +240,7 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo *

*/ @Experimental - public UnifiedJedis(MultiClusterPooledConnectionProvider provider) { + public UnifiedJedis(MultiDatabaseConnectionProvider provider) { this(new CircuitBreakerCommandExecutor(provider), provider); } @@ -5099,8 +5099,8 @@ public List tdigestByRevRank(String key, long... ranks) { public PipelineBase pipelined() { if (provider == null) { throw new IllegalStateException("It is not allowed to create Pipeline from this " + getClass()); - } else if (provider instanceof MultiClusterPooledConnectionProvider) { - return new MultiClusterPipeline((MultiClusterPooledConnectionProvider) provider, commandObjects); + } else if (provider instanceof MultiDatabaseConnectionProvider) { + return new MultiClusterPipeline((MultiDatabaseConnectionProvider) provider, commandObjects); } else { return new Pipeline(provider.getConnection(), true, commandObjects); } @@ -5120,8 +5120,8 @@ public AbstractTransaction multi() { public AbstractTransaction transaction(boolean doMulti) { if (provider == null) { throw new IllegalStateException("It is not allowed to create Transaction from this " + getClass()); - } else if (provider instanceof MultiClusterPooledConnectionProvider) { - return new MultiClusterTransaction((MultiClusterPooledConnectionProvider) provider, doMulti, commandObjects); + } else if (provider instanceof MultiDatabaseConnectionProvider) { + return new MultiClusterTransaction((MultiDatabaseConnectionProvider) provider, doMulti, commandObjects); } else { return new Transaction(provider.getConnection(), doMulti, true, commandObjects); } diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java index df3c1f86d6..c4592ec905 100644 --- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java +++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java @@ -2,12 +2,12 @@ import java.util.function.Consumer; -import 
redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.executors.CommandExecutor; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; import redis.clients.jedis.mcf.ClusterSwitchEventArgs; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; import redis.clients.jedis.providers.ConnectionProvider; /** @@ -38,14 +38,14 @@ *

  * MultiDbClient client = MultiDbClient.builder()
  *                 .multiDbConfig(
- *                         MultiClusterClientConfig.builder()
+ *                         MultiDatabaseConfig.builder()
  *                                 .endpoint(
- *                                         ClusterConfig.builder(
+ *                                         DatabaseConfig.builder(
  *                                                         east,
  *                                                         DefaultJedisClientConfig.builder().credentials(credentialsEast).build())
  *                                                 .weight(100.0f)
  *                                                 .build())
- *                                 .endpoint(ClusterConfig.builder(
+ *                                 .endpoint(DatabaseConfig.builder(
  *                                                 west,
  *                                                 DefaultJedisClientConfig.builder().credentials(credentialsWest).build())
  *                                         .weight(50.0f).build())
@@ -67,7 +67,7 @@ public abstract class MultiDbClientBuilder
     extends AbstractClientBuilder, C> {
 
   // Multi-db specific configuration fields
-  private MultiClusterClientConfig multiDbConfig = null;
+  private MultiDatabaseConfig multiDbConfig = null;
   private Consumer databaseSwitchListener = null;
 
   /**
@@ -79,7 +79,7 @@ public abstract class MultiDbClientBuilder
    * @param config the multi-database configuration
    * @return this builder
    */
-  public MultiDbClientBuilder multiDbConfig(MultiClusterClientConfig config) {
+  public MultiDbClientBuilder multiDbConfig(MultiDatabaseConfig config) {
     this.multiDbConfig = config;
     return this;
   }
@@ -107,18 +107,17 @@ protected MultiDbClientBuilder self() {
   @Override
   protected ConnectionProvider createDefaultConnectionProvider() {
 
-    if (this.multiDbConfig == null || this.multiDbConfig.getClusterConfigs() == null
-        || this.multiDbConfig.getClusterConfigs().length < 1) {
+    if (this.multiDbConfig == null || this.multiDbConfig.getDatabaseConfigs() == null
+        || this.multiDbConfig.getDatabaseConfigs().length < 1) {
       throw new IllegalArgumentException("At least one endpoint must be specified");
     }
 
     // Create the multi-cluster connection provider
-    MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(
-        multiDbConfig);
+    MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(multiDbConfig);
 
     // Set database switch listener if provided
     if (this.databaseSwitchListener != null) {
-      provider.setClusterSwitchListener(this.databaseSwitchListener);
+      provider.setDatabaseSwitchListener(this.databaseSwitchListener);
     }
 
     return provider;
@@ -128,7 +127,7 @@ protected ConnectionProvider createDefaultConnectionProvider() {
   protected CommandExecutor createDefaultCommandExecutor() {
     // For multi-db clients, we always use CircuitBreakerCommandExecutor
     return new CircuitBreakerCommandExecutor(
-        (MultiClusterPooledConnectionProvider) this.connectionProvider);
+        (MultiDatabaseConnectionProvider) this.connectionProvider);
   }
 
   @Override
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
index 90f269bd70..5a5f24e063 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java
@@ -9,7 +9,7 @@
 import redis.clients.jedis.annots.Experimental;
 import redis.clients.jedis.exceptions.JedisConnectionException;
 import redis.clients.jedis.executors.CommandExecutor;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
 
 /**
  * @author Allen Terleto (aterleto)
@@ -24,26 +24,26 @@
 public class CircuitBreakerCommandExecutor extends CircuitBreakerFailoverBase
     implements CommandExecutor {
 
-  public CircuitBreakerCommandExecutor(MultiClusterPooledConnectionProvider provider) {
+  public CircuitBreakerCommandExecutor(MultiDatabaseConnectionProvider provider) {
     super(provider);
   }
 
   @Override
   public  T executeCommand(CommandObject commandObject) {
-    Cluster cluster = provider.getCluster(); // Pass this by reference for thread safety
+    Database database = provider.getDatabase(); // Pass this by reference for thread safety
 
     DecorateSupplier supplier = Decorators
-        .ofSupplier(() -> this.handleExecuteCommand(commandObject, cluster));
+        .ofSupplier(() -> this.handleExecuteCommand(commandObject, database));
 
-    supplier.withCircuitBreaker(cluster.getCircuitBreaker());
-    supplier.withRetry(cluster.getRetry());
+    supplier.withCircuitBreaker(database.getCircuitBreaker());
+    supplier.withRetry(database.getRetry());
     supplier.withFallback(provider.getFallbackExceptionList(),
-      e -> this.handleClusterFailover(commandObject, cluster));
+      e -> this.handleClusterFailover(commandObject, database));
     try {
       return supplier.decorate().get();
     } catch (Exception e) {
-      if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveCluster(cluster)) {
-        clusterFailover(cluster);
+      if (database.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(database)) {
+        clusterFailover(database);
       }
       throw e;
     }
@@ -52,7 +52,7 @@ public  T executeCommand(CommandObject commandObject) {
   /**
    * Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios
    */
-  private  T handleExecuteCommand(CommandObject commandObject, Cluster cluster) {
+  private  T handleExecuteCommand(CommandObject commandObject, Database cluster) {
     Connection connection;
     try {
       connection = cluster.getConnection();
@@ -63,7 +63,7 @@ private  T handleExecuteCommand(CommandObject commandObject, Cluster clust
     try {
       return connection.executeCommand(commandObject);
     } catch (Exception e) {
-      if (cluster.retryOnFailover() && !isActiveCluster(cluster)
+      if (cluster.retryOnFailover() && !isActiveDatabase(cluster)
           && isCircuitBreakerTrackedException(e, cluster)) {
         throw new ConnectionFailoverException(
             "Command failed during failover: " + cluster.getCircuitBreaker().getName(), e);
@@ -78,7 +78,7 @@ && isCircuitBreakerTrackedException(e, cluster)) {
    * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
    * failure scenarios
    */
-  private  T handleClusterFailover(CommandObject commandObject, Cluster cluster) {
+  private  T handleClusterFailover(CommandObject commandObject, Database cluster) {
 
     clusterFailover(cluster);
 
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
index 40141fb009..cbe97f27a8 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java
@@ -6,7 +6,7 @@
 import java.util.concurrent.locks.ReentrantLock;
 
 import redis.clients.jedis.annots.Experimental;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
 import redis.clients.jedis.util.IOUtils;
 
 /**
@@ -23,9 +23,9 @@
 public class CircuitBreakerFailoverBase implements AutoCloseable {
   private final Lock lock = new ReentrantLock(true);
 
-  protected final MultiClusterPooledConnectionProvider provider;
+  protected final MultiDatabaseConnectionProvider provider;
 
-  public CircuitBreakerFailoverBase(MultiClusterPooledConnectionProvider provider) {
+  public CircuitBreakerFailoverBase(MultiDatabaseConnectionProvider provider) {
     this.provider = provider;
   }
 
@@ -38,10 +38,10 @@ public void close() {
    * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
    * failure scenarios
    */
-  protected void clusterFailover(Cluster cluster) {
+  protected void clusterFailover(Database database) {
     lock.lock();
 
-    CircuitBreaker circuitBreaker = cluster.getCircuitBreaker();
+    CircuitBreaker circuitBreaker = database.getCircuitBreaker();
     try {
       // Check state to handle race conditions since iterateActiveCluster() is
       // non-idempotent
@@ -51,29 +51,29 @@ protected void clusterFailover(Cluster cluster) {
         // event publishing.
         // To recover/transition from this forced state the user will need to manually failback
 
-        Cluster activeCluster = provider.getCluster();
-        // This should be possible only if active cluster is switched from by other reasons than
+        Database activeDatabase = provider.getDatabase();
+        // This should be possible only if the active database was switched away for reasons other than
         // circuit breaker, just before circuit breaker triggers
-        if (activeCluster != cluster) {
+        if (activeDatabase != database) {
           return;
         }
 
-        cluster.setGracePeriod();
+        database.setGracePeriod();
         circuitBreaker.transitionToForcedOpenState();
 
-        // Iterating the active cluster will allow subsequent calls to the executeCommand() to use
+        // Iterating the active database will allow subsequent calls to executeCommand() to use
         // the next
-        // cluster's connection pool - according to the configuration's prioritization/order/weight
-        provider.switchToHealthyCluster(SwitchReason.CIRCUIT_BREAKER, cluster);
+        // database's connection pool - according to the configuration's prioritization/order/weight
+        provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database);
       }
       // this check relies on the fact that many failover attempts can hit with the same CB,
       // only the first one will trigger a failover, and make the CB FORCED_OPEN.
-      // when the rest reaches here, the active cluster is already the next one, and should be
+      // when the rest reaches here, the active database is already the next one, and should be
       // different than
       // active CB. If its the same one and there are no more clusters to failover to, then throw an
       // exception
-      else if (cluster == provider.getCluster()) {
-        provider.switchToHealthyCluster(SwitchReason.CIRCUIT_BREAKER, cluster);
+      else if (database == provider.getDatabase()) {
+        provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database);
       }
       // Ignore exceptions since we are already in a failure state
     } finally {
@@ -81,13 +81,13 @@ else if (cluster == provider.getCluster()) {
     }
   }
 
-  boolean isActiveCluster(Cluster cluster) {
-    Cluster activeCluster = provider.getCluster();
-    return activeCluster != null && activeCluster.equals(cluster);
+  boolean isActiveDatabase(Database database) {
+    Database activeDatabase = provider.getDatabase();
+    return activeDatabase != null && activeDatabase.equals(database);
   }
 
-  static boolean isCircuitBreakerTrackedException(Exception e, Cluster cluster) {
-    return cluster.getCircuitBreaker().getCircuitBreakerConfig().getRecordExceptionPredicate()
+  static boolean isCircuitBreakerTrackedException(Exception e, Database database) {
+    return database.getCircuitBreaker().getCircuitBreakerConfig().getRecordExceptionPredicate()
         .test(e);
   }
 }
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
index 51a5d35788..b45cd04c61 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java
@@ -6,23 +6,22 @@
 
 import redis.clients.jedis.Connection;
 import redis.clients.jedis.annots.Experimental;
-import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster;
+import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database;
 
 /**
  * ConnectionProvider with built-in retry, circuit-breaker, and failover to another cluster/database
  * endpoint. With this executor users can seamlessly failover to Disaster Recovery (DR), Backup, and
- * Active-Active cluster(s) by using simple configuration which is passed through from Resilience4j
- * - https://resilience4j.readme.io/docs
+ * Active-Active cluster(s) by using simple configuration.
  */
 @Experimental
 public class CircuitBreakerFailoverConnectionProvider extends CircuitBreakerFailoverBase {
 
-  public CircuitBreakerFailoverConnectionProvider(MultiClusterPooledConnectionProvider provider) {
+  public CircuitBreakerFailoverConnectionProvider(MultiDatabaseConnectionProvider provider) {
     super(provider);
   }
 
   public Connection getConnection() {
-    Cluster cluster = provider.getCluster(); // Pass this by reference for thread safety
+    Database cluster = provider.getDatabase(); // Pass this by reference for thread safety
 
     DecorateSupplier supplier = Decorators
         .ofSupplier(() -> this.handleGetConnection(cluster));
@@ -35,7 +34,7 @@ public Connection getConnection() {
     try {
       return supplier.decorate().get();
     } catch (Exception e) {
-      if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveCluster(cluster)) {
+      if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(cluster)) {
         clusterFailover(cluster);
       }
       throw e;
@@ -45,7 +44,7 @@ public Connection getConnection() {
   /**
    * Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios
    */
-  private Connection handleGetConnection(Cluster cluster) {
+  private Connection handleGetConnection(Database cluster) {
     Connection connection = cluster.getConnection();
     connection.ping();
     return connection;
@@ -55,7 +54,7 @@ private Connection handleGetConnection(Cluster cluster) {
    * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker
    * failure scenarios
    */
-  private Connection handleClusterFailover(Cluster cluster) {
+  private Connection handleClusterFailover(Database cluster) {
 
     clusterFailover(cluster);
 
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
index 92211ff2ac..ad37099c50 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
@@ -1,7 +1,7 @@
 package redis.clients.jedis.mcf;
 
 import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
-import redis.clients.jedis.MultiClusterClientConfig;
+import redis.clients.jedis.MultiDatabaseConfig;
 
 /**
  * Adapter that disables Resilience4j's built-in circuit breaker evaluation and help delegate
@@ -9,10 +9,10 @@
  * 

* This adapter sets maximum values for failure rate (100%) and minimum calls (Integer.MAX_VALUE) to * effectively disable Resilience4j's automatic circuit breaker transitions, allowing - * {@link MultiClusterPooledConnectionProvider.Cluster#evaluateThresholds(boolean)} to control when - * the circuit breaker opens based on both minimum failure count AND failure rate. + * {@link MultiDatabaseConnectionProvider.Database#evaluateThresholds(boolean)} to control when the + * circuit breaker opens based on both minimum failure count AND failure rate. *

- * @see MultiClusterPooledConnectionProvider.Cluster#evaluateThresholds(boolean) + * @see MultiDatabaseConnectionProvider.Database#evaluateThresholds(boolean) */ class CircuitBreakerThresholdsAdapter { /** Maximum failure rate threshold (100%) to disable Resilience4j evaluation */ @@ -67,9 +67,9 @@ int getSlidingWindowSize() { * method controls circuit breaker state based on the original configuration's dual-threshold * logic. *

- * @param multiClusterClientConfig configuration containing sliding window size + * @param multiDatabaseConfig configuration containing sliding window size */ - CircuitBreakerThresholdsAdapter(MultiClusterClientConfig multiClusterClientConfig) { + CircuitBreakerThresholdsAdapter(MultiDatabaseConfig multiDatabaseConfig) { // IMPORTANT: failureRateThreshold is set to max theoretically disable Resilience4j's evaluation // and rely on our custom evaluateThresholds() logic. @@ -79,6 +79,6 @@ int getSlidingWindowSize() { // and rely on our custom evaluateThresholds() logic. minimumNumberOfCalls = Integer.MAX_VALUE; - slidingWindowSize = multiClusterClientConfig.getCircuitBreakerSlidingWindowSize(); + slidingWindowSize = multiDatabaseConfig.getCircuitBreakerSlidingWindowSize(); } } diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java index 1fe6cebe4d..2c3e283445 100644 --- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java +++ b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java @@ -1,7 +1,7 @@ package redis.clients.jedis.mcf; import redis.clients.jedis.Endpoint; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; public class ClusterSwitchEventArgs { @@ -9,10 +9,10 @@ public class ClusterSwitchEventArgs { private final String ClusterName; private final Endpoint Endpoint; - public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Cluster cluster) { + public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Database database) { this.reason = reason; // TODO: @ggivo do we need cluster name? 
- this.ClusterName = cluster.getCircuitBreaker().getName(); + this.ClusterName = database.getCircuitBreaker().getName(); this.Endpoint = endpoint; } diff --git a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java index 3c73e17d6f..51173ace31 100644 --- a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java +++ b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java @@ -8,7 +8,7 @@ import redis.clients.jedis.JedisClientConfig; import redis.clients.jedis.JedisPooled; import redis.clients.jedis.UnifiedJedis; -import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier; +import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier; public class EchoStrategy implements HealthCheckStrategy { private static final int MAX_HEALTH_CHECK_POOL_SIZE = 2; diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java index 3543517703..fec047824f 100644 --- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java +++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java @@ -11,7 +11,7 @@ * @see JedisFailoverException.JedisTemporarilyNotAvailableException */ public class JedisFailoverException extends JedisConnectionException { - private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiClusterClientConfig was not " + private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiDatabaseConfig was not " + "provided with an additional cluster/database endpoint according to its prioritized sequence. " + "If applicable, consider falling back OR restarting with an available cluster/database endpoint"; @@ -28,8 +28,8 @@ public JedisFailoverException() { * the max number of failover attempts has been exceeded. And there is still no healthy cluster. *

* See the configuration properties - * {@link redis.clients.jedis.MultiClusterClientConfig#maxNumFailoverAttempts} and - * {@link redis.clients.jedis.MultiClusterClientConfig#delayInBetweenFailoverAttempts} for more + * {@link redis.clients.jedis.MultiDatabaseConfig#maxNumFailoverAttempts} and + * {@link redis.clients.jedis.MultiDatabaseConfig#delayInBetweenFailoverAttempts} for more * details. */ public static class JedisPermanentlyNotAvailableException extends JedisFailoverException { @@ -49,8 +49,8 @@ public JedisPermanentlyNotAvailableException() { * temporary condition and it is possible that there will be a healthy cluster available. *

* See the configuration properties - * {@link redis.clients.jedis.MultiClusterClientConfig#maxNumFailoverAttempts} and - * {@link redis.clients.jedis.MultiClusterClientConfig#delayInBetweenFailoverAttempts} for more + * {@link redis.clients.jedis.MultiDatabaseConfig#maxNumFailoverAttempts} and + * {@link redis.clients.jedis.MultiDatabaseConfig#delayInBetweenFailoverAttempts} for more * details. */ public static class JedisTemporarilyNotAvailableException extends JedisFailoverException { diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java index c227b27e99..d23f56411f 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java @@ -21,7 +21,7 @@ public class MultiClusterPipeline extends PipelineBase implements Closeable { private final Queue>> commands = new LinkedList<>(); @Deprecated - public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider) { + public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider) { super(new CommandObjects()); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); @@ -32,7 +32,7 @@ public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider) } } - public MultiClusterPipeline(MultiClusterPooledConnectionProvider pooledProvider, + public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider, CommandObjects commandObjects) { super(commandObjects); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java index 2de927826c..6f634549e2 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java @@ -39,7 
+39,7 @@ public class MultiClusterTransaction extends TransactionBase { * @param provider */ @Deprecated - public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider) { + public MultiClusterTransaction(MultiDatabaseConnectionProvider provider) { this(provider, true); } @@ -50,7 +50,7 @@ public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider) { * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI */ @Deprecated - public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, boolean doMulti) { + public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean doMulti) { this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); try (Connection connection = failoverProvider.getConnection()) { @@ -68,7 +68,7 @@ public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, bo * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI * @param commandObjects command objects */ - public MultiClusterTransaction(MultiClusterPooledConnectionProvider provider, boolean doMulti, + public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean doMulti, CommandObjects commandObjects) { super(commandObjects); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java similarity index 65% rename from src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java rename to src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java index a389a0d7b4..9b7d12cb96 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java @@ -33,7 +33,7 @@ import 
org.slf4j.LoggerFactory; import redis.clients.jedis.*; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.annots.VisibleForTesting; import redis.clients.jedis.exceptions.JedisConnectionException; @@ -41,7 +41,7 @@ import redis.clients.jedis.exceptions.JedisValidationException; import redis.clients.jedis.mcf.JedisFailoverException.*; import redis.clients.jedis.providers.ConnectionProvider; -import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier; +import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier; import redis.clients.jedis.util.Pool; /** @@ -53,35 +53,34 @@ * configuration which is passed through from Resilience4j - * docs *

- * Support for manual failback is provided by way of {@link #setActiveCluster(Endpoint)} + * Support for manual failback is provided by way of {@link #setActiveDatabase(Endpoint)} *

*/ @Experimental -public class MultiClusterPooledConnectionProvider implements ConnectionProvider { +public class MultiDatabaseConnectionProvider implements ConnectionProvider { private final Logger log = LoggerFactory.getLogger(getClass()); /** - * Ordered map of cluster/database endpoints which were provided at startup via the - * MultiClusterClientConfig. Users can move down (failover) or (up) failback the map depending on + * Ordered map of database. Users can move down (failover) or (up) failback the map depending on * their availability and order. */ - private final Map multiClusterMap = new ConcurrentHashMap<>(); + private final Map databaseMap = new ConcurrentHashMap<>(); /** - * Indicates the actively used cluster/database endpoint (connection pool) amongst the - * pre-configured list which were provided at startup via the MultiClusterClientConfig. All - * traffic will be routed with this cluster/database + * Indicates the actively used database endpoint (connection pool) amongst the pre-configured list + * which were provided at startup via the MultiDatabaseConfig. All traffic will be routed with + * this database */ - private volatile Cluster activeCluster; + private volatile Database activeDatabase; - private final Lock activeClusterChangeLock = new ReentrantLock(true); + private final Lock activeDatabaseChangeLock = new ReentrantLock(true); /** * Functional interface for listening to cluster switch events. The event args contain the reason * for the switch, the endpoint, and the cluster. 
*/ - private Consumer clusterSwitchListener; + private Consumer databaseSwitchListener; private List> fallbackExceptionList; @@ -99,33 +98,33 @@ public class MultiClusterPooledConnectionProvider implements ConnectionProvider return t; }); - // Store retry and circuit breaker configs for dynamic cluster addition/removal + // Store retry and circuit breaker configs for dynamic database addition/removal private RetryConfig retryConfig; private CircuitBreakerConfig circuitBreakerConfig; - private MultiClusterClientConfig multiClusterClientConfig; + private MultiDatabaseConfig multiDatabaseConfig; private AtomicLong failoverFreezeUntil = new AtomicLong(0); private AtomicInteger failoverAttemptCount = new AtomicInteger(0); - public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiClusterClientConfig) { + public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig) { - if (multiClusterClientConfig == null) throw new JedisValidationException( - "MultiClusterClientConfig must not be NULL for MultiClusterPooledConnectionProvider"); + if (multiDatabaseConfig == null) throw new JedisValidationException( + "MultiDatabaseConfig must not be NULL for MultiDatabaseConnectionProvider"); - this.multiClusterClientConfig = multiClusterClientConfig; + this.multiDatabaseConfig = multiDatabaseConfig; ////////////// Configure Retry //////////////////// RetryConfig.Builder retryConfigBuilder = RetryConfig.custom(); - retryConfigBuilder.maxAttempts(multiClusterClientConfig.getRetryMaxAttempts()); + retryConfigBuilder.maxAttempts(multiDatabaseConfig.getRetryMaxAttempts()); retryConfigBuilder.intervalFunction( - IntervalFunction.ofExponentialBackoff(multiClusterClientConfig.getRetryWaitDuration(), - multiClusterClientConfig.getRetryWaitDurationExponentialBackoffMultiplier())); + IntervalFunction.ofExponentialBackoff(multiDatabaseConfig.getRetryWaitDuration(), + multiDatabaseConfig.getRetryWaitDurationExponentialBackoffMultiplier())); 
retryConfigBuilder.failAfterMaxAttempts(false); // JedisConnectionException will be thrown retryConfigBuilder.retryExceptions( - multiClusterClientConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new)); + multiDatabaseConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new)); - List retryIgnoreExceptionList = multiClusterClientConfig.getRetryIgnoreExceptionList(); + List retryIgnoreExceptionList = multiDatabaseConfig.getRetryIgnoreExceptionList(); if (retryIgnoreExceptionList != null) retryConfigBuilder.ignoreExceptions(retryIgnoreExceptionList.stream().toArray(Class[]::new)); @@ -136,14 +135,14 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter( - multiClusterClientConfig); + multiDatabaseConfig); circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls()); circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold()); circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize()); circuitBreakerConfigBuilder.slidingWindowType(adapter.getSlidingWindowType()); - circuitBreakerConfigBuilder.recordExceptions(multiClusterClientConfig - .getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new)); + circuitBreakerConfigBuilder.recordExceptions( + multiDatabaseConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new)); circuitBreakerConfigBuilder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State // transitions // are @@ -152,45 +151,45 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste // states // are used - List circuitBreakerIgnoreExceptionList = multiClusterClientConfig + List circuitBreakerIgnoreExceptionList = multiDatabaseConfig .getCircuitBreakerIgnoreExceptionList(); if 
(circuitBreakerIgnoreExceptionList != null) circuitBreakerConfigBuilder .ignoreExceptions(circuitBreakerIgnoreExceptionList.stream().toArray(Class[]::new)); this.circuitBreakerConfig = circuitBreakerConfigBuilder.build(); - ////////////// Configure Cluster Map //////////////////// + ////////////// Configure Database Map //////////////////// - ClusterConfig[] clusterConfigs = multiClusterClientConfig.getClusterConfigs(); + DatabaseConfig[] databaseConfigs = multiDatabaseConfig.getDatabaseConfigs(); - // Now add clusters - health checks will start but events will be queued - for (ClusterConfig config : clusterConfigs) { - addClusterInternal(multiClusterClientConfig, config); + // Now add databases - health checks will start but events will be queued + for (DatabaseConfig config : databaseConfigs) { + addClusterInternal(multiDatabaseConfig, config); } // Initialize StatusTracker for waiting on health check results StatusTracker statusTracker = new StatusTracker(healthStatusManager); // Wait for initial health check results and select active cluster based on weights - activeCluster = waitForInitialHealthyCluster(statusTracker); + activeDatabase = waitForInitialHealthyCluster(statusTracker); // Mark initialization as complete - handleHealthStatusChange can now process events initializationComplete = true; - Cluster temp = activeCluster; + Database temp = activeDatabase; if (!temp.isHealthy()) { - // Race condition: Direct assignment to 'activeCluster' is not thread safe because + // Race condition: Direct assignment to 'activeDatabase' is not thread safe because // 'onHealthStatusChange' may execute concurrently once 'initializationComplete' // is set to true. - // Simple rule is to never assign value of 'activeCluster' outside of - // 'activeClusterChangeLock' once the 'initializationComplete' is done. + // Simple rule is to never assign value of 'activeDatabase' outside of + // 'activeDatabaseChangeLock' once the 'initializationComplete' is done. 
waitForInitialHealthyCluster(statusTracker); - switchToHealthyCluster(SwitchReason.HEALTH_CHECK, temp); + switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, temp); } - this.fallbackExceptionList = multiClusterClientConfig.getFallbackExceptionList(); + this.fallbackExceptionList = multiDatabaseConfig.getFallbackExceptionList(); // Start periodic failback checker - if (multiClusterClientConfig.isFailbackSupported()) { - long failbackInterval = multiClusterClientConfig.getFailbackCheckInterval(); + if (multiDatabaseConfig.isFailbackSupported()) { + long failbackInterval = multiDatabaseConfig.getFailbackCheckInterval(); failbackScheduler.scheduleAtFixedRate(this::periodicFailbackCheck, failbackInterval, failbackInterval, TimeUnit.MILLISECONDS); } @@ -198,25 +197,25 @@ public MultiClusterPooledConnectionProvider(MultiClusterClientConfig multiCluste /** * Adds a new cluster endpoint to the provider. - * @param clusterConfig the configuration for the new cluster + * @param databaseConfig the configuration for the new database * @throws JedisValidationException if the endpoint already exists */ - public void add(ClusterConfig clusterConfig) { - if (clusterConfig == null) { - throw new JedisValidationException("ClusterConfig must not be null"); + public void add(DatabaseConfig databaseConfig) { + if (databaseConfig == null) { + throw new JedisValidationException("DatabaseConfig must not be null"); } - Endpoint endpoint = clusterConfig.getEndpoint(); - if (multiClusterMap.containsKey(endpoint)) { + Endpoint endpoint = databaseConfig.getEndpoint(); + if (databaseMap.containsKey(endpoint)) { throw new JedisValidationException( "Endpoint " + endpoint + " already exists in the provider"); } - activeClusterChangeLock.lock(); + activeDatabaseChangeLock.lock(); try { - addClusterInternal(multiClusterClientConfig, clusterConfig); + addClusterInternal(multiDatabaseConfig, databaseConfig); } finally { - activeClusterChangeLock.unlock(); + activeDatabaseChangeLock.unlock(); } } @@ 
-231,35 +230,35 @@ public void remove(Endpoint endpoint) { throw new JedisValidationException("Endpoint must not be null"); } - if (!multiClusterMap.containsKey(endpoint)) { + if (!databaseMap.containsKey(endpoint)) { throw new JedisValidationException( "Endpoint " + endpoint + " does not exist in the provider"); } - if (multiClusterMap.size() < 2) { + if (databaseMap.size() < 2) { throw new JedisValidationException("Cannot remove the last remaining endpoint"); } log.debug("Removing endpoint {}", endpoint); - Map.Entry notificationData = null; - activeClusterChangeLock.lock(); + Map.Entry notificationData = null; + activeDatabaseChangeLock.lock(); try { - Cluster clusterToRemove = multiClusterMap.get(endpoint); - boolean isActiveCluster = (activeCluster == clusterToRemove); + Database databaseToRemove = databaseMap.get(endpoint); + boolean isActiveDatabase = (activeDatabase == databaseToRemove); - if (isActiveCluster) { + if (isActiveDatabase) { log.info("Active cluster is being removed. 
Finding a new active cluster..."); - Map.Entry candidate = findWeightedHealthyClusterToIterate( - clusterToRemove); + Map.Entry candidate = findWeightedHealthyClusterToIterate( + databaseToRemove); if (candidate != null) { - Cluster selectedCluster = candidate.getValue(); - if (setActiveCluster(selectedCluster, true)) { + Database selectedCluster = candidate.getValue(); + if (setActiveDatabase(selectedCluster, true)) { log.info("New active cluster set to {}", candidate.getKey()); notificationData = candidate; } } else { throw new JedisException( - "Cluster can not be removed due to no healthy cluster available to switch!"); + "Database can not be removed due to no healthy cluster available to switch!"); } } @@ -268,15 +267,15 @@ public void remove(Endpoint endpoint) { healthStatusManager.remove(endpoint); // Remove from cluster map - multiClusterMap.remove(endpoint); + databaseMap.remove(endpoint); // Close the cluster resources - if (clusterToRemove != null) { - clusterToRemove.setDisabled(true); - clusterToRemove.close(); + if (databaseToRemove != null) { + databaseToRemove.setDisabled(true); + databaseToRemove.close(); } } finally { - activeClusterChangeLock.unlock(); + activeDatabaseChangeLock.unlock(); } if (notificationData != null) { onClusterSwitch(SwitchReason.FORCED, notificationData.getKey(), notificationData.getValue()); @@ -284,17 +283,16 @@ public void remove(Endpoint endpoint) { } /** - * Internal method to add a cluster configuration. This method is not thread-safe and should be + * Internal method to add a database configuration. This method is not thread-safe and should be * called within appropriate locks. 
*/ - private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfig, - ClusterConfig config) { - if (multiClusterMap.containsKey(config.getEndpoint())) { + private void addClusterInternal(MultiDatabaseConfig multiDatabaseConfig, DatabaseConfig config) { + if (databaseMap.containsKey(config.getEndpoint())) { throw new JedisValidationException( "Endpoint " + config.getEndpoint() + " already exists in the provider"); } - String clusterId = "cluster:" + config.getEndpoint(); + String clusterId = "database:" + config.getEndpoint(); Retry retry = RetryRegistry.of(retryConfig).retry(clusterId); @@ -315,7 +313,7 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi .hostAndPort(hostPort(config.getEndpoint())).clientConfig(config.getJedisClientConfig()) .poolConfig(config.getConnectionPoolConfig()).build(); - Cluster cluster; + Database database; StrategySupplier strategySupplier = config.getHealthCheckStrategySupplier(); if (strategySupplier != null) { HealthCheckStrategy hcs = strategySupplier.get(hostPort(config.getEndpoint()), @@ -323,19 +321,19 @@ private void addClusterInternal(MultiClusterClientConfig multiClusterClientConfi // Register listeners BEFORE adding clusters to avoid missing events healthStatusManager.registerListener(config.getEndpoint(), this::onHealthStatusChange); HealthCheck hc = healthStatusManager.add(config.getEndpoint(), hcs); - cluster = new Cluster(config.getEndpoint(), pool, retry, hc, circuitBreaker, - config.getWeight(), multiClusterClientConfig); + database = new Database(config.getEndpoint(), pool, retry, hc, circuitBreaker, + config.getWeight(), multiDatabaseConfig); } else { - cluster = new Cluster(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(), - multiClusterClientConfig); + database = new Database(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(), + multiDatabaseConfig); } - multiClusterMap.put(config.getEndpoint(), cluster); + 
databaseMap.put(config.getEndpoint(), database); // this is the place where we listen tracked errors and check if - // thresholds are exceeded for the cluster + // thresholds are exceeded for the database circuitBreakerEventPublisher.onError(event -> { - cluster.evaluateThresholds(false); + database.evaluateThresholds(false); }); } @@ -353,14 +351,14 @@ void onHealthStatusChange(HealthStatusChangeEvent eventArgs) { HealthStatus newStatus = eventArgs.getNewStatus(); log.debug("Health status changed for {} from {} to {}", endpoint, eventArgs.getOldStatus(), newStatus); - Cluster clusterWithHealthChange = multiClusterMap.get(endpoint); + Database clusterWithHealthChange = databaseMap.get(endpoint); if (clusterWithHealthChange == null) return; if (initializationComplete) { - if (!newStatus.isHealthy() && clusterWithHealthChange == activeCluster) { + if (!newStatus.isHealthy() && clusterWithHealthChange == activeDatabase) { clusterWithHealthChange.setGracePeriod(); - switchToHealthyCluster(SwitchReason.HEALTH_CHECK, clusterWithHealthChange); + switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, clusterWithHealthChange); } } } @@ -373,46 +371,46 @@ void onHealthStatusChange(HealthStatusChangeEvent eventArgs) { * @return the first healthy cluster found, ordered by weight (highest first) * @throws JedisConnectionException if all clusters are unhealthy */ - private Cluster waitForInitialHealthyCluster(StatusTracker statusTracker) { + private Database waitForInitialHealthyCluster(StatusTracker statusTracker) { // Sort clusters by weight in descending order - List> sortedClusters = multiClusterMap.entrySet().stream() - .sorted(Map.Entry. comparingByValue( - Comparator.comparing(Cluster::getWeight).reversed())) + List> sortedClusters = databaseMap.entrySet().stream() + .sorted(Map.Entry. 
comparingByValue( + Comparator.comparing(Database::getWeight).reversed())) .collect(Collectors.toList()); log.info("Selecting initial cluster from {} configured clusters", sortedClusters.size()); // Select cluster in weight order - for (Map.Entry entry : sortedClusters) { + for (Map.Entry entry : sortedClusters) { Endpoint endpoint = entry.getKey(); - Cluster cluster = entry.getValue(); + Database database = entry.getValue(); - log.info("Evaluating cluster {} (weight: {})", endpoint, cluster.getWeight()); + log.info("Evaluating database {} (weight: {})", endpoint, database.getWeight()); HealthStatus status; // Check if health checks are enabled for this endpoint if (healthStatusManager.hasHealthCheck(endpoint)) { log.info("Health checks enabled for {}, waiting for result", endpoint); - // Wait for this cluster's health status to be determined + // Wait for this database's health status to be determined status = statusTracker.waitForHealthStatus(endpoint); } else { // No health check configured - assume healthy - log.info("No health check configured for cluster {}, defaulting to HEALTHY", endpoint); + log.info("No health check configured for database {}, defaulting to HEALTHY", endpoint); status = HealthStatus.HEALTHY; } if (status.isHealthy()) { - log.info("Found healthy cluster: {} (weight: {})", endpoint, cluster.getWeight()); - return cluster; + log.info("Found healthy database: {} (weight: {})", endpoint, database.getWeight()); + return database; } else { - log.info("Cluster {} is unhealthy, trying next cluster", endpoint); + log.info("Database {} is unhealthy, trying next database", endpoint); } } // All clusters are unhealthy throw new JedisConnectionException( - "All configured clusters are unhealthy. Cannot initialize MultiClusterPooledConnectionProvider."); + "All configured clusters are unhealthy. 
Cannot initialize MultiDatabaseConnectionProvider."); } /** @@ -422,36 +420,36 @@ private Cluster waitForInitialHealthyCluster(StatusTracker statusTracker) { void periodicFailbackCheck() { try { // Find the best candidate cluster for failback - Map.Entry bestCandidate = null; - float bestWeight = activeCluster.getWeight(); + Map.Entry bestCandidate = null; + float bestWeight = activeDatabase.getWeight(); - for (Map.Entry entry : multiClusterMap.entrySet()) { - Cluster cluster = entry.getValue(); + for (Map.Entry entry : databaseMap.entrySet()) { + Database database = entry.getValue(); - // Skip if this is already the active cluster - if (cluster == activeCluster) { + // Skip if this is already the active database + if (database == activeDatabase) { continue; } - // Skip if cluster is not healthy - if (!cluster.isHealthy()) { + // Skip if database is not healthy + if (!database.isHealthy()) { continue; } - // This cluster is a valid candidate - if (cluster.getWeight() > bestWeight) { + // This database is a valid candidate + if (database.getWeight() > bestWeight) { bestCandidate = entry; - bestWeight = cluster.getWeight(); + bestWeight = database.getWeight(); } } // Perform failback if we found a better candidate if (bestCandidate != null) { - Cluster selectedCluster = bestCandidate.getValue(); + Database selectedCluster = bestCandidate.getValue(); log.info("Performing failback from {} to {} (higher weight cluster available)", - activeCluster.getCircuitBreaker().getName(), + activeDatabase.getCircuitBreaker().getName(), selectedCluster.getCircuitBreaker().getName()); - if (setActiveCluster(selectedCluster, true)) { + if (setActiveDatabase(selectedCluster, true)) { onClusterSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedCluster); } } @@ -460,24 +458,24 @@ void periodicFailbackCheck() { } } - Endpoint switchToHealthyCluster(SwitchReason reason, Cluster iterateFrom) { - Map.Entry clusterToIterate = findWeightedHealthyClusterToIterate( + Endpoint 
switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) { + Map.Entry databaseToIterate = findWeightedHealthyClusterToIterate( iterateFrom); - if (clusterToIterate == null) { + if (databaseToIterate == null) { // throws exception anyway since not able to iterate handleNoHealthyCluster(); } - Cluster cluster = clusterToIterate.getValue(); - boolean changed = setActiveCluster(cluster, false); + Database database = databaseToIterate.getValue(); + boolean changed = setActiveDatabase(database, false); if (!changed) return null; failoverAttemptCount.set(0); - onClusterSwitch(reason, clusterToIterate.getKey(), cluster); - return clusterToIterate.getKey(); + onClusterSwitch(reason, databaseToIterate.getKey(), database); + return databaseToIterate.getKey(); } private void handleNoHealthyCluster() { - int max = multiClusterClientConfig.getMaxNumFailoverAttempts(); + int max = multiDatabaseConfig.getMaxNumFailoverAttempts(); log.error("No healthy cluster available to switch to"); if (failoverAttemptCount.get() > max) { throw new JedisPermanentlyNotAvailableException(); @@ -496,7 +494,7 @@ private boolean markAsFreeze() { long until = failoverFreezeUntil.get(); long now = System.currentTimeMillis(); if (until <= now) { - long nextUntil = now + multiClusterClientConfig.getDelayInBetweenFailoverAttempts(); + long nextUntil = now + multiDatabaseConfig.getDelayInBetweenFailoverAttempts(); if (failoverFreezeUntil.compareAndSet(until, nextUntil)) { return true; } @@ -515,20 +513,20 @@ private boolean markAsFreeze() { */ @VisibleForTesting public void assertOperability() { - Cluster current = activeCluster; + Database current = activeDatabase; if (!current.isHealthy() && !this.canIterateFrom(current)) { handleNoHealthyCluster(); } } - private static Comparator> maxByWeight = Map.Entry - . comparingByValue(Comparator.comparing(Cluster::getWeight)); + private static Comparator> maxByWeight = Map.Entry + . 
comparingByValue(Comparator.comparing(Database::getWeight)); - private static Predicate> filterByHealth = c -> c.getValue() + private static Predicate> filterByHealth = c -> c.getValue() .isHealthy(); - private Map.Entry findWeightedHealthyClusterToIterate(Cluster iterateFrom) { - return multiClusterMap.entrySet().stream().filter(filterByHealth) + private Map.Entry findWeightedHealthyClusterToIterate(Database iterateFrom) { + return databaseMap.entrySet().stream().filter(filterByHealth) .filter(entry -> entry.getValue() != iterateFrom).max(maxByWeight).orElse(null); } @@ -539,12 +537,12 @@ private Map.Entry findWeightedHealthyClusterToIterate(Cluster * from the target connection. */ public void validateTargetConnection(Endpoint endpoint) { - Cluster cluster = multiClusterMap.get(endpoint); - validateTargetConnection(cluster); + Database database = databaseMap.get(endpoint); + validateTargetConnection(database); } - private void validateTargetConnection(Cluster cluster) { - CircuitBreaker circuitBreaker = cluster.getCircuitBreaker(); + private void validateTargetConnection(Database database) { + CircuitBreaker circuitBreaker = database.getCircuitBreaker(); State originalState = circuitBreaker.getState(); try { @@ -555,7 +553,7 @@ private void validateTargetConnection(Cluster cluster) { // yet circuitBreaker.transitionToClosedState(); - try (Connection targetConnection = cluster.getConnection()) { + try (Connection targetConnection = database.getConnection()) { targetConnection.ping(); } } catch (Exception e) { @@ -575,77 +573,77 @@ private void validateTargetConnection(Cluster cluster) { * @return */ public Set getEndpoints() { - return new HashSet<>(multiClusterMap.keySet()); + return new HashSet<>(databaseMap.keySet()); } - public void setActiveCluster(Endpoint endpoint) { + public void setActiveDatabase(Endpoint endpoint) { if (endpoint == null) { throw new JedisValidationException( "Provided endpoint is null. 
Please use one from the configuration"); } - Cluster cluster = multiClusterMap.get(endpoint); - if (cluster == null) { + Database database = databaseMap.get(endpoint); + if (database == null) { throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within " + "the configured endpoints. Please use one from the configuration"); } - if (setActiveCluster(cluster, true)) { - onClusterSwitch(SwitchReason.FORCED, endpoint, cluster); + if (setActiveDatabase(database, true)) { + onClusterSwitch(SwitchReason.FORCED, endpoint, database); } } - public void forceActiveCluster(Endpoint endpoint, long forcedActiveDuration) { - Cluster cluster = multiClusterMap.get(endpoint); + public void forceActiveDatabase(Endpoint endpoint, long forcedActiveDuration) { + Database database = databaseMap.get(endpoint); - if (cluster == null) { + if (database == null) { throw new JedisValidationException("Provided endpoint: " + endpoint + " is not within " + "the configured endpoints. Please use one from the configuration"); } - cluster.clearGracePeriod(); - if (!cluster.isHealthy()) { + database.clearGracePeriod(); + if (!database.isHealthy()) { throw new JedisValidationException("Provided endpoint: " + endpoint + " is not healthy. 
Please consider a healthy endpoint from the configuration"); } - multiClusterMap.entrySet().stream().forEach(entry -> { + databaseMap.entrySet().stream().forEach(entry -> { if (entry.getKey() != endpoint) { entry.getValue().setGracePeriod(forcedActiveDuration); } }); - setActiveCluster(endpoint); + setActiveDatabase(endpoint); } - private boolean setActiveCluster(Cluster cluster, boolean validateConnection) { - // Cluster cluster = clusterEntry.getValue(); + private boolean setActiveDatabase(Database database, boolean validateConnection) { + // Database database = clusterEntry.getValue(); // Field-level synchronization is used to avoid the edge case in which // incrementActiveMultiClusterIndex() is called at the same time - activeClusterChangeLock.lock(); - Cluster oldCluster; + activeDatabaseChangeLock.lock(); + Database oldCluster; try { - // Allows an attempt to reset the current cluster from a FORCED_OPEN to CLOSED state in the + // Allows an attempt to reset the current database from a FORCED_OPEN to CLOSED state in the // event that no failover is possible - if (activeCluster == cluster && !cluster.isCBForcedOpen()) return false; + if (activeDatabase == database && !database.isCBForcedOpen()) return false; - if (validateConnection) validateTargetConnection(cluster); + if (validateConnection) validateTargetConnection(database); - String originalClusterName = getClusterCircuitBreaker().getName(); + String originalClusterName = getDatabaseCircuitBreaker().getName(); - if (activeCluster == cluster) - log.warn("Cluster/database endpoint '{}' successfully closed its circuit breaker", + if (activeDatabase == database) + log.warn("Database/database endpoint '{}' successfully closed its circuit breaker", originalClusterName); - else log.warn("Cluster/database endpoint successfully updated from '{}' to '{}'", - originalClusterName, cluster.circuitBreaker.getName()); - oldCluster = activeCluster; - activeCluster = cluster; + else log.warn("Database/database endpoint 
successfully updated from '{}' to '{}'", + originalClusterName, database.circuitBreaker.getName()); + oldCluster = activeDatabase; + activeDatabase = database; } finally { - activeClusterChangeLock.unlock(); + activeDatabaseChangeLock.unlock(); } - boolean switched = oldCluster != cluster; - if (switched && this.multiClusterClientConfig.isFastFailover()) { - log.info("Forcing disconnect of all active connections in old cluster: {}", + boolean switched = oldCluster != database; + if (switched && this.multiDatabaseConfig.isFastFailover()) { + log.info("Forcing disconnect of all active connections in old database: {}", oldCluster.circuitBreaker.getName()); oldCluster.forceDisconnect(); - log.info("Disconnected all active connections in old cluster: {}", + log.info("Disconnected all active connections in old database: {}", oldCluster.circuitBreaker.getName()); } @@ -671,38 +669,38 @@ public void close() { } // Close all cluster connection pools - for (Cluster cluster : multiClusterMap.values()) { - cluster.close(); + for (Database database : databaseMap.values()) { + database.close(); } } @Override public Connection getConnection() { - return activeCluster.getConnection(); + return activeDatabase.getConnection(); } public Connection getConnection(Endpoint endpoint) { - return multiClusterMap.get(endpoint).getConnection(); + return databaseMap.get(endpoint).getConnection(); } @Override public Connection getConnection(CommandArguments args) { - return activeCluster.getConnection(); + return activeDatabase.getConnection(); } @Override public Map> getConnectionMap() { - ConnectionPool connectionPool = activeCluster.connectionPool; + ConnectionPool connectionPool = activeDatabase.connectionPool; return Collections.singletonMap(connectionPool.getFactory(), connectionPool); } - public Cluster getCluster() { - return activeCluster; + public Database getDatabase() { + return activeDatabase; } @VisibleForTesting - public Cluster getCluster(Endpoint endpoint) { - return 
multiClusterMap.get(endpoint); + public Database getDatabase(Endpoint endpoint) { + return databaseMap.get(endpoint); } /** @@ -713,7 +711,7 @@ public Cluster getCluster(Endpoint endpoint) { * @return the active cluster endpoint */ public Endpoint getActiveEndpoint() { - return activeCluster.getEndpoint(); + return activeDatabase.getEndpoint(); } /** @@ -722,51 +720,51 @@ public Endpoint getActiveEndpoint() { * @return the health status of the endpoint */ public boolean isHealthy(Endpoint endpoint) { - Cluster cluster = getCluster(endpoint); - if (cluster == null) { + Database database = getDatabase(endpoint); + if (database == null) { throw new JedisValidationException( "Endpoint " + endpoint + " does not exist in the provider"); } - return cluster.isHealthy(); + return database.isHealthy(); } - public CircuitBreaker getClusterCircuitBreaker() { - return activeCluster.getCircuitBreaker(); + public CircuitBreaker getDatabaseCircuitBreaker() { + return activeDatabase.getCircuitBreaker(); } /** * Indicates the final cluster/database endpoint (connection pool), according to the - * pre-configured list provided at startup via the MultiClusterClientConfig, is unavailable and + * pre-configured list provided at startup via the MultiDatabaseConfig, is unavailable and * therefore no further failover is possible. 
Users can manually failback to an available cluster */ - public boolean canIterateFrom(Cluster iterateFrom) { - Map.Entry e = findWeightedHealthyClusterToIterate(iterateFrom); + public boolean canIterateFrom(Database iterateFrom) { + Map.Entry e = findWeightedHealthyClusterToIterate(iterateFrom); return e != null; } - public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Cluster cluster) { - if (clusterSwitchListener != null) { - ClusterSwitchEventArgs eventArgs = new ClusterSwitchEventArgs(reason, endpoint, cluster); - clusterSwitchListener.accept(eventArgs); + public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Database database) { + if (databaseSwitchListener != null) { + ClusterSwitchEventArgs eventArgs = new ClusterSwitchEventArgs(reason, endpoint, database); + databaseSwitchListener.accept(eventArgs); } } - public void setClusterSwitchListener(Consumer clusterSwitchListener) { - this.clusterSwitchListener = clusterSwitchListener; + public void setDatabaseSwitchListener(Consumer databaseSwitchListener) { + this.databaseSwitchListener = databaseSwitchListener; } public List> getFallbackExceptionList() { return fallbackExceptionList; } - public static class Cluster { + public static class Database { private TrackingConnectionPool connectionPool; private final Retry retry; private final CircuitBreaker circuitBreaker; private final float weight; private final HealthCheck healthCheck; - private final MultiClusterClientConfig multiClusterClientConfig; + private final MultiDatabaseConfig multiDbConfig; private boolean disabled = false; private final Endpoint endpoint; @@ -774,29 +772,28 @@ public static class Cluster { private volatile long gracePeriodEndsAt = 0; private final Logger log = LoggerFactory.getLogger(getClass()); - private Cluster(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry, - CircuitBreaker circuitBreaker, float weight, - MultiClusterClientConfig multiClusterClientConfig) { + private 
Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry, + CircuitBreaker circuitBreaker, float weight, MultiDatabaseConfig multiDatabaseConfig) { this.endpoint = endpoint; this.connectionPool = connectionPool; this.retry = retry; this.circuitBreaker = circuitBreaker; this.weight = weight; - this.multiClusterClientConfig = multiClusterClientConfig; + this.multiDbConfig = multiDatabaseConfig; this.healthCheck = null; } - private Cluster(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry, + private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry, HealthCheck hc, CircuitBreaker circuitBreaker, float weight, - MultiClusterClientConfig multiClusterClientConfig) { + MultiDatabaseConfig multiDbConfig) { this.endpoint = endpoint; this.connectionPool = connectionPool; this.retry = retry; this.circuitBreaker = circuitBreaker; this.weight = weight; - this.multiClusterClientConfig = multiClusterClientConfig; + this.multiDbConfig = multiDbConfig; this.healthCheck = hc; } @@ -805,7 +802,7 @@ public Endpoint getEndpoint() { } public Connection getConnection() { - if (!isHealthy()) throw new JedisConnectionException("Cluster is not healthy"); + if (!isHealthy()) throw new JedisConnectionException("Database is not healthy"); if (connectionPool.isClosed()) { connectionPool = TrackingConnectionPool.from(connectionPool); } @@ -850,15 +847,15 @@ public boolean isHealthy() { } public boolean retryOnFailover() { - return multiClusterClientConfig.isRetryOnFailover(); + return multiDbConfig.isRetryOnFailover(); } public int getCircuitBreakerMinNumOfFailures() { - return multiClusterClientConfig.getCircuitBreakerMinNumOfFailures(); + return multiDbConfig.getCircuitBreakerMinNumOfFailures(); } public float getCircuitBreakerFailureRateThreshold() { - return multiClusterClientConfig.getCircuitBreakerFailureRateThreshold(); + return multiDbConfig.getCircuitBreakerFailureRateThreshold(); } public boolean isDisabled() { @@ 
-880,7 +877,7 @@ public boolean isInGracePeriod() { * Sets the grace period for this cluster */ public void setGracePeriod() { - setGracePeriod(multiClusterClientConfig.getGracePeriod()); + setGracePeriod(multiDbConfig.getGracePeriod()); } public void setGracePeriod(long gracePeriod) { @@ -897,7 +894,7 @@ public void clearGracePeriod() { * Whether failback is supported by client */ public boolean isFailbackSupported() { - return multiClusterClientConfig.isFailbackSupported(); + return multiDbConfig.isFailbackSupported(); } public void forceDisconnect() { @@ -915,15 +912,15 @@ && isThresholdsExceeded(this, lastFailRecorded)) { } } - private static boolean isThresholdsExceeded(Cluster cluster, boolean lastFailRecorded) { - Metrics metrics = cluster.getCircuitBreaker().getMetrics(); + private static boolean isThresholdsExceeded(Database database, boolean lastFailRecorded) { + Metrics metrics = database.getCircuitBreaker().getMetrics(); // ATTENTION: this is to increment fails in regard to the current call that is failing, // DO NOT remove the increment, it will change the behaviour in case of initial requests to - // cluster fail + // database fail int fails = metrics.getNumberOfFailedCalls() + (lastFailRecorded ? 
0 : 1); int succ = metrics.getNumberOfSuccessfulCalls(); - if (fails >= cluster.getCircuitBreakerMinNumOfFailures()) { - float ratePercentThreshold = cluster.getCircuitBreakerFailureRateThreshold();// 0..100 + if (fails >= database.getCircuitBreakerMinNumOfFailures()) { + float ratePercentThreshold = database.getCircuitBreakerFailureRateThreshold();// 0..100 int total = fails + succ; if (total == 0) return false; float failureRatePercent = (fails * 100.0f) / total; @@ -936,7 +933,7 @@ private static boolean isThresholdsExceeded(Cluster cluster, boolean lastFailRec public String toString() { return circuitBreaker.getName() + "{" + "connectionPool=" + connectionPool + ", retry=" + retry + ", circuitBreaker=" + circuitBreaker + ", weight=" + weight + ", healthStatus=" - + getHealthStatus() + ", multiClusterClientConfig=" + multiClusterClientConfig + '}'; + + getHealthStatus() + ", multiClusterClientConfig=" + multiDbConfig + '}'; } } diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java index 9c0126a5af..05e923f2d4 100644 --- a/src/test/java/redis/clients/jedis/MultiDbClientTest.java +++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java @@ -15,7 +15,7 @@ import static org.hamcrest.Matchers.not; import static org.junit.jupiter.api.Assertions.*; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisValidationException; import redis.clients.jedis.mcf.ClusterSwitchEventArgs; import redis.clients.jedis.mcf.SwitchReason; @@ -56,7 +56,7 @@ public static void setupAdminClients() throws IOException { @BeforeEach void setUp() { // Create a simple resilient client with mock endpoints for testing - MultiClusterClientConfig clientConfig = MultiClusterClientConfig.builder() + MultiDatabaseConfig clientConfig = MultiDatabaseConfig.builder() .endpoint(endpoint1.getHostAndPort(), 
100.0f, endpoint1.getClientConfigBuilder().build()) .endpoint(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build()) .build(); @@ -86,11 +86,11 @@ void testAddRemoveEndpointWithEndpointInterface() { } @Test - void testAddRemoveEndpointWithClusterConfig() { + void testAddRemoveEndpointWithDatabaseConfig() { // todo : (@ggivo) Replace HostAndPort with Endpoint HostAndPort newEndpoint = new HostAndPort("unavailable", 6381); - ClusterConfig newConfig = ClusterConfig + DatabaseConfig newConfig = DatabaseConfig .builder(newEndpoint, DefaultJedisClientConfig.builder().build()).weight(25.0f).build(); assertDoesNotThrow(() -> client.addEndpoint(newConfig)); @@ -121,9 +121,9 @@ void testSetActiveDatabase() { @Test void testBuilderWithMultipleEndpointTypes() { - MultiClusterClientConfig clientConfig = MultiClusterClientConfig.builder() + MultiDatabaseConfig clientConfig = MultiDatabaseConfig.builder() .endpoint(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build()) - .endpoint(ClusterConfig + .endpoint(DatabaseConfig .builder(endpoint2.getHostAndPort(), DefaultJedisClientConfig.builder().build()) .weight(50.0f).build()) .build(); @@ -172,11 +172,11 @@ public void testForceActiveEndpointWithNonExistingEndpoint() { @Test public void testWithDatabaseSwitchListener() { - MultiClusterClientConfig endpointsConfig = MultiClusterClientConfig.builder() - .endpoint(ClusterConfig + MultiDatabaseConfig endpointsConfig = MultiDatabaseConfig.builder() + .endpoint(DatabaseConfig .builder(endpoint1.getHostAndPort(), endpoint1.getClientConfigBuilder().build()) .weight(100.0f).build()) - .endpoint(ClusterConfig + .endpoint(DatabaseConfig .builder(endpoint2.getHostAndPort(), endpoint2.getClientConfigBuilder().build()) .weight(50.0f).build()) .build(); diff --git a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java index 
f2b2f56e14..39c76b6338 100644 --- a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java +++ b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java @@ -184,8 +184,7 @@ private static boolean clusterConstructorThatShouldBeDeprecatedAndRemoved(Constr private static boolean multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient( Constructor ctor) { Class[] types = ctor.getParameterTypes(); - return types.length == 1 - && types[0].getSimpleName().equals("MultiClusterPooledConnectionProvider"); + return types.length == 1 && types[0].getSimpleName().equals("MultiDatabaseConnectionProvider"); } private static String prettySignature(Constructor ctor) { diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java index 6eb047f6c7..3416f68b74 100644 --- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java @@ -16,10 +16,10 @@ import redis.clients.jedis.EndpointConfig; import redis.clients.jedis.HostAndPorts; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; import redis.clients.jedis.UnifiedJedis; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; import redis.clients.jedis.scenario.RecommendedSettings; import java.io.IOException; @@ -57,7 +57,7 @@ public class FailoverIntegrationTest { private static UnifiedJedis jedis2; private static String JEDIS1_ID = ""; private static String JEDIS2_ID = ""; - private MultiClusterPooledConnectionProvider provider; + private MultiDatabaseConnectionProvider provider; private UnifiedJedis failoverClient; @BeforeAll @@ 
-138,7 +138,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) - .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy()); + .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy()); // Disable redisProxy1 redisProxy1.disable(); @@ -149,7 +149,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception // 3. Subsequent calls should be routed to Endpoint 2 assertThrows(JedisConnectionException.class, () -> failoverClient.info("server")); - assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // Check that the failoverClient is now using Endpoint 2 @@ -160,7 +160,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception // Endpoint1 and Endpoint2 are NOT available, assertThrows(JedisConnectionException.class, () -> failoverClient.info("server")); - assertThat(provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // and since no other nodes are available, it should propagate the errors to the caller @@ -173,20 +173,20 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) - .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy()); + .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy()); - provider.setActiveCluster(endpoint2.getHostAndPort()); + 
provider.setActiveDatabase(endpoint2.getHostAndPort()); assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID)); } - private List getClusterConfigs( + private List getDatabaseConfigs( JedisClientConfig clientConfig, EndpointConfig... endpoints) { int weight = endpoints.length; AtomicInteger weightCounter = new AtomicInteger(weight); return Arrays.stream(endpoints) - .map(e -> MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig) + .map(e -> MultiDatabaseConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig) .weight(1.0f / weightCounter.getAndIncrement()).healthCheckEnabled(false).build()) .collect(Collectors.toList()); } @@ -197,21 +197,21 @@ public void testManualFailoverInflightCommandsCompleteGracefully() throws ExecutionException, InterruptedException { await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) - .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy()); + .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy()); assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); // We will trigger failover while this command is in-flight Future> blpop = executor.submit(() -> failoverClient.blpop(1000, "test-list")); - provider.setActiveCluster(endpoint2.getHostAndPort()); + provider.setActiveDatabase(endpoint2.getHostAndPort()); // After the manual failover, commands should be executed against Endpoint 2 assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID)); // Failover was manually triggered, and there were no errors // previous endpoint CB should still be in CLOSED state - assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.CLOSED)); jedis1.rpush("test-list", "somevalue"); @@ -228,12 +228,12 @@ public void 
testManualFailoverInflightCommandsWithErrorsPropagateError() throws assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) - .until(() -> provider.getCluster(endpoint2.getHostAndPort()).isHealthy()); + .until(() -> provider.getDatabase(endpoint2.getHostAndPort()).isHealthy()); Future> blpop = executor.submit(() -> failoverClient.blpop(10000, "test-list-1")); // trigger failover manually - provider.setActiveCluster(endpoint2.getHostAndPort()); + provider.setActiveDatabase(endpoint2.getHostAndPort()); Future infoCmd = executor.submit(() -> failoverClient.info("server")); // After the manual failover, commands should be executed against Endpoint 2 @@ -247,7 +247,7 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws assertThat(exception.getCause(), instanceOf(JedisConnectionException.class)); // Check that the circuit breaker for Endpoint 1 is open after the error - assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.OPEN)); // Ensure that the active cluster is still Endpoint 2 @@ -261,18 +261,15 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws */ @Test public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOException { - MultiClusterClientConfig failoverConfig = new MultiClusterClientConfig.Builder( - getClusterConfigs( - DefaultJedisClientConfig.builder() - .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) - .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(), - endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1) - .circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2) - .circuitBreakerFailureRateThreshold(50f) // %50 failure rate - .build(); - - 
MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - failoverConfig); + MultiDatabaseConfig failoverConfig = new MultiDatabaseConfig.Builder(getDatabaseConfigs( + DefaultJedisClientConfig.builder().socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) + .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(), + endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1) + .circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2) + .circuitBreakerFailureRateThreshold(50f) // %50 failure rate + .build(); + + MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(failoverConfig); try (UnifiedJedis client = new UnifiedJedis(provider)) { // Verify initial connection to first endpoint assertThat(getNodeId(client.info("server")), equalTo(JEDIS1_ID)); @@ -298,7 +295,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc assertThrows(JedisConnectionException.class, () -> client.info("server")); // Circuit breaker should be open after just one command with retries - assertThat(provider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + assertThat(provider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // Next command should be routed to the second endpoint @@ -318,7 +315,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc @Test public void testInflightCommandsAreRetriedAfterFailover() throws Exception { - MultiClusterPooledConnectionProvider customProvider = createProvider( + MultiDatabaseConnectionProvider customProvider = createProvider( builder -> builder.retryOnFailover(true)); // Create a custom client with retryOnFailover enabled for this specific test @@ -342,7 +339,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception { assertThat(getNodeId(customClient.info("server")), 
equalTo(JEDIS2_ID)); // Check that the circuit breaker for Endpoint 1 is open assertThat( - customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + customProvider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // Disable redisProxy1 to enforce connection drop for the in-flight (blpop) command @@ -360,7 +357,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception { @Test public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception { // Create a custom provider and client with retry disabled for this specific test - MultiClusterPooledConnectionProvider customProvider = createProvider( + MultiDatabaseConnectionProvider customProvider = createProvider( builder -> builder.retryOnFailover(false)); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { @@ -376,7 +373,7 @@ public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception { // Check that the circuit breaker for Endpoint 1 is open assertThat( - customProvider.getCluster(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), + customProvider.getDatabase(endpoint1.getHostAndPort()).getCircuitBreaker().getState(), equalTo(CircuitBreaker.State.FORCED_OPEN)); // Disable redisProxy1 to enforce the current blpop command failure @@ -417,34 +414,34 @@ private static String generateTestValue(int byteSize) { } /** - * Creates a MultiClusterPooledConnectionProvider with standard configuration + * Creates a MultiDatabaseConnectionProvider with standard configuration * @return A configured provider */ - private MultiClusterPooledConnectionProvider createProvider() { + private MultiDatabaseConnectionProvider createProvider() { JedisClientConfig clientConfig = DefaultJedisClientConfig.builder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - MultiClusterClientConfig 
failoverConfig = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) + MultiDatabaseConfig failoverConfig = new MultiDatabaseConfig.Builder( + getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build(); - return new MultiClusterPooledConnectionProvider(failoverConfig); + return new MultiDatabaseConnectionProvider(failoverConfig); } /** - * Creates a MultiClusterPooledConnectionProvider with standard configuration + * Creates a MultiDatabaseConnectionProvider with standard configuration * @return A configured provider */ - private MultiClusterPooledConnectionProvider createProvider( - Function configCustomizer) { + private MultiDatabaseConnectionProvider createProvider( + Function configCustomizer) { JedisClientConfig clientConfig = DefaultJedisClientConfig.builder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) + MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f); @@ -452,6 +449,6 @@ private MultiClusterPooledConnectionProvider createProvider( builder = configCustomizer.apply(builder); } - return new MultiClusterPooledConnectionProvider(builder.build()); + return new MultiDatabaseConnectionProvider(builder.build()); } } diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java 
index bc00caf8ed..da295e837e 100644 --- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java +++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java @@ -1,7 +1,5 @@ package redis.clients.jedis.mcf; -import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig; - import io.github.resilience4j.ratelimiter.RateLimiterConfig; import org.hamcrest.Matchers; import org.junit.jupiter.api.AfterAll; @@ -18,7 +16,7 @@ import eu.rekawek.toxiproxy.ToxiproxyClient; import eu.rekawek.toxiproxy.model.Toxic; import redis.clients.jedis.*; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import redis.clients.jedis.scenario.ActiveActiveFailoverTest; import redis.clients.jedis.scenario.MultiThreadedFakeApp; import redis.clients.jedis.scenario.RecommendedSettings; @@ -96,18 +94,18 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio "TESTING WITH PARAMETERS: fastFailover: {} numberOfThreads: {} minFailoverCompletionDuration: {} maxFailoverCompletionDuration: {] ", fastFailover, numberOfThreads, minFailoverCompletionDuration, maxFailoverCompletionDuration); - MultiClusterClientConfig.ClusterConfig[] clusterConfig = new MultiClusterClientConfig.ClusterConfig[2]; + MultiDatabaseConfig.DatabaseConfig[] clusterConfig = new MultiDatabaseConfig.DatabaseConfig[2]; JedisClientConfig config = endpoint1.getClientConfigBuilder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - clusterConfig[0] = ClusterConfig.builder(endpoint1.getHostAndPort(), config) + clusterConfig[0] = DatabaseConfig.builder(endpoint1.getHostAndPort(), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build(); - clusterConfig[1] = ClusterConfig.builder(endpoint2.getHostAndPort(), config) + clusterConfig[1] = 
DatabaseConfig.builder(endpoint2.getHostAndPort(), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build(); - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfig); + MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(clusterConfig); builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit @@ -164,11 +162,10 @@ public void accept(ClusterSwitchEventArgs e) { ensureEndpointAvailability(endpoint2.getHostAndPort(), config); // Create the connection provider - MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - builder.build()); + MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(builder.build()); FailoverReporter reporter = new FailoverReporter(); - provider.setClusterSwitchListener(reporter); - provider.setActiveCluster(endpoint1.getHostAndPort()); + provider.setDatabaseSwitchListener(reporter); + provider.setActiveDatabase(endpoint1.getHostAndPort()); UnifiedJedis client = new UnifiedJedis(provider); @@ -180,7 +177,7 @@ public void accept(ClusterSwitchEventArgs e) { AtomicBoolean unexpectedErrors = new AtomicBoolean(false); AtomicReference lastException = new AtomicReference(); AtomicLong stopRunningAt = new AtomicLong(); - String cluster2Id = provider.getCluster(endpoint2.getHostAndPort()).getCircuitBreaker() + String cluster2Id = provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker() .getName(); // Start thread that imitates an application that uses the client @@ -198,7 +195,7 @@ public void accept(ClusterSwitchEventArgs e) { while (true) { try { if (System.currentTimeMillis() > stopRunningAt.get()) break; - currentClusterId = provider.getCluster().getCircuitBreaker().getName(); + currentClusterId = provider.getDatabase().getCircuitBreaker().getName(); Map executionInfo = new 
HashMap() { { put("threadId", String.valueOf(threadId)); @@ -287,7 +284,7 @@ public boolean isCompleted(Duration checkInterval, Duration delayAfter, Duration } log.info("Fake app completed"); - ConnectionPool pool = provider.getCluster(endpoint1.getHostAndPort()).getConnectionPool(); + ConnectionPool pool = provider.getDatabase(endpoint1.getHostAndPort()).getConnectionPool(); log.info("First connection pool state: active: {}, idle: {}", pool.getNumActive(), pool.getNumIdle()); diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java index 755325c705..bf7f702004 100644 --- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java @@ -16,11 +16,11 @@ import redis.clients.jedis.Connection; import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import redis.clients.jedis.Protocol; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; import redis.clients.jedis.util.ReflectionTestUtil; /** @@ -30,36 +30,36 @@ */ public class CircuitBreakerThresholdsTest { - private MultiClusterPooledConnectionProvider realProvider; - private MultiClusterPooledConnectionProvider spyProvider; - private Cluster cluster; + private MultiDatabaseConnectionProvider realProvider; + private MultiDatabaseConnectionProvider spyProvider; + private Database cluster; private CircuitBreakerCommandExecutor executor; private CommandObject dummyCommand; private TrackingConnectionPool 
poolMock; private HostAndPort fakeEndpoint = new HostAndPort("fake", 6379); private HostAndPort fakeEndpoint2 = new HostAndPort("fake2", 6379); - private ClusterConfig[] fakeClusterConfigs; + private DatabaseConfig[] fakeDatabaseConfigs; @BeforeEach public void setup() throws Exception { - ClusterConfig[] clusterConfigs = new ClusterConfig[] { - ClusterConfig.builder(fakeEndpoint, DefaultJedisClientConfig.builder().build()) + DatabaseConfig[] databaseConfigs = new DatabaseConfig[] { + DatabaseConfig.builder(fakeEndpoint, DefaultJedisClientConfig.builder().build()) .healthCheckEnabled(false).weight(1.0f).build(), - ClusterConfig.builder(fakeEndpoint2, DefaultJedisClientConfig.builder().build()) + DatabaseConfig.builder(fakeEndpoint2, DefaultJedisClientConfig.builder().build()) .healthCheckEnabled(false).weight(0.5f).build() }; - fakeClusterConfigs = clusterConfigs; + fakeDatabaseConfigs = databaseConfigs; - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig.builder(clusterConfigs) + MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(databaseConfigs) .circuitBreakerFailureRateThreshold(50.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); - MultiClusterClientConfig mcc = cfgBuilder.build(); + MultiDatabaseConfig mcc = cfgBuilder.build(); - realProvider = new MultiClusterPooledConnectionProvider(mcc); + realProvider = new MultiDatabaseConnectionProvider(mcc); spyProvider = spy(realProvider); - cluster = spyProvider.getCluster(); + cluster = spyProvider.getDatabase(); executor = new CircuitBreakerCommandExecutor(spyProvider); @@ -88,7 +88,7 @@ public void belowMinFailures_doesNotFailover() { } // Below min failures; CB remains CLOSED - assertEquals(CircuitBreaker.State.CLOSED, spyProvider.getClusterCircuitBreaker().getState()); + assertEquals(CircuitBreaker.State.CLOSED, spyProvider.getDatabaseCircuitBreaker().getState()); } /** @@ -111,10 +111,10 @@ public void 
minFailuresAndRateExceeded_triggersFailover() { // Next call should hit open CB (CallNotPermitted) and trigger failover assertThrows(JedisConnectionException.class, () -> executor.executeCommand(dummyCommand)); - verify(spyProvider, atLeastOnce()).switchToHealthyCluster(eq(SwitchReason.CIRCUIT_BREAKER), + verify(spyProvider, atLeastOnce()).switchToHealthyDatabase(eq(SwitchReason.CIRCUIT_BREAKER), any()); assertEquals(CircuitBreaker.State.FORCED_OPEN, - spyProvider.getCluster(fakeEndpoint).getCircuitBreaker().getState()); + spyProvider.getDatabase(fakeEndpoint).getCircuitBreaker().getState()); } /** @@ -123,14 +123,12 @@ public void minFailuresAndRateExceeded_triggersFailover() { @Test public void rateBelowThreshold_doesNotFailover() throws Exception { // Use local provider with higher threshold (80%) and no retries - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig - .builder(fakeClusterConfigs).circuitBreakerFailureRateThreshold(80.0f) - .circuitBreakerMinNumOfFailures(3).circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1) - .retryOnFailover(false); - MultiClusterPooledConnectionProvider rp = new MultiClusterPooledConnectionProvider( - cfgBuilder.build()); - MultiClusterPooledConnectionProvider sp = spy(rp); - Cluster c = sp.getCluster(); + MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs) + .circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3) + .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); + MultiDatabaseConnectionProvider rp = new MultiDatabaseConnectionProvider(cfgBuilder.build()); + MultiDatabaseConnectionProvider sp = spy(rp); + Database c = sp.getDatabase(); try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(sp)) { CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING), BuilderFactory.STRING); @@ -158,17 +156,16 @@ public void rateBelowThreshold_doesNotFailover() throws Exception { 
assertThrows(JedisConnectionException.class, () -> ex.executeCommand(cmd)); } - assertEquals(CircuitBreaker.State.CLOSED, sp.getClusterCircuitBreaker().getState()); + assertEquals(CircuitBreaker.State.CLOSED, sp.getDatabaseCircuitBreaker().getState()); } } @Test public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig - .builder(fakeClusterConfigs); + MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs); cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10); - MultiClusterClientConfig mcc = cfgBuilder.build(); + MultiDatabaseConfig mcc = cfgBuilder.build(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc); @@ -192,16 +189,14 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures, boolean expectFailoverOnNext) throws Exception { - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig - .builder(fakeClusterConfigs).circuitBreakerFailureRateThreshold(ratePercent) - .circuitBreakerMinNumOfFailures(minFailures) + MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs) + .circuitBreakerFailureRateThreshold(ratePercent).circuitBreakerMinNumOfFailures(minFailures) .circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1) .retryOnFailover(false); - MultiClusterPooledConnectionProvider real = new MultiClusterPooledConnectionProvider( - cfgBuilder.build()); - MultiClusterPooledConnectionProvider spy = spy(real); - Cluster c = spy.getCluster(); + MultiDatabaseConnectionProvider real = new MultiDatabaseConnectionProvider(cfgBuilder.build()); + MultiDatabaseConnectionProvider spy = spy(real); + Database c = spy.getDatabase(); try (CircuitBreakerCommandExecutor ex = 
new CircuitBreakerCommandExecutor(spy)) { CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING), @@ -237,7 +232,7 @@ public void thresholdMatrix(int minFailures, float ratePercent, int successes, i if (expectFailoverOnNext) { assertThrows(Exception.class, () -> ex.executeCommand(cmd)); - verify(spy, atLeastOnce()).switchToHealthyCluster(eq(SwitchReason.CIRCUIT_BREAKER), any()); + verify(spy, atLeastOnce()).switchToHealthyDatabase(eq(SwitchReason.CIRCUIT_BREAKER), any()); assertEquals(CircuitBreaker.State.FORCED_OPEN, c.getCircuitBreaker().getState()); } else { CircuitBreaker.State st = c.getCircuitBreaker().getState(); diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java index c603509f32..251d69140c 100644 --- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java @@ -11,8 +11,8 @@ import org.junit.jupiter.params.provider.CsvSource; import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; /** * Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures @@ -21,15 +21,15 @@ */ public class ClusterEvaluateThresholdsTest { - private MultiClusterPooledConnectionProvider provider; - private Cluster cluster; + private MultiDatabaseConnectionProvider provider; + private Database cluster; private CircuitBreaker circuitBreaker; private CircuitBreaker.Metrics metrics; @BeforeEach public void setup() { - provider = mock(MultiClusterPooledConnectionProvider.class); - cluster = mock(Cluster.class); + provider = 
mock(MultiDatabaseConnectionProvider.class); + cluster = mock(Database.class); circuitBreaker = mock(CircuitBreaker.class); metrics = mock(CircuitBreaker.Metrics.class); @@ -58,7 +58,7 @@ public void belowMinFailures_doesNotFailover() { cluster.evaluateThresholds(false); verify(circuitBreaker, never()).transitionToOpenState(); - verify(provider, never()).switchToHealthyCluster(any(), any()); + verify(provider, never()).switchToHealthyDatabase(any(), any()); } /** @@ -95,18 +95,18 @@ public void rateBelowThreshold_doesNotFailover() { cluster.evaluateThresholds(false); verify(circuitBreaker, never()).transitionToOpenState(); - verify(provider, never()).switchToHealthyCluster(any(), any()); + verify(provider, never()).switchToHealthyDatabase(any(), any()); } @Test public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { - MultiClusterClientConfig.Builder cfgBuilder = MultiClusterClientConfig - .builder(java.util.Arrays.asList(MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig + .builder(java.util.Arrays.asList(MultiDatabaseConfig.DatabaseConfig .builder(new HostAndPort("localhost", 6379), DefaultJedisClientConfig.builder().build()) .healthCheckEnabled(false).build())); cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10); - MultiClusterClientConfig mcc = cfgBuilder.build(); + MultiDatabaseConfig mcc = cfgBuilder.build(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc); diff --git a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java index 6939ef7069..51d0aa3ec2 100644 --- a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java +++ b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import 
redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; public class DefaultValuesTest { @@ -19,16 +19,16 @@ public class DefaultValuesTest { @Test void testDefaultValuesInConfig() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(fakeEndpoint, config).build(); - MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); // check for grace period assertEquals(60000, multiConfig.getGracePeriod()); // check for cluster config - assertEquals(clusterConfig, multiConfig.getClusterConfigs()[0]); + assertEquals(clusterConfig, multiConfig.getDatabaseConfigs()[0]); // check healthchecks enabled assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java index afedc66f4d..c216e69317 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java @@ -17,7 +17,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; @ExtendWith(MockitoExtension.class) class FailbackMechanismIntegrationTest { @@ -49,40 +49,38 @@ private MockedConstruction mockPool() { void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { try (MockedConstruction mockedPool 
= mockPool()) { // Create clusters with different weights - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(false) // Disabled + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled .failbackCheckInterval(100) // Short interval for testing .build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 (only healthy option) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster2 healthy again (higher weight - would normally trigger failback) - 
MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait longer than failback interval // Should still be on cluster1 since failback is disabled await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint1) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); } } } @@ -91,39 +89,38 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { void testFailbackToHigherWeightCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with different weights - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f) // Lower weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(100) // Short interval for testing + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(100) // Short interval for testing .gracePeriod(100).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new 
MultiDatabaseConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to force failover to cluster2 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (lower weight, but only healthy option) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check interval + some buffer // Should have failed back to cluster1 (higher weight) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint1) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); } } } @@ -132,43 +129,42 @@ void testFailbackToHigherWeightCluster() throws InterruptedException { void testNoFailbackToLowerWeightCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { // Create three clusters with different weights to properly test no failback to lower weight - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f) // Lowest weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + 
MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) // Medium weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }) + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(100).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight) - assertEquals(provider.getCluster(endpoint3), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); // Make cluster3 unhealthy to force failover to cluster2 (medium weight) - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (highest weight among healthy clusters) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 (lowest weight) healthy - this should NOT trigger failback // since we don't failback to lower weight clusters - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + 
MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check interval // Should still be on cluster2 (no failback to lower weight cluster1) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint2) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } } } @@ -176,39 +172,38 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException { @Test void testFailbackToHigherWeightClusterImmediately() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(100).gracePeriod(50).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(100).gracePeriod(50).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) - 
assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to force failover to cluster2 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (only healthy option) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check // Should have failed back to cluster1 immediately (higher weight, no stability period // required) await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint1) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); } } } @@ -216,45 +211,44 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException @Test void testUnhealthyClusterCancelsFailback() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // 
weight - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(200).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(200).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to force failover to cluster2 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (only healthy option) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again (should trigger failback attempt) - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait a bit Thread.sleep(100); // Make cluster1 unhealthy again before failback completes - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Wait past the original failback interval // Should still be on cluster2 (failback was cancelled due to cluster1 becoming 
unhealthy) await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint2) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } } } @@ -262,42 +256,41 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException { @Test void testMultipleClusterFailbackPriority() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lowest // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Medium // weight - MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }) + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight) - assertEquals(provider.getCluster(endpoint3), 
provider.getCluster()); + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); // Make cluster3 unhealthy to force failover to cluster2 (next highest weight) - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (highest weight among healthy clusters) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster3 healthy again - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback // Should fail back to cluster3 (highest weight) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint3) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint3) == provider.getDatabase()); } } } @@ -305,34 +298,33 @@ void testMultipleClusterFailbackPriority() throws InterruptedException { @Test void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiClusterClientConfig config = new 
MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(100).gracePeriod(200) // 200ms grace - // period + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(100).gracePeriod(200) // 200ms grace + // period .build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Now make cluster2 unhealthy - it should be disabled for grace period - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should failover to cluster1 - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Cluster2 should be in grace period - assertTrue(provider.getCluster(endpoint2).isInGracePeriod()); + assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); } } } @@ -340,51 +332,50 @@ void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException { @Test void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight 
- MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(50) // Short interval for testing + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(50) // Short interval for testing .gracePeriod(100) // Short grace period for testing .build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to start grace period and force failover - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should failover to cluster1 - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Cluster2 should be in grace period - assertTrue(provider.getCluster(endpoint2).isInGracePeriod()); + assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); // Make cluster2 healthy again while it's still in grace period - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + 
MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Should still be on cluster1 because cluster2 is in grace period - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Wait for grace period to expire // Cluster2 should no longer be in grace period await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> !provider.getCluster(endpoint2).isInGracePeriod()); + .until(() -> !provider.getDatabase(endpoint2).isInGracePeriod()); // Wait for failback check to run // Should now failback to cluster2 (higher weight) since grace period has expired await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) - .until(() -> provider.getCluster(endpoint2) == provider.getCluster()); + .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } } } diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java index fee216f2be..a200296e18 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; @ExtendWith(MockitoExtension.class) class FailbackMechanismUnitTest { @@ -26,17 +26,17 @@ void setUp() { @Test void testFailbackCheckIntervalConfiguration() { // Test default value - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, 
clientConfig).healthCheckEnabled(false).build(); - MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); assertEquals(120000, defaultConfig.getFailbackCheckInterval()); // Test custom value - MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(3000) + MultiDatabaseConfig customConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000) .build(); assertEquals(3000, customConfig.getFailbackCheckInterval()); @@ -44,18 +44,18 @@ void testFailbackCheckIntervalConfiguration() { @Test void testFailbackSupportedConfiguration() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test default (should be true) - MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); assertTrue(defaultConfig.isFailbackSupported()); // Test disabled - MultiClusterClientConfig disabledConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(false) + MultiDatabaseConfig disabledConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false) .build(); assertFalse(disabledConfig.isFailbackSupported()); 
@@ -63,19 +63,19 @@ void testFailbackSupportedConfiguration() { @Test void testFailbackCheckIntervalValidation() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test zero interval (should be allowed) - MultiClusterClientConfig zeroConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(0) + MultiDatabaseConfig zeroConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0) .build(); assertEquals(0, zeroConfig.getFailbackCheckInterval()); // Test negative interval (should be allowed - implementation decision) - MultiClusterClientConfig negativeConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackCheckInterval(-1000) + MultiDatabaseConfig negativeConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000) .build(); assertEquals(-1000, negativeConfig.getFailbackCheckInterval()); @@ -83,12 +83,12 @@ void testFailbackCheckIntervalValidation() { @Test void testBuilderChaining() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test that builder methods can be chained - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(true) + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) 
.failbackCheckInterval(2000).retryOnFailover(true).build(); assertTrue(config.isFailbackSupported()); @@ -99,47 +99,47 @@ void testBuilderChaining() { @Test void testGracePeriodConfiguration() { // Test default value - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); - MultiClusterClientConfig defaultConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); assertEquals(60000, defaultConfig.getGracePeriod()); // Test custom value - MultiClusterClientConfig customConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(5000).build(); + MultiDatabaseConfig customConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build(); assertEquals(5000, customConfig.getGracePeriod()); } @Test void testGracePeriodValidation() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test zero grace period (should be allowed) - MultiClusterClientConfig zeroConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(0).build(); + MultiDatabaseConfig zeroConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build(); assertEquals(0, zeroConfig.getGracePeriod()); // Test negative grace period (should be allowed - 
implementation decision) - MultiClusterClientConfig negativeConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).gracePeriod(-1000).build(); + MultiDatabaseConfig negativeConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(-1000).build(); assertEquals(-1000, negativeConfig.getGracePeriod()); } @Test void testGracePeriodBuilderChaining() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test that builder methods can be chained - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).failbackSupported(true) + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) .failbackCheckInterval(2000).gracePeriod(8000).retryOnFailover(true).build(); assertTrue(config.isFailbackSupported()); diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java index d1cb8b90e9..c43baf9933 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java @@ -15,10 +15,10 @@ import redis.clients.jedis.EndpointConfig; import redis.clients.jedis.HostAndPorts; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; import redis.clients.jedis.UnifiedJedis; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; -import redis.clients.jedis.MultiClusterClientConfig.StrategySupplier; +import 
redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; import redis.clients.jedis.scenario.RecommendedSettings; @@ -32,7 +32,7 @@ public class HealthCheckIntegrationTest { @Test public void testDisableHealthCheck() { // No health check strategy supplier means health check is disabled - MultiClusterPooledConnectionProvider customProvider = getMCCF(null); + MultiDatabaseConnectionProvider customProvider = getMCCF(null); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -43,11 +43,10 @@ public void testDisableHealthCheck() { @Test public void testDefaultStrategySupplier() { // Create a default strategy supplier that creates EchoStrategy instances - MultiClusterClientConfig.StrategySupplier defaultSupplier = (hostAndPort, - jedisClientConfig) -> { + MultiDatabaseConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> { return new EchoStrategy(hostAndPort, jedisClientConfig); }; - MultiClusterPooledConnectionProvider customProvider = getMCCF(defaultSupplier); + MultiDatabaseConnectionProvider customProvider = getMCCF(defaultSupplier); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -58,8 +57,7 @@ public void testDefaultStrategySupplier() { @Test public void testCustomStrategySupplier() { // Create a StrategySupplier that uses the JedisClientConfig when available - MultiClusterClientConfig.StrategySupplier strategySupplier = (hostAndPort, - jedisClientConfig) -> { + MultiDatabaseConfig.StrategySupplier strategySupplier = (hostAndPort, jedisClientConfig) -> { return new TestHealthCheckStrategy(HealthCheckStrategy.Config.builder().interval(500) 
.timeout(500).numProbes(1).policy(BuiltIn.ANY_SUCCESS).build(), (endpoint) -> { // Create connection per health check to avoid resource leak @@ -72,7 +70,7 @@ public void testCustomStrategySupplier() { }); }; - MultiClusterPooledConnectionProvider customProvider = getMCCF(strategySupplier); + MultiDatabaseConnectionProvider customProvider = getMCCF(strategySupplier); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -80,23 +78,23 @@ public void testCustomStrategySupplier() { } } - private MultiClusterPooledConnectionProvider getMCCF( - MultiClusterClientConfig.StrategySupplier strategySupplier) { - Function modifier = builder -> strategySupplier == null + private MultiDatabaseConnectionProvider getMCCF( + MultiDatabaseConfig.StrategySupplier strategySupplier) { + Function modifier = builder -> strategySupplier == null ? builder.healthCheckEnabled(false) : builder.healthCheckStrategySupplier(strategySupplier); - List clusterConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 }) + List databaseConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 }) .map(e -> modifier - .apply(MultiClusterClientConfig.ClusterConfig.builder(e.getHostAndPort(), clientConfig)) + .apply(MultiDatabaseConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)) .build()) .collect(Collectors.toList()); - MultiClusterClientConfig mccf = new MultiClusterClientConfig.Builder(clusterConfigs) - .retryMaxAttempts(1).retryWaitDuration(1).circuitBreakerSlidingWindowSize(1) + MultiDatabaseConfig mccf = new MultiDatabaseConfig.Builder(databaseConfigs).retryMaxAttempts(1) + .retryWaitDuration(1).circuitBreakerSlidingWindowSize(1) .circuitBreakerFailureRateThreshold(100).build(); - return new MultiClusterPooledConnectionProvider(mccf); + return new MultiDatabaseConnectionProvider(mccf); } // ========== Probe Logic Integration Tests ========== diff --git 
a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java index b7205fd808..a9a592de1f 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.Endpoint; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; import redis.clients.jedis.UnifiedJedis; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; @@ -338,7 +338,7 @@ void testEchoStrategyCustomIntervalTimeout() { @Test void testEchoStrategyDefaultSupplier() { - MultiClusterClientConfig.StrategySupplier supplier = EchoStrategy.DEFAULT; + MultiDatabaseConfig.StrategySupplier supplier = EchoStrategy.DEFAULT; HealthCheckStrategy strategy = supplier.get(testEndpoint, testConfig); assertInstanceOf(EchoStrategy.class, strategy); @@ -348,12 +348,12 @@ void testEchoStrategyDefaultSupplier() { @Test void testNewFieldLocations() { - // Test new field locations in ClusterConfig and MultiClusterClientConfig - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + // Test new field locations in DatabaseConfig and MultiDatabaseConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(testEndpoint, testConfig).weight(2.5f).build(); - MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).retryOnFailover(true) + MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true) .failbackSupported(false).build(); assertEquals(2.5f, clusterConfig.getWeight()); @@ -363,8 +363,8 @@ void testNewFieldLocations() { @Test void testDefaultValues() { - // Test default 
values in ClusterConfig - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + // Test default values in DatabaseConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(testEndpoint, testConfig).build(); assertEquals(1.0f, clusterConfig.getWeight()); // Default weight @@ -374,22 +374,22 @@ void testDefaultValues() { // health // check) - // Test default values in MultiClusterClientConfig - MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { clusterConfig }).build(); + // Test default values in MultiDatabaseConfig + MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); assertFalse(multiConfig.isRetryOnFailover()); // Default is false assertTrue(multiConfig.isFailbackSupported()); // Default is true } @Test - void testClusterConfigWithHealthCheckStrategy() { + void testDatabaseConfigWithHealthCheckStrategy() { HealthCheckStrategy customStrategy = mock(HealthCheckStrategy.class); - MultiClusterClientConfig.StrategySupplier supplier = (hostAndPort, + MultiDatabaseConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> customStrategy; - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(supplier).build(); assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -399,35 +399,34 @@ void testClusterConfigWithHealthCheckStrategy() { } @Test - void testClusterConfigWithStrategySupplier() { - MultiClusterClientConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> { + void testDatabaseConfigWithStrategySupplier() { + MultiDatabaseConfig.StrategySupplier customSupplier = (hostAndPort, 
jedisClientConfig) -> { return mock(HealthCheckStrategy.class); }; - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(customSupplier).build(); assertEquals(customSupplier, clusterConfig.getHealthCheckStrategySupplier()); } @Test - void testClusterConfigWithEchoStrategy() { - MultiClusterClientConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> { + void testDatabaseConfigWithEchoStrategy() { + MultiDatabaseConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> { return new EchoStrategy(hostAndPort, jedisClientConfig); }; - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(echoSupplier).build(); - MultiClusterClientConfig.StrategySupplier supplier = clusterConfig - .getHealthCheckStrategySupplier(); + MultiDatabaseConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier(); assertNotNull(supplier); assertInstanceOf(EchoStrategy.class, supplier.get(testEndpoint, testConfig)); } @Test - void testClusterConfigWithDefaultHealthCheck() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + void testDatabaseConfigWithDefaultHealthCheck() { + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(testEndpoint, testConfig).build(); // Should use default EchoStrategy assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -435,16 +434,16 @@ void testClusterConfigWithDefaultHealthCheck() { } @Test - void testClusterConfigWithDisabledHealthCheck() { - MultiClusterClientConfig.ClusterConfig clusterConfig = 
MultiClusterClientConfig.ClusterConfig + void testDatabaseConfigWithDisabledHealthCheck() { + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckEnabled(false).build(); assertNull(clusterConfig.getHealthCheckStrategySupplier()); } @Test - void testClusterConfigHealthCheckEnabledExplicitly() { - MultiClusterClientConfig.ClusterConfig clusterConfig = MultiClusterClientConfig.ClusterConfig + void testDatabaseConfigHealthCheckEnabledExplicitly() { + MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckEnabled(true).build(); assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -516,7 +515,7 @@ void testHealthCheckIntegration() throws InterruptedException { @Test void testStrategySupplierPolymorphism() { // Test that the polymorphic design works correctly - MultiClusterClientConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> { + MultiDatabaseConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> { if (jedisClientConfig != null) { return new EchoStrategy(hostAndPort, jedisClientConfig, HealthCheckStrategy.Config.builder().interval(500).timeout(250).numProbes(1).build()); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java index 90ff443794..973c854f6c 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java @@ -11,8 +11,8 @@ import redis.clients.jedis.HostAndPort; import redis.clients.jedis.HostAndPorts; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig; +import 
redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisValidationException; import static org.junit.jupiter.api.Assertions.*; @@ -23,7 +23,7 @@ public class MultiClusterDynamicEndpointUnitTest { - private MultiClusterPooledConnectionProvider provider; + private MultiDatabaseConnectionProvider provider; private JedisClientConfig clientConfig; private final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("standalone0"); private final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("standalone1"); @@ -33,42 +33,42 @@ void setUp() { clientConfig = DefaultJedisClientConfig.builder().build(); // Create initial provider with endpoint1 - ClusterConfig initialConfig = createClusterConfig(endpoint1.getHostAndPort(), 1.0f); + DatabaseConfig initialConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder( - new ClusterConfig[] { initialConfig }).build(); + MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder( + new DatabaseConfig[] { initialConfig }).build(); - provider = new MultiClusterPooledConnectionProvider(multiConfig); + provider = new MultiDatabaseConnectionProvider(multiConfig); } // Helper method to create cluster configurations - private ClusterConfig createClusterConfig(HostAndPort hostAndPort, float weight) { + private DatabaseConfig createDatabaseConfig(HostAndPort hostAndPort, float weight) { // Disable health check for unit tests to avoid real connections - return ClusterConfig.builder(hostAndPort, clientConfig).weight(weight).healthCheckEnabled(false) - .build(); + return DatabaseConfig.builder(hostAndPort, clientConfig).weight(weight) + .healthCheckEnabled(false).build(); } @Test void testAddNewCluster() { - ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f); + DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); // Should not throw exception 
assertDoesNotThrow(() -> provider.add(newConfig)); // Verify the cluster was added by checking it can be retrieved - assertNotNull(provider.getCluster(endpoint2.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint2.getHostAndPort())); } @Test void testAddDuplicateCluster() { - ClusterConfig duplicateConfig = createClusterConfig(endpoint1.getHostAndPort(), 2.0f); + DatabaseConfig duplicateConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 2.0f); // Should throw validation exception for duplicate endpoint assertThrows(JedisValidationException.class, () -> provider.add(duplicateConfig)); } @Test - void testAddNullClusterConfig() { + void testAddNullDatabaseConfig() { // Should throw validation exception for null config assertThrows(JedisValidationException.class, () -> provider.add(null)); } @@ -80,26 +80,26 @@ void testRemoveExistingCluster() { try (MockedConstruction mockedPool = mockPool(mockConnection)) { // Create initial provider with endpoint1 - ClusterConfig clusterConfig1 = createClusterConfig(endpoint1.getHostAndPort(), 1.0f); + DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiClusterClientConfig multiConfig = MultiClusterClientConfig - .builder(new ClusterConfig[] { clusterConfig1 }).build(); + MultiDatabaseConfig multiConfig = MultiDatabaseConfig + .builder(new DatabaseConfig[] { clusterConfig1 }).build(); try ( - MultiClusterPooledConnectionProvider providerWithMockedPool = new MultiClusterPooledConnectionProvider( + MultiDatabaseConnectionProvider providerWithMockedPool = new MultiDatabaseConnectionProvider( multiConfig)) { // Add endpoint2 as second cluster - ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f); + DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); providerWithMockedPool.add(newConfig); // Now remove endpoint1 (original cluster) assertDoesNotThrow(() -> providerWithMockedPool.remove(endpoint1.getHostAndPort())); // 
Verify endpoint1 was removed - assertNull(providerWithMockedPool.getCluster(endpoint1.getHostAndPort())); + assertNull(providerWithMockedPool.getDatabase(endpoint1.getHostAndPort())); // Verify endpoint2 still exists - assertNotNull(providerWithMockedPool.getCluster(endpoint2.getHostAndPort())); + assertNotNull(providerWithMockedPool.getDatabase(endpoint2.getHostAndPort())); } } } @@ -134,40 +134,40 @@ void testRemoveNullEndpoint() { @Test void testAddAndRemoveMultipleClusters() { // Add endpoint2 as second cluster - ClusterConfig config2 = createClusterConfig(endpoint2.getHostAndPort(), 2.0f); + DatabaseConfig config2 = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); // Create a third endpoint for this test HostAndPort endpoint3 = new HostAndPort("localhost", 6381); - ClusterConfig config3 = createClusterConfig(endpoint3, 3.0f); + DatabaseConfig config3 = createDatabaseConfig(endpoint3, 3.0f); provider.add(config2); provider.add(config3); // Verify all clusters exist - assertNotNull(provider.getCluster(endpoint1.getHostAndPort())); - assertNotNull(provider.getCluster(endpoint2.getHostAndPort())); - assertNotNull(provider.getCluster(endpoint3)); + assertNotNull(provider.getDatabase(endpoint1.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint2.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint3)); // Remove endpoint2 provider.remove(endpoint2.getHostAndPort()); // Verify correct cluster was removed - assertNull(provider.getCluster(endpoint2.getHostAndPort())); - assertNotNull(provider.getCluster(endpoint1.getHostAndPort())); - assertNotNull(provider.getCluster(endpoint3)); + assertNull(provider.getDatabase(endpoint2.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint1.getHostAndPort())); + assertNotNull(provider.getDatabase(endpoint3)); } @Test void testActiveClusterHandlingOnAdd() { // The initial cluster should be active - assertNotNull(provider.getCluster()); + assertNotNull(provider.getDatabase()); // Add 
endpoint2 with higher weight - ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 5.0f); + DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 5.0f); provider.add(newConfig); // Active cluster should still be valid (implementation may or may not switch) - assertNotNull(provider.getCluster()); + assertNotNull(provider.getDatabase()); } @Test @@ -177,28 +177,28 @@ void testActiveClusterHandlingOnRemove() { try (MockedConstruction mockedPool = mockPool(mockConnection)) { // Create initial provider with endpoint1 - ClusterConfig clusterConfig1 = createClusterConfig(endpoint1.getHostAndPort(), 1.0f); + DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiClusterClientConfig multiConfig = MultiClusterClientConfig - .builder(new ClusterConfig[] { clusterConfig1 }).build(); + MultiDatabaseConfig multiConfig = MultiDatabaseConfig + .builder(new DatabaseConfig[] { clusterConfig1 }).build(); try ( - MultiClusterPooledConnectionProvider providerWithMockedPool = new MultiClusterPooledConnectionProvider( + MultiDatabaseConnectionProvider providerWithMockedPool = new MultiDatabaseConnectionProvider( multiConfig)) { // Add endpoint2 as second cluster - ClusterConfig newConfig = createClusterConfig(endpoint2.getHostAndPort(), 2.0f); + DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); providerWithMockedPool.add(newConfig); // Get current active cluster - Object initialActiveCluster = providerWithMockedPool.getCluster(); + Object initialActiveCluster = providerWithMockedPool.getDatabase(); assertNotNull(initialActiveCluster); // Remove endpoint1 (original cluster, might be active) providerWithMockedPool.remove(endpoint1.getHostAndPort()); // Should still have an active cluster - assertNotNull(providerWithMockedPool.getCluster()); + assertNotNull(providerWithMockedPool.getDatabase()); } } } diff --git 
a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java index e4992fb92b..2742084082 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java @@ -8,8 +8,8 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException; import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException; import redis.clients.jedis.util.ReflectionTestUtil; @@ -22,34 +22,34 @@ /** * Tests for how getMaxNumFailoverAttempts and getDelayInBetweenFailoverAttempts impact - * MultiClusterPooledConnectionProvider behaviour when no healthy clusters are available. + * MultiDatabaseConnectionProvider behaviour when no healthy clusters are available. 
*/ public class MultiClusterFailoverAttemptsConfigTest { private HostAndPort endpoint0 = new HostAndPort("purposefully-incorrect", 0000); private HostAndPort endpoint1 = new HostAndPort("purposefully-incorrect", 0001); - private MultiClusterPooledConnectionProvider provider; + private MultiDatabaseConnectionProvider provider; @BeforeEach void setUp() throws Exception { JedisClientConfig clientCfg = DefaultJedisClientConfig.builder().build(); - ClusterConfig[] clusterConfigs = new ClusterConfig[] { - ClusterConfig.builder(endpoint0, clientCfg).weight(1.0f).healthCheckEnabled(false).build(), - ClusterConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false) + DatabaseConfig[] databaseConfigs = new DatabaseConfig[] { + DatabaseConfig.builder(endpoint0, clientCfg).weight(1.0f).healthCheckEnabled(false).build(), + DatabaseConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false) .build() }; - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs); + MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(databaseConfigs); // Use small values by default for tests unless overridden per-test via reflection setBuilderFailoverConfig(builder, /* maxAttempts */ 10, /* delayMs */ 12000); - provider = new MultiClusterPooledConnectionProvider(builder.build()); + provider = new MultiDatabaseConnectionProvider(builder.build()); // Disable both clusters to force handleNoHealthyCluster path - provider.getCluster(endpoint0).setDisabled(true); - provider.getCluster(endpoint1).setDisabled(true); + provider.getDatabase(endpoint0).setDisabled(true); + provider.getDatabase(endpoint1).setDisabled(true); } @AfterEach @@ -70,8 +70,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex // First call: should throw temporary and start the freeze window, incrementing attempt count to // 1 assertThrows(JedisTemporarilyNotAvailableException.class, - () -> 
MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster())); + () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, + SwitchReason.HEALTH_CHECK, provider.getDatabase())); int afterFirst = getProviderAttemptCount(); assertEquals(1, afterFirst); @@ -79,8 +79,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex // and should NOT increment the attempt count beyond 1 for (int i = 0; i < 50; i++) { assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster())); + () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, + SwitchReason.HEALTH_CHECK, provider.getDatabase())); assertEquals(1, getProviderAttemptCount()); } } @@ -98,8 +98,8 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr // First call: should throw temporary and start the freeze window, incrementing attempt count to // 1 assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster())); + () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, + SwitchReason.HEALTH_CHECK, provider.getDatabase())); int afterFirst = getProviderAttemptCount(); assertEquals(1, afterFirst); @@ -107,14 +107,14 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr // and should NOT increment the attempt count beyond 1 for (int i = 0; i < 50; i++) { assertThrows(JedisTemporarilyNotAvailableException.class, - () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); assertEquals(1, getProviderAttemptCount()); } 
await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(Duration.ofMillis(10)) .until(() -> { Exception e = assertThrows(JedisFailoverException.class, () -> provider - .switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); + .switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); return e instanceof JedisPermanentlyNotAvailableException; }); } @@ -130,15 +130,15 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex // Expect exactly 'maxAttempts' temporary exceptions, then a permanent one assertThrows(JedisTemporarilyNotAvailableException.class, - () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt // 1 assertThrows(JedisTemporarilyNotAvailableException.class, - () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt // 2 // Next should exceed max and become permanent assertThrows(JedisPermanentlyNotAvailableException.class, - () -> provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster())); // attempt + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); // attempt // 3 // -> // permanent @@ -146,17 +146,17 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex // ======== Test helper methods (reflection) ======== - private static void setBuilderFailoverConfig(MultiClusterClientConfig.Builder builder, - int maxAttempts, int delayMs) throws Exception { + private static void setBuilderFailoverConfig(MultiDatabaseConfig.Builder builder, int maxAttempts, + int delayMs) throws Exception { ReflectionTestUtil.setField(builder, "maxNumFailoverAttempts", maxAttempts); ReflectionTestUtil.setField(builder, 
"delayInBetweenFailoverAttempts", delayMs); } private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception { - // Access the underlying MultiClusterClientConfig inside provider and adjust fields for this + // Access the underlying MultiDatabaseConfig inside provider and adjust fields for this // test - Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig"); ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts); @@ -164,13 +164,13 @@ private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exce } private int getProviderMaxAttempts() throws Exception { - Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig"); return ReflectionTestUtil.getField(cfg, "maxNumFailoverAttempts"); } private int getProviderDelayMs() throws Exception { - Object cfg = ReflectionTestUtil.getField(provider, "multiClusterClientConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig"); return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts"); } diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java index 7b580042dc..9aed74a5ea 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java @@ -14,11 +14,11 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; import redis.clients.jedis.exceptions.JedisValidationException; /** - * Tests for MultiClusterPooledConnectionProvider initialization edge 
cases + * Tests for MultiDatabaseConnectionProvider initialization edge cases */ @ExtendWith(MockitoExtension.class) public class MultiClusterInitializationTest { @@ -49,30 +49,29 @@ private MockedConstruction mockPool() { void testInitializationWithMixedHealthCheckConfiguration() { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with mixed health check configuration - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false) // No health // check .build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) .healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With // health // check .build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Should initialize successfully - assertNotNull(provider.getCluster()); + assertNotNull(provider.getDatabase()); // Should select cluster1 (no health check, assumed healthy) or cluster2 based on weight // Since cluster2 has higher weight and health checks, it should be selected if healthy - assertTrue(provider.getCluster() == provider.getCluster(endpoint1) - || provider.getCluster() == provider.getCluster(endpoint2)); + assertTrue(provider.getDatabase() == provider.getDatabase(endpoint1) + || 
provider.getDatabase() == provider.getDatabase(endpoint2)); } } } @@ -81,20 +80,19 @@ void testInitializationWithMixedHealthCheckConfiguration() { void testInitializationWithAllHealthChecksDisabled() { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with no health checks - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(3.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Should select cluster2 (highest weight, no health checks) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); } } } @@ -102,16 +100,15 @@ void testInitializationWithAllHealthChecksDisabled() { @Test void testInitializationWithSingleCluster() { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig 
config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Should select the only available cluster - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); } } } @@ -119,43 +116,41 @@ void testInitializationWithSingleCluster() { @Test void testErrorHandlingWithNullConfiguration() { assertThrows(JedisValidationException.class, () -> { - new MultiClusterPooledConnectionProvider(null); + new MultiDatabaseConnectionProvider(null); }); } @Test void testErrorHandlingWithEmptyClusterArray() { assertThrows(JedisValidationException.class, () -> { - new MultiClusterClientConfig.Builder(new MultiClusterClientConfig.ClusterConfig[0]).build(); + new MultiDatabaseConfig.Builder(new MultiDatabaseConfig.DatabaseConfig[0]).build(); }); } @Test - void testErrorHandlingWithNullClusterConfig() { + void testErrorHandlingWithNullDatabaseConfig() { assertThrows(IllegalArgumentException.class, () -> { - new MultiClusterClientConfig.Builder(new MultiClusterClientConfig.ClusterConfig[] { null }) - .build(); + new MultiDatabaseConfig.Builder(new MultiDatabaseConfig.DatabaseConfig[] { null }).build(); }); } @Test void testInitializationWithZeroWeights() { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(0.0f) // Zero weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig 
cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.0f) // Zero weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Should still initialize and select one of the clusters - assertNotNull(provider.getCluster()); + assertNotNull(provider.getDatabase()); } } } diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java deleted file mode 100644 index f5076694c8..0000000000 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderHelper.java +++ /dev/null @@ -1,20 +0,0 @@ -package redis.clients.jedis.mcf; - -import redis.clients.jedis.Endpoint; - -public class MultiClusterPooledConnectionProviderHelper { - - public static void onHealthStatusChange(MultiClusterPooledConnectionProvider provider, - Endpoint endpoint, HealthStatus oldStatus, HealthStatus newStatus) { - provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus)); - } - - public static void periodicFailbackCheck(MultiClusterPooledConnectionProvider provider) { - provider.periodicFailbackCheck(); - } - - public static Endpoint switchToHealthyCluster(MultiClusterPooledConnectionProvider provider, - SwitchReason reason, MultiClusterPooledConnectionProvider.Cluster iterateFrom) { - return provider.switchToHealthyCluster(reason, 
iterateFrom); - } -} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java new file mode 100644 index 0000000000..a88e53feed --- /dev/null +++ b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java @@ -0,0 +1,20 @@ +package redis.clients.jedis.mcf; + +import redis.clients.jedis.Endpoint; + +public class MultiDatabaseConnectionProviderHelper { + + public static void onHealthStatusChange(MultiDatabaseConnectionProvider provider, + Endpoint endpoint, HealthStatus oldStatus, HealthStatus newStatus) { + provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus)); + } + + public static void periodicFailbackCheck(MultiDatabaseConnectionProvider provider) { + provider.periodicFailbackCheck(); + } + + public static Endpoint switchToHealthyCluster(MultiDatabaseConnectionProvider provider, + SwitchReason reason, MultiDatabaseConnectionProvider.Database iterateFrom) { + return provider.switchToHealthyDatabase(reason, iterateFrom); + } +} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java similarity index 69% rename from src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java index 88b2948016..fa84564645 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterPooledConnectionProviderTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java @@ -5,10 +5,10 @@ import org.awaitility.Durations; import org.junit.jupiter.api.*; import redis.clients.jedis.*; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import 
redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.exceptions.JedisValidationException; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider.Cluster; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException; import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException; @@ -23,27 +23,27 @@ import static org.junit.jupiter.api.Assertions.*; /** - * @see MultiClusterPooledConnectionProvider + * @see MultiDatabaseConnectionProvider */ @Tag("integration") -public class MultiClusterPooledConnectionProviderTest { +public class MultiDatabaseConnectionProviderTest { private final EndpointConfig endpointStandalone0 = HostAndPorts.getRedisEndpoint("standalone0"); private final EndpointConfig endpointStandalone1 = HostAndPorts.getRedisEndpoint("standalone1"); - private MultiClusterPooledConnectionProvider provider; + private MultiDatabaseConnectionProvider provider; @BeforeEach public void setUp() { - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = ClusterConfig.builder(endpointStandalone0.getHostAndPort(), + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = DatabaseConfig.builder(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build()).weight(0.5f).build(); - clusterConfigs[1] = ClusterConfig.builder(endpointStandalone1.getHostAndPort(), + databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(), endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build(); - provider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(clusterConfigs).build()); + provider = new MultiDatabaseConnectionProvider( + new MultiDatabaseConfig.Builder(databaseConfigs).build()); } @AfterEach @@ -55,7 
+55,7 @@ public void destroy() { @Test public void testCircuitBreakerForcedTransitions() { - CircuitBreaker circuitBreaker = provider.getClusterCircuitBreaker(); + CircuitBreaker circuitBreaker = provider.getDatabaseCircuitBreaker(); circuitBreaker.getState(); if (CircuitBreaker.State.FORCED_OPEN.equals(circuitBreaker.getState())) @@ -70,45 +70,46 @@ public void testCircuitBreakerForcedTransitions() { @Test public void testIterateActiveCluster() throws InterruptedException { - waitForClustersToGetHealthy(provider.getCluster(endpointStandalone0.getHostAndPort()), - provider.getCluster(endpointStandalone1.getHostAndPort())); + waitForClustersToGetHealthy(provider.getDatabase(endpointStandalone0.getHostAndPort()), + provider.getDatabase(endpointStandalone1.getHostAndPort())); - Endpoint e2 = provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster()); + Endpoint e2 = provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, + provider.getDatabase()); assertEquals(endpointStandalone1.getHostAndPort(), e2); } @Test public void testCanIterateOnceMore() { Endpoint endpoint0 = endpointStandalone0.getHostAndPort(); - waitForClustersToGetHealthy(provider.getCluster(endpoint0), - provider.getCluster(endpointStandalone1.getHostAndPort())); + waitForClustersToGetHealthy(provider.getDatabase(endpoint0), + provider.getDatabase(endpointStandalone1.getHostAndPort())); - provider.setActiveCluster(endpoint0); - provider.getCluster().setDisabled(true); - provider.switchToHealthyCluster(SwitchReason.HEALTH_CHECK, provider.getCluster(endpoint0)); + provider.setActiveDatabase(endpoint0); + provider.getDatabase().setDisabled(true); + provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase(endpoint0)); - assertFalse(provider.canIterateFrom(provider.getCluster())); + assertFalse(provider.canIterateFrom(provider.getDatabase())); } - private void waitForClustersToGetHealthy(Cluster... 
clusters) { + private void waitForClustersToGetHealthy(Database... clusters) { Awaitility.await().pollInterval(Durations.ONE_HUNDRED_MILLISECONDS) .atMost(Durations.TWO_SECONDS) - .until(() -> Arrays.stream(clusters).allMatch(Cluster::isHealthy)); + .until(() -> Arrays.stream(clusters).allMatch(Database::isHealthy)); } @Test public void testRunClusterFailoverPostProcessor() { - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = ClusterConfig + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = DatabaseConfig .builder(new HostAndPort("purposefully-incorrect", 0000), DefaultJedisClientConfig.builder().build()) .weight(0.5f).healthCheckEnabled(false).build(); - clusterConfigs[1] = ClusterConfig + databaseConfigs[1] = DatabaseConfig .builder(new HostAndPort("purposefully-incorrect", 0001), DefaultJedisClientConfig.builder().build()) .weight(0.4f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clusterConfigs); + MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(databaseConfigs); // Configures a single failed command to trigger an open circuit on the next subsequent failure builder.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(1) @@ -116,9 +117,9 @@ public void testRunClusterFailoverPostProcessor() { AtomicBoolean isValidTest = new AtomicBoolean(false); - MultiClusterPooledConnectionProvider localProvider = new MultiClusterPooledConnectionProvider( + MultiDatabaseConnectionProvider localProvider = new MultiDatabaseConnectionProvider( builder.build()); - localProvider.setClusterSwitchListener(a -> { + localProvider.setDatabaseSwitchListener(a -> { isValidTest.set(true); }); @@ -138,21 +139,23 @@ public void testRunClusterFailoverPostProcessor() { @Test public void testSetActiveMultiClusterIndexEqualsZero() { - assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(null)); // Should - // 
throw an - // exception + assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should + // throw + // an + // exception } @Test public void testSetActiveMultiClusterIndexLessThanZero() { - assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(null)); // Should - // throw an - // exception + assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should + // throw + // an + // exception } @Test public void testSetActiveMultiClusterIndexOutOfRange() { - assertThrows(JedisValidationException.class, () -> provider.setActiveCluster(new Endpoint() { + assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(new Endpoint() { @Override public String getHost() { return "purposefully-incorrect"; @@ -171,15 +174,14 @@ public void testConnectionPoolConfigApplied() { poolConfig.setMaxTotal(8); poolConfig.setMaxIdle(4); poolConfig.setMinIdle(1); - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = new ClusterConfig(endpointStandalone0.getHostAndPort(), + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = new DatabaseConfig(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build(), poolConfig); - clusterConfigs[1] = new ClusterConfig(endpointStandalone1.getHostAndPort(), + databaseConfigs[1] = new DatabaseConfig(endpointStandalone1.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build(), poolConfig); - try ( - MultiClusterPooledConnectionProvider customProvider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(clusterConfigs).build())) { - MultiClusterPooledConnectionProvider.Cluster activeCluster = customProvider.getCluster(); + try (MultiDatabaseConnectionProvider customProvider = new MultiDatabaseConnectionProvider( + new MultiDatabaseConfig.Builder(databaseConfigs).build())) { + MultiDatabaseConnectionProvider.Database 
activeCluster = customProvider.getDatabase(); ConnectionPool connectionPool = activeCluster.getConnectionPool(); assertEquals(8, connectionPool.getMaxTotal()); assertEquals(4, connectionPool.getMaxIdle()); @@ -202,13 +204,13 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException { }); // Create new provider with health check strategy (don't use the setUp() provider) - ClusterConfig config = ClusterConfig + DatabaseConfig config = DatabaseConfig .builder(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build()) .healthCheckStrategy(countingStrategy).build(); - MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(Collections.singletonList(config)).build()); + MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( + new MultiDatabaseConfig.Builder(Collections.singletonList(config)).build()); try { // Wait for some health checks to occur @@ -236,22 +238,22 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException { @Test public void userCommand_firstTemporary_thenPermanent_inOrder() { - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = ClusterConfig.builder(endpointStandalone0.getHostAndPort(), + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = DatabaseConfig.builder(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build()).weight(0.5f).build(); - clusterConfigs[1] = ClusterConfig.builder(endpointStandalone1.getHostAndPort(), + databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(), endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build(); - MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100) + 
MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( + new MultiDatabaseConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) .maxNumFailoverAttempts(2).retryMaxAttempts(1).build()); try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) { jedis.get("foo"); // Disable both clusters so any attempt to switch results in 'no healthy cluster' path - testProvider.getCluster(endpointStandalone0.getHostAndPort()).setDisabled(true); - testProvider.getCluster(endpointStandalone1.getHostAndPort()).setDisabled(true); + testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true); + testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true); // Simulate user running a command that fails and triggers failover iteration assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo")); @@ -266,12 +268,12 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() { @Test public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent_inOrder() { - ClusterConfig[] clusterConfigs = new ClusterConfig[2]; - clusterConfigs[0] = ClusterConfig + DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; + databaseConfigs[0] = DatabaseConfig .builder(endpointStandalone0.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build()) .weight(0.5f).healthCheckEnabled(false).build(); - clusterConfigs[1] = ClusterConfig + databaseConfigs[1] = DatabaseConfig .builder(endpointStandalone1.getHostAndPort(), endpointStandalone1.getClientConfigBuilder().build()) .weight(0.3f).healthCheckEnabled(false).build(); @@ -279,8 +281,8 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent // ATTENTION: these configuration settings are not random and // adjusted to get exact numbers of failures with exact exception types // and open to impact from other defaulted values withing the components in use. 
- MultiClusterPooledConnectionProvider testProvider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder(clusterConfigs).delayInBetweenFailoverAttempts(100) + MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( + new MultiDatabaseConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5) .circuitBreakerFailureRateThreshold(60).build()) { }; @@ -289,7 +291,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent jedis.get("foo"); // disable most weighted cluster so that it will fail on initial requests - testProvider.getCluster(endpointStandalone0.getHostAndPort()).setDisabled(true); + testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true); Exception e = assertThrows(JedisConnectionException.class, () -> jedis.get("foo")); assertEquals(JedisConnectionException.class, e.getClass()); @@ -298,7 +300,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent assertEquals(JedisConnectionException.class, e.getClass()); // then disable the second ones - testProvider.getCluster(endpointStandalone1.getHostAndPort()).setDisabled(true); + testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true); assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo")); assertThrows(JedisTemporarilyNotAvailableException.class, () -> jedis.get("foo")); diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java index d657e75829..1496ee3815 100644 --- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java +++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java @@ -2,7 +2,7 @@ import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.*; -import static 
redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper.onHealthStatusChange; +import static redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper.onHealthStatusChange; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -14,7 +14,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; @ExtendWith(MockitoExtension.class) class PeriodicFailbackTest { @@ -42,33 +42,32 @@ private MockedConstruction mockPool() { @Test void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(100).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(100).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) - 
assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Start grace period for cluster2 manually - provider.getCluster(endpoint2).setGracePeriod(); - provider.getCluster(endpoint2).setDisabled(true); + provider.getDatabase(endpoint2).setGracePeriod(); + provider.getDatabase(endpoint2).setDisabled(true); // Force failover to cluster1 since cluster2 is disabled - provider.switchToHealthyCluster(SwitchReason.FORCED, provider.getCluster(endpoint2)); + provider.switchToHealthyDatabase(SwitchReason.FORCED, provider.getDatabase(endpoint2)); // Manually trigger periodic check - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); // Should still be on cluster1 (cluster2 is in grace period) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); } } } @@ -76,47 +75,46 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException @Test void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); 
// Add - // grace - // period + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + .failbackCheckInterval(50).gracePeriod(100).build(); // Add + // grace + // period - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 (cluster2 is in grace period) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Verify cluster2 is in grace period - assertTrue(provider.getCluster(endpoint2).isInGracePeriod()); + assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); // Make cluster2 healthy again (but it's still in grace period) onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Trigger periodic check immediately - should still be on cluster1 - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Wait for grace period to expire Thread.sleep(150); // Trigger periodic check after grace period expires - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); // Should have 
failed back to cluster2 (higher weight, grace period expired) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); } } } @@ -124,27 +122,25 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException { @Test void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }) - .failbackSupported(false) // Disabled + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled .failbackCheckInterval(50).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 - assertEquals(provider.getCluster(endpoint1), 
provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster2 healthy again onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); @@ -153,10 +149,10 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException Thread.sleep(100); // Trigger periodic check - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); // Should still be on cluster1 (failback disabled) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); } } } @@ -166,38 +162,37 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx try (MockedConstruction mockedPool = mockPool()) { HostAndPort endpoint3 = new HostAndPort("localhost", 6381); - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }) + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, 
cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add // grace // period - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - config)) { + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight: 3.0f vs 2.0f vs 1.0f) - assertEquals(provider.getCluster(endpoint3), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); // Make cluster3 unhealthy to force failover to cluster2 (next highest weight) onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (weight 2.0f, higher than cluster1's 1.0f) - assertEquals(provider.getCluster(endpoint2), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 (only healthy cluster left) - assertEquals(provider.getCluster(endpoint1), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster2 and cluster3 healthy again onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); @@ -207,10 +202,10 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx Thread.sleep(150); // Trigger periodic check - MultiClusterPooledConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); // Should have failed back to cluster3 (highest weight, grace period expired) - assertEquals(provider.getCluster(endpoint3), provider.getCluster()); + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); } } } diff --git 
a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java index 3be7d29656..25788fa916 100644 --- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java +++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java @@ -18,8 +18,8 @@ import redis.clients.jedis.exceptions.JedisAccessControlException; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.mcf.ClusterSwitchEventArgs; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper; import redis.clients.jedis.mcf.SwitchReason; import redis.clients.jedis.util.IOUtils; @@ -47,10 +47,10 @@ public class AutomaticFailoverTest { private Jedis jedis2; - private List getClusterConfigs( + private List getDatabaseConfigs( JedisClientConfig clientConfig, HostAndPort... 
hostPorts) { return Arrays.stream(hostPorts) - .map(hp -> new MultiClusterClientConfig.ClusterConfig(hp, clientConfig)) + .map(hp -> new MultiDatabaseConfig.DatabaseConfig(hp, clientConfig)) .collect(Collectors.toList()); } @@ -68,17 +68,17 @@ public void cleanUp() { @Test public void pipelineWithSwitch() { - MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) + MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + new MultiDatabaseConfig.Builder( + getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .build()); try (UnifiedJedis client = new UnifiedJedis(provider)) { AbstractPipeline pipe = client.pipelined(); pipe.set("pstr", "foobar"); pipe.hset("phash", "foo", "bar"); - MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getCluster()); + MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, + SwitchReason.HEALTH_CHECK, provider.getDatabase()); pipe.sync(); } @@ -88,17 +88,17 @@ public void pipelineWithSwitch() { @Test public void transactionWithSwitch() { - MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( - new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) + MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + new MultiDatabaseConfig.Builder( + getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .build()); try (UnifiedJedis client = new UnifiedJedis(provider)) { AbstractTransaction tx = client.multi(); tx.set("tstr", "foobar"); tx.hset("thash", "foo", "bar"); - MultiClusterPooledConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, 
provider.getCluster()); + MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, + SwitchReason.HEALTH_CHECK, provider.getDatabase()); assertEquals(Arrays.asList("OK", 1L), tx.exec()); } @@ -112,16 +112,16 @@ public void commandFailoverUnresolvableHost() { int slidingWindowSize = 2; HostAndPort unresolvableHostAndPort = new HostAndPort("unresolvable", 6379); - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort())) + MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + getDatabaseConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort())) .retryWaitDuration(1).retryMaxAttempts(1) .circuitBreakerSlidingWindowSize(slidingWindowSize) .circuitBreakerMinNumOfFailures(slidingWindowMinFails); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider( + MultiDatabaseConnectionProvider connectionProvider = new MultiDatabaseConnectionProvider( builder.build()); - connectionProvider.setClusterSwitchListener(failoverReporter); + connectionProvider.setDatabaseSwitchListener(failoverReporter); UnifiedJedis jedis = new UnifiedJedis(connectionProvider); @@ -152,8 +152,8 @@ public void commandFailover() { int slidingWindowSize = 6; int retryMaxAttempts = 3; - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) + MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .retryMaxAttempts(retryMaxAttempts) // Default // is // 3 @@ -162,9 +162,9 @@ public void commandFailover() { .circuitBreakerSlidingWindowSize(slidingWindowSize); RedisFailoverReporter failoverReporter = new 
RedisFailoverReporter(); - MultiClusterPooledConnectionProvider connectionProvider = new MultiClusterPooledConnectionProvider( + MultiDatabaseConnectionProvider connectionProvider = new MultiDatabaseConnectionProvider( builder.build()); - connectionProvider.setClusterSwitchListener(failoverReporter); + connectionProvider.setDatabaseSwitchListener(failoverReporter); UnifiedJedis jedis = new UnifiedJedis(connectionProvider); @@ -194,15 +194,15 @@ public void commandFailover() { public void pipelineFailover() { int slidingWindowSize = 10; - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) + MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .circuitBreakerSlidingWindowSize(slidingWindowSize) .fallbackExceptionList(Collections.singletonList(JedisConnectionException.class)); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiClusterPooledConnectionProvider cacheProvider = new MultiClusterPooledConnectionProvider( + MultiDatabaseConnectionProvider cacheProvider = new MultiDatabaseConnectionProvider( builder.build()); - cacheProvider.setClusterSwitchListener(failoverReporter); + cacheProvider.setDatabaseSwitchListener(failoverReporter); UnifiedJedis jedis = new UnifiedJedis(cacheProvider); @@ -226,15 +226,15 @@ public void pipelineFailover() { public void failoverFromAuthError() { int slidingWindowSize = 10; - MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder( - getClusterConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(), + MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + getDatabaseConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(), workingEndpoint.getHostAndPort())).circuitBreakerSlidingWindowSize(slidingWindowSize) 
.fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class)); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiClusterPooledConnectionProvider cacheProvider = new MultiClusterPooledConnectionProvider( + MultiDatabaseConnectionProvider cacheProvider = new MultiDatabaseConnectionProvider( builder.build()); - cacheProvider.setClusterSwitchListener(failoverReporter); + cacheProvider.setDatabaseSwitchListener(failoverReporter); UnifiedJedis jedis = new UnifiedJedis(cacheProvider); diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java index bde6ab7fc6..c3f974dcd5 100644 --- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java +++ b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java @@ -14,13 +14,13 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiClusterClientConfig; +import redis.clients.jedis.MultiDatabaseConfig; import redis.clients.jedis.mcf.HealthStatus; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProviderHelper; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper; /** - * Tests for MultiClusterPooledConnectionProvider event handling behavior during initialization and + * Tests for MultiDatabaseConnectionProvider event handling behavior during initialization and * throughout its lifecycle with HealthStatusChangeEvents. 
*/ @ExtendWith(MockitoExtension.class) @@ -52,30 +52,30 @@ private MockedConstruction mockConnectionPool() { void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { // Create clusters without health checks - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { - assertFalse(provider.getCluster(endpoint1).isInGracePeriod()); - assertEquals(provider.getCluster(), provider.getCluster(endpoint1)); + assertFalse(provider.getDatabase(endpoint1).isInGracePeriod()); + assertEquals(provider.getDatabase(), provider.getDatabase(endpoint1)); // This should process immediately since initialization is complete assertDoesNotThrow(() -> { - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); }, "Post-initialization events should be processed immediately"); // Verify the cluster has changed according to the 
UNHEALTHY status - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "UNHEALTHY status on active cluster should cause a grace period"); - assertNotEquals(provider.getCluster(), provider.getCluster(endpoint1), + assertNotEquals(provider.getDatabase(), provider.getDatabase(endpoint1), "UNHEALTHY status on active cluster should cause a failover"); } } @@ -84,46 +84,46 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { @Test void postInit_nonActive_changes_do_not_switch_active() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { // Verify initial state - assertEquals(provider.getCluster(endpoint1), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(), "Should start with endpoint1 active"); // Simulate multiple rapid events for the same endpoint (post-init behavior) - 
MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // After first UNHEALTHY on active cluster: it enters grace period and provider fails over - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Active cluster should enter grace period"); - assertEquals(provider.getCluster(endpoint2), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(), "Should fail over to endpoint2"); - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Healthy event for non-active cluster should not immediately revert active cluster - assertEquals(provider.getCluster(endpoint2), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(), "Active cluster should remain endpoint2"); - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Grace period should still be in effect"); - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Further UNHEALTHY for non-active cluster is a no-op - assertEquals(provider.getCluster(endpoint2), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(), "Active cluster unchanged"); - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), "Still in grace period"); + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Still in grace period"); } } } @@ -131,26 +131,26 @@ void 
postInit_nonActive_changes_do_not_switch_active() throws Exception { @Test void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { // This test verifies that multiple endpoints are properly initialized // Verify both clusters are initialized properly - assertNotNull(provider.getCluster(endpoint1), "Cluster 1 should be available"); - assertNotNull(provider.getCluster(endpoint2), "Cluster 2 should be available"); + assertNotNull(provider.getDatabase(endpoint1), "Database 1 should be available"); + assertNotNull(provider.getDatabase(endpoint2), "Database 2 should be available"); // Both should be healthy (no health checks = assumed healthy) - assertTrue(provider.getCluster(endpoint1).isHealthy(), "Cluster 1 should be healthy"); - assertTrue(provider.getCluster(endpoint2).isHealthy(), "Cluster 2 should be healthy"); + assertTrue(provider.getDatabase(endpoint1).isHealthy(), "Database 1 should be 
healthy"); + assertTrue(provider.getDatabase(endpoint2).isHealthy(), "Database 2 should be healthy"); } } } @@ -158,22 +158,22 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception @Test void init_single_cluster_initializes_and_is_healthy() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1 }).build(); // This test verifies that the provider initializes correctly and doesn't lose events // In practice, with health checks disabled, no events should be generated during init - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { // Verify successful initialization - assertNotNull(provider.getCluster(), "Provider should have initialized successfully"); - assertEquals(provider.getCluster(endpoint1), provider.getCluster(), + assertNotNull(provider.getDatabase(), "Provider should have initialized successfully"); + assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(), "Should have selected the configured cluster"); - assertTrue(provider.getCluster().isHealthy(), - "Cluster should be healthy (assumed healthy with no health checks)"); + assertTrue(provider.getDatabase().isHealthy(), + "Database should be healthy (assumed healthy with no health checks)"); } } } @@ -183,42 +183,42 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception { 
@Test void postInit_two_hop_failover_chain_respected() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster3 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(0.2f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2, cluster3 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { // First event: endpoint1 (active) becomes UNHEALTHY -> failover to endpoint2, endpoint1 // enters grace - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Endpoint1 should be in grace after unhealthy"); - assertEquals(provider.getCluster(endpoint2), provider.getCluster(), + 
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(), "Should have failed over to endpoint2"); // Second event: endpoint2 (now active) becomes UNHEALTHY -> failover to endpoint3 - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - assertTrue(provider.getCluster(endpoint2).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint2).isInGracePeriod(), "Endpoint2 should be in grace after unhealthy"); - assertEquals(provider.getCluster(endpoint3), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(), "Should have failed over to endpoint3"); // Third event: endpoint1 becomes HEALTHY again -> no immediate switch due to grace period // behavior - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); - assertEquals(provider.getCluster(endpoint3), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(), "Active cluster should remain endpoint3"); } } @@ -227,33 +227,33 @@ void postInit_two_hop_failover_chain_respected() throws Exception { @Test void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiClusterClientConfig.ClusterConfig cluster1 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiClusterClientConfig.ClusterConfig cluster2 = MultiClusterClientConfig.ClusterConfig + MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig .builder(endpoint2, 
clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiClusterClientConfig config = new MultiClusterClientConfig.Builder( - new MultiClusterClientConfig.ClusterConfig[] { cluster1, cluster2 }).build(); + MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( + new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider( + try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { // Verify initial state - assertEquals(HealthStatus.HEALTHY, provider.getCluster(endpoint1).getHealthStatus(), + assertEquals(HealthStatus.HEALTHY, provider.getDatabase(endpoint1).getHealthStatus(), "Should start as HEALTHY"); // Send rapid sequence of events post-init - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // triggers failover and grace - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // non-active cluster becomes healthy - MultiClusterPooledConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // still non-active and in grace; no change // Final expectations: endpoint1 is in grace, provider remains on endpoint2 - assertTrue(provider.getCluster(endpoint1).isInGracePeriod(), + assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Endpoint1 should be in grace period"); - assertEquals(provider.getCluster(endpoint2), provider.getCluster(), + assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(), "Active cluster should remain 
endpoint2"); } } diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java index a6deb256eb..400fd65404 100644 --- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java +++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java @@ -9,10 +9,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import redis.clients.jedis.*; -import redis.clients.jedis.MultiClusterClientConfig.ClusterConfig; +import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.mcf.ClusterSwitchEventArgs; -import redis.clients.jedis.mcf.MultiClusterPooledConnectionProvider; +import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; import redis.clients.jedis.util.ClientTestUtil; import java.io.IOException; @@ -62,13 +62,13 @@ public void testFailover() { .socketTimeoutMillis(SOCKET_TIMEOUT_MS) .connectionTimeoutMillis(CONNECTION_TIMEOUT_MS).build(); - ClusterConfig primary = ClusterConfig.builder(endpoint.getHostAndPort(0), config) + DatabaseConfig primary = DatabaseConfig.builder(endpoint.getHostAndPort(0), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build(); - ClusterConfig secondary = ClusterConfig.builder(endpoint.getHostAndPort(1), config) + DatabaseConfig secondary = DatabaseConfig.builder(endpoint.getHostAndPort(1), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build(); - MultiClusterClientConfig multiConfig = MultiClusterClientConfig.builder() + MultiDatabaseConfig multiConfig = MultiDatabaseConfig.builder() .endpoint(primary) .endpoint(secondary) .circuitBreakerSlidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS @@ -208,9 +208,9 @@ public void accept(ClusterSwitchEventArgs e) { throw new RuntimeException(e); } - MultiClusterPooledConnectionProvider provider = 
ClientTestUtil.getConnectionProvider(client); - ConnectionPool pool1 = provider.getCluster(endpoint.getHostAndPort(0)).getConnectionPool(); - ConnectionPool pool2 = provider.getCluster(endpoint.getHostAndPort(1)).getConnectionPool(); + MultiDatabaseConnectionProvider provider = ClientTestUtil.getConnectionProvider(client); + ConnectionPool pool1 = provider.getDatabase(endpoint.getHostAndPort(0)).getConnectionPool(); + ConnectionPool pool2 = provider.getDatabase(endpoint.getHostAndPort(1)).getConnectionPool(); await().atMost(Duration.ofSeconds(1)).until(() -> pool1.getNumActive() == 0); await().atMost(Duration.ofSeconds(1)).until(() -> pool2.getNumActive() == 0); From 18ec5ee3a0e873e734f1b66b9106290b1533a67e Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 14:08:39 +0300 Subject: [PATCH 03/18] Rename MultiDatabaseConfig to MultiDbConfig --- pom.xml | 2 +- .../redis/clients/jedis/MultiDbClient.java | 6 +- ...DatabaseConfig.java => MultiDbConfig.java} | 26 +++---- .../jedis/builders/MultiDbClientBuilder.java | 8 +-- .../mcf/CircuitBreakerThresholdsAdapter.java | 8 +-- .../redis/clients/jedis/mcf/EchoStrategy.java | 2 +- .../jedis/mcf/JedisFailoverException.java | 10 +-- .../mcf/MultiDatabaseConnectionProvider.java | 66 ++++++++--------- .../clients/jedis/MultiDbClientTest.java | 8 +-- .../failover/FailoverIntegrationTest.java | 14 ++-- .../mcf/ActiveActiveLocalFailoverTest.java | 6 +- .../mcf/CircuitBreakerThresholdsTest.java | 16 ++--- .../mcf/ClusterEvaluateThresholdsTest.java | 8 +-- .../clients/jedis/mcf/DefaultValuesTest.java | 8 +-- .../mcf/FailbackMechanismIntegrationTest.java | 70 +++++++++---------- .../jedis/mcf/FailbackMechanismUnitTest.java | 64 ++++++++--------- .../jedis/mcf/HealthCheckIntegrationTest.java | 16 ++--- .../clients/jedis/mcf/HealthCheckTest.java | 42 +++++------ .../MultiClusterDynamicEndpointUnitTest.java | 10 +-- ...ultiClusterFailoverAttemptsConfigTest.java | 16 ++--- .../mcf/MultiClusterInitializationTest.java | 36 +++++----- 
.../MultiDatabaseConnectionProviderTest.java | 14 ++-- .../jedis/mcf/PeriodicFailbackTest.java | 36 +++++----- .../jedis/misc/AutomaticFailoverTest.java | 16 ++--- ...erProviderHealthStatusChangeEventTest.java | 50 ++++++------- .../scenario/ActiveActiveFailoverTest.java | 4 +- 26 files changed, 281 insertions(+), 281 deletions(-) rename src/main/java/redis/clients/jedis/{MultiDatabaseConfig.java => MultiDbConfig.java} (98%) diff --git a/pom.xml b/pom.xml index 957561819d..e056cb1748 100644 --- a/pom.xml +++ b/pom.xml @@ -488,7 +488,7 @@ **/Health*.java **/*IT.java **/scenario/RestEndpointUtil.java - src/main/java/redis/clients/jedis/MultiDatabaseConfig.java + src/main/java/redis/clients/jedis/MultiDbConfig.java src/main/java/redis/clients/jedis/HostAndPort.java **/builders/*.java **/MultiDb*.java diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java index 9224307a56..2e008f5c98 100644 --- a/src/main/java/redis/clients/jedis/MultiDbClient.java +++ b/src/main/java/redis/clients/jedis/MultiDbClient.java @@ -1,6 +1,6 @@ package redis.clients.jedis; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.builders.MultiDbClientBuilder; import redis.clients.jedis.csc.Cache; @@ -43,7 +43,7 @@ * * MultiDbClient client = MultiDbClient.builder() * .multiDbConfig( - * MultiDatabaseConfig.builder() + * MultiDbConfig.builder() * .endpoint( * DatabaseConfig.builder( * primary, @@ -78,7 +78,7 @@ * @since 5.2.0 * @see MultiDatabaseConnectionProvider * @see CircuitBreakerCommandExecutor - * @see MultiDatabaseConfig + * @see MultiDbConfig */ @Experimental public class MultiDbClient extends UnifiedJedis { diff --git a/src/main/java/redis/clients/jedis/MultiDatabaseConfig.java b/src/main/java/redis/clients/jedis/MultiDbConfig.java similarity index 98% rename from 
src/main/java/redis/clients/jedis/MultiDatabaseConfig.java rename to src/main/java/redis/clients/jedis/MultiDbConfig.java index 96ab7d7971..5bde00c34a 100644 --- a/src/main/java/redis/clients/jedis/MultiDatabaseConfig.java +++ b/src/main/java/redis/clients/jedis/MultiDbConfig.java @@ -56,7 +56,7 @@ * .healthCheckEnabled(true).build(); * * // Build multi-cluster configuration - * MultiDatabaseConfig config = MultiDatabaseConfig.builder(primary, secondary) + * MultiDbConfig config = MultiDbConfig.builder(primary, secondary) * .circuitBreakerFailureRateThreshold(10.0f).retryMaxAttempts(3).failbackSupported(true) * .gracePeriod(10000).build(); * @@ -76,7 +76,7 @@ */ // TODO: move @Experimental -public final class MultiDatabaseConfig { +public final class MultiDbConfig { /** * Functional interface for creating {@link HealthCheckStrategy} instances for specific Redis @@ -437,7 +437,7 @@ public static interface StrategySupplier { private int delayInBetweenFailoverAttempts; /** - * Constructs a new MultiDatabaseConfig with the specified cluster configurations. + * Constructs a new MultiDbConfig with the specified cluster configurations. *

* This constructor validates that at least one cluster configuration is provided and that all * configurations are non-null. Use the {@link Builder} class for more convenient configuration @@ -448,7 +448,7 @@ public static interface StrategySupplier { * @throws IllegalArgumentException if any cluster configuration is null * @see Builder#Builder(DatabaseConfig[]) */ - public MultiDatabaseConfig(DatabaseConfig[] databaseConfigs) { + public MultiDbConfig(DatabaseConfig[] databaseConfigs) { if (databaseConfigs == null || databaseConfigs.length < 1) throw new JedisValidationException( "DatabaseClientConfigs are required for MultiDatabaseConnectionProvider"); @@ -636,7 +636,7 @@ public boolean isFastFailover() { } /** - * Creates a new Builder instance for configuring MultiDatabaseConfig. + * Creates a new Builder instance for configuring MultiDbConfig. *

* At least one cluster configuration must be added to the builder before calling build(). Use the * endpoint() methods to add cluster configurations. @@ -650,7 +650,7 @@ public static Builder builder() { } /** - * Creates a new Builder instance for configuring MultiDatabaseConfig. + * Creates a new Builder instance for configuring MultiDbConfig. * @param databaseConfigs array of cluster configurations defining available Redis endpoints * @return new Builder instance * @throws JedisValidationException if databaseConfigs is null or empty @@ -661,7 +661,7 @@ public static Builder builder(DatabaseConfig[] databaseConfigs) { } /** - * Creates a new Builder instance for configuring MultiDatabaseConfig. + * Creates a new Builder instance for configuring MultiDbConfig. * @param databaseConfigs list of cluster configurations defining available Redis endpoints * @return new Builder instance * @throws JedisValidationException if databaseConfigs is null or empty @@ -976,7 +976,7 @@ public DatabaseConfig build() { } /** - * Builder class for creating MultiDatabaseConfig instances with comprehensive configuration + * Builder class for creating MultiDbConfig instances with comprehensive configuration * options. *

* The Builder provides a fluent API for configuring all aspects of multi-cluster failover @@ -984,7 +984,7 @@ public DatabaseConfig build() { * sensible defaults based on production best practices while allowing fine-tuning for specific * requirements. *

- * @see MultiDatabaseConfig + * @see MultiDbConfig * @see DatabaseConfig */ public static class Builder { @@ -1499,17 +1499,17 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts } /** - * Builds and returns a new MultiDatabaseConfig instance with all configured settings. + * Builds and returns a new MultiDbConfig instance with all configured settings. *

* This method creates the final configuration object by copying all builder settings to the * configuration instance. The builder can be reused after calling build() to create additional * configurations with different settings. *

- * @return a new MultiDatabaseConfig instance with the configured settings + * @return a new MultiDbConfig instance with the configured settings */ - public MultiDatabaseConfig build() { + public MultiDbConfig build() { - MultiDatabaseConfig config = new MultiDatabaseConfig( + MultiDbConfig config = new MultiDbConfig( this.databaseConfigs.toArray(new DatabaseConfig[0])); // Copy retry configuration diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java index c4592ec905..252a931762 100644 --- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java +++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java @@ -2,7 +2,7 @@ import java.util.function.Consumer; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.executors.CommandExecutor; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; @@ -38,7 +38,7 @@ *
  * MultiDbClient client = MultiDbClient.builder()
  *                 .multiDbConfig(
- *                         MultiDatabaseConfig.builder()
+ *                         MultiDbConfig.builder()
  *                                 .endpoint(
  *                                         DatabaseConfig.builder(
  *                                                         east,
@@ -67,7 +67,7 @@ public abstract class MultiDbClientBuilder
     extends AbstractClientBuilder, C> {
 
   // Multi-db specific configuration fields
-  private MultiDatabaseConfig multiDbConfig = null;
+  private MultiDbConfig multiDbConfig = null;
   private Consumer databaseSwitchListener = null;
 
   /**
@@ -79,7 +79,7 @@ public abstract class MultiDbClientBuilder
    * @param config the multi-database configuration
    * @return this builder
    */
-  public MultiDbClientBuilder multiDbConfig(MultiDatabaseConfig config) {
+  public MultiDbClientBuilder multiDbConfig(MultiDbConfig config) {
     this.multiDbConfig = config;
     return this;
   }
diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
index ad37099c50..ccdde074f3 100644
--- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
+++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java
@@ -1,7 +1,7 @@
 package redis.clients.jedis.mcf;
 
 import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig.SlidingWindowType;
-import redis.clients.jedis.MultiDatabaseConfig;
+import redis.clients.jedis.MultiDbConfig;
 
 /**
  * Adapter that disables Resilience4j's built-in circuit breaker evaluation and help delegate
@@ -67,9 +67,9 @@ int getSlidingWindowSize() {
    * method controls circuit breaker state based on the original configuration's dual-threshold
    * logic.
    * 

- * @param multiDatabaseConfig configuration containing sliding window size + * @param multiDbConfig configuration containing sliding window size */ - CircuitBreakerThresholdsAdapter(MultiDatabaseConfig multiDatabaseConfig) { + CircuitBreakerThresholdsAdapter(MultiDbConfig multiDbConfig) { // IMPORTANT: failureRateThreshold is set to max theoretically disable Resilience4j's evaluation // and rely on our custom evaluateThresholds() logic. @@ -79,6 +79,6 @@ int getSlidingWindowSize() { // and rely on our custom evaluateThresholds() logic. minimumNumberOfCalls = Integer.MAX_VALUE; - slidingWindowSize = multiDatabaseConfig.getCircuitBreakerSlidingWindowSize(); + slidingWindowSize = multiDbConfig.getCircuitBreakerSlidingWindowSize(); } } diff --git a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java index 51173ace31..6be05e2cfb 100644 --- a/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java +++ b/src/main/java/redis/clients/jedis/mcf/EchoStrategy.java @@ -8,7 +8,7 @@ import redis.clients.jedis.JedisClientConfig; import redis.clients.jedis.JedisPooled; import redis.clients.jedis.UnifiedJedis; -import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier; +import redis.clients.jedis.MultiDbConfig.StrategySupplier; public class EchoStrategy implements HealthCheckStrategy { private static final int MAX_HEALTH_CHECK_POOL_SIZE = 2; diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java index fec047824f..2d6a81b81a 100644 --- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java +++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java @@ -11,7 +11,7 @@ * @see JedisFailoverException.JedisTemporarilyNotAvailableException */ public class JedisFailoverException extends JedisConnectionException { - private static final String MESSAGE = "Cluster/database endpoint could not failover since the 
MultiDatabaseConfig was not " + private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiDbConfig was not " + "provided with an additional cluster/database endpoint according to its prioritized sequence. " + "If applicable, consider falling back OR restarting with an available cluster/database endpoint"; @@ -28,8 +28,8 @@ public JedisFailoverException() { * the max number of failover attempts has been exceeded. And there is still no healthy cluster. *

* See the configuration properties - * {@link redis.clients.jedis.MultiDatabaseConfig#maxNumFailoverAttempts} and - * {@link redis.clients.jedis.MultiDatabaseConfig#delayInBetweenFailoverAttempts} for more + * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and + * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more * details. */ public static class JedisPermanentlyNotAvailableException extends JedisFailoverException { @@ -49,8 +49,8 @@ public JedisPermanentlyNotAvailableException() { * temporary condition and it is possible that there will be a healthy cluster available. *

* See the configuration properties - * {@link redis.clients.jedis.MultiDatabaseConfig#maxNumFailoverAttempts} and - * {@link redis.clients.jedis.MultiDatabaseConfig#delayInBetweenFailoverAttempts} for more + * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and + * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more * details. */ public static class JedisTemporarilyNotAvailableException extends JedisFailoverException { diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java index 9b7d12cb96..ba445ae5cb 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java @@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory; import redis.clients.jedis.*; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.annots.VisibleForTesting; import redis.clients.jedis.exceptions.JedisConnectionException; @@ -41,7 +41,7 @@ import redis.clients.jedis.exceptions.JedisValidationException; import redis.clients.jedis.mcf.JedisFailoverException.*; import redis.clients.jedis.providers.ConnectionProvider; -import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier; +import redis.clients.jedis.MultiDbConfig.StrategySupplier; import redis.clients.jedis.util.Pool; /** @@ -69,7 +69,7 @@ public class MultiDatabaseConnectionProvider implements ConnectionProvider { /** * Indicates the actively used database endpoint (connection pool) amongst the pre-configured list - * which were provided at startup via the MultiDatabaseConfig. All traffic will be routed with + * which were provided at startup via the MultiDbConfig. 
All traffic will be routed with * this database */ private volatile Database activeDatabase; @@ -101,30 +101,30 @@ public class MultiDatabaseConnectionProvider implements ConnectionProvider { // Store retry and circuit breaker configs for dynamic database addition/removal private RetryConfig retryConfig; private CircuitBreakerConfig circuitBreakerConfig; - private MultiDatabaseConfig multiDatabaseConfig; + private MultiDbConfig multiDbConfig; private AtomicLong failoverFreezeUntil = new AtomicLong(0); private AtomicInteger failoverAttemptCount = new AtomicInteger(0); - public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig) { + public MultiDatabaseConnectionProvider(MultiDbConfig multiDbConfig) { - if (multiDatabaseConfig == null) throw new JedisValidationException( - "MultiDatabaseConfig must not be NULL for MultiDatabaseConnectionProvider"); + if (multiDbConfig == null) throw new JedisValidationException( + "MultiDbConfig must not be NULL for MultiDatabaseConnectionProvider"); - this.multiDatabaseConfig = multiDatabaseConfig; + this.multiDbConfig = multiDbConfig; ////////////// Configure Retry //////////////////// RetryConfig.Builder retryConfigBuilder = RetryConfig.custom(); - retryConfigBuilder.maxAttempts(multiDatabaseConfig.getRetryMaxAttempts()); + retryConfigBuilder.maxAttempts(multiDbConfig.getRetryMaxAttempts()); retryConfigBuilder.intervalFunction( - IntervalFunction.ofExponentialBackoff(multiDatabaseConfig.getRetryWaitDuration(), - multiDatabaseConfig.getRetryWaitDurationExponentialBackoffMultiplier())); + IntervalFunction.ofExponentialBackoff(multiDbConfig.getRetryWaitDuration(), + multiDbConfig.getRetryWaitDurationExponentialBackoffMultiplier())); retryConfigBuilder.failAfterMaxAttempts(false); // JedisConnectionException will be thrown retryConfigBuilder.retryExceptions( - multiDatabaseConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new)); + 
multiDbConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new)); - List retryIgnoreExceptionList = multiDatabaseConfig.getRetryIgnoreExceptionList(); + List retryIgnoreExceptionList = multiDbConfig.getRetryIgnoreExceptionList(); if (retryIgnoreExceptionList != null) retryConfigBuilder.ignoreExceptions(retryIgnoreExceptionList.stream().toArray(Class[]::new)); @@ -135,14 +135,14 @@ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig) CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter( - multiDatabaseConfig); + multiDbConfig); circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls()); circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold()); circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize()); circuitBreakerConfigBuilder.slidingWindowType(adapter.getSlidingWindowType()); circuitBreakerConfigBuilder.recordExceptions( - multiDatabaseConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new)); + multiDbConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new)); circuitBreakerConfigBuilder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State // transitions // are @@ -151,7 +151,7 @@ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig) // states // are used - List circuitBreakerIgnoreExceptionList = multiDatabaseConfig + List circuitBreakerIgnoreExceptionList = multiDbConfig .getCircuitBreakerIgnoreExceptionList(); if (circuitBreakerIgnoreExceptionList != null) circuitBreakerConfigBuilder .ignoreExceptions(circuitBreakerIgnoreExceptionList.stream().toArray(Class[]::new)); @@ -160,11 +160,11 @@ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig) ////////////// Configure Database Map //////////////////// - DatabaseConfig[] databaseConfigs 
= multiDatabaseConfig.getDatabaseConfigs(); + DatabaseConfig[] databaseConfigs = multiDbConfig.getDatabaseConfigs(); // Now add databases - health checks will start but events will be queued for (DatabaseConfig config : databaseConfigs) { - addClusterInternal(multiDatabaseConfig, config); + addClusterInternal(multiDbConfig, config); } // Initialize StatusTracker for waiting on health check results @@ -185,11 +185,11 @@ public MultiDatabaseConnectionProvider(MultiDatabaseConfig multiDatabaseConfig) waitForInitialHealthyCluster(statusTracker); switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, temp); } - this.fallbackExceptionList = multiDatabaseConfig.getFallbackExceptionList(); + this.fallbackExceptionList = multiDbConfig.getFallbackExceptionList(); // Start periodic failback checker - if (multiDatabaseConfig.isFailbackSupported()) { - long failbackInterval = multiDatabaseConfig.getFailbackCheckInterval(); + if (multiDbConfig.isFailbackSupported()) { + long failbackInterval = multiDbConfig.getFailbackCheckInterval(); failbackScheduler.scheduleAtFixedRate(this::periodicFailbackCheck, failbackInterval, failbackInterval, TimeUnit.MILLISECONDS); } @@ -213,7 +213,7 @@ public void add(DatabaseConfig databaseConfig) { activeDatabaseChangeLock.lock(); try { - addClusterInternal(multiDatabaseConfig, databaseConfig); + addClusterInternal(multiDbConfig, databaseConfig); } finally { activeDatabaseChangeLock.unlock(); } @@ -286,7 +286,7 @@ public void remove(Endpoint endpoint) { * Internal method to add a database configuration. This method is not thread-safe and should be * called within appropriate locks. 
*/ - private void addClusterInternal(MultiDatabaseConfig multiDatabaseConfig, DatabaseConfig config) { + private void addClusterInternal(MultiDbConfig multiDbConfig, DatabaseConfig config) { if (databaseMap.containsKey(config.getEndpoint())) { throw new JedisValidationException( "Endpoint " + config.getEndpoint() + " already exists in the provider"); @@ -322,10 +322,10 @@ private void addClusterInternal(MultiDatabaseConfig multiDatabaseConfig, Databas healthStatusManager.registerListener(config.getEndpoint(), this::onHealthStatusChange); HealthCheck hc = healthStatusManager.add(config.getEndpoint(), hcs); database = new Database(config.getEndpoint(), pool, retry, hc, circuitBreaker, - config.getWeight(), multiDatabaseConfig); + config.getWeight(), multiDbConfig); } else { database = new Database(config.getEndpoint(), pool, retry, circuitBreaker, config.getWeight(), - multiDatabaseConfig); + multiDbConfig); } databaseMap.put(config.getEndpoint(), database); @@ -475,7 +475,7 @@ Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) { } private void handleNoHealthyCluster() { - int max = multiDatabaseConfig.getMaxNumFailoverAttempts(); + int max = multiDbConfig.getMaxNumFailoverAttempts(); log.error("No healthy cluster available to switch to"); if (failoverAttemptCount.get() > max) { throw new JedisPermanentlyNotAvailableException(); @@ -494,7 +494,7 @@ private boolean markAsFreeze() { long until = failoverFreezeUntil.get(); long now = System.currentTimeMillis(); if (until <= now) { - long nextUntil = now + multiDatabaseConfig.getDelayInBetweenFailoverAttempts(); + long nextUntil = now + multiDbConfig.getDelayInBetweenFailoverAttempts(); if (failoverFreezeUntil.compareAndSet(until, nextUntil)) { return true; } @@ -639,7 +639,7 @@ private boolean setActiveDatabase(Database database, boolean validateConnection) activeDatabaseChangeLock.unlock(); } boolean switched = oldCluster != database; - if (switched && this.multiDatabaseConfig.isFastFailover()) 
{ + if (switched && this.multiDbConfig.isFastFailover()) { log.info("Forcing disconnect of all active connections in old database: {}", oldCluster.circuitBreaker.getName()); oldCluster.forceDisconnect(); @@ -734,7 +734,7 @@ public CircuitBreaker getDatabaseCircuitBreaker() { /** * Indicates the final cluster/database endpoint (connection pool), according to the - * pre-configured list provided at startup via the MultiDatabaseConfig, is unavailable and + * pre-configured list provided at startup via the MultiDbConfig, is unavailable and * therefore no further failover is possible. Users can manually failback to an available cluster */ public boolean canIterateFrom(Database iterateFrom) { @@ -764,7 +764,7 @@ public static class Database { private final CircuitBreaker circuitBreaker; private final float weight; private final HealthCheck healthCheck; - private final MultiDatabaseConfig multiDbConfig; + private final MultiDbConfig multiDbConfig; private boolean disabled = false; private final Endpoint endpoint; @@ -773,20 +773,20 @@ public static class Database { private final Logger log = LoggerFactory.getLogger(getClass()); private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry, - CircuitBreaker circuitBreaker, float weight, MultiDatabaseConfig multiDatabaseConfig) { + CircuitBreaker circuitBreaker, float weight, MultiDbConfig multiDbConfig) { this.endpoint = endpoint; this.connectionPool = connectionPool; this.retry = retry; this.circuitBreaker = circuitBreaker; this.weight = weight; - this.multiDbConfig = multiDatabaseConfig; + this.multiDbConfig = multiDbConfig; this.healthCheck = null; } private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry, HealthCheck hc, CircuitBreaker circuitBreaker, float weight, - MultiDatabaseConfig multiDbConfig) { + MultiDbConfig multiDbConfig) { this.endpoint = endpoint; this.connectionPool = connectionPool; diff --git 
a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java index 05e923f2d4..5325f0e742 100644 --- a/src/test/java/redis/clients/jedis/MultiDbClientTest.java +++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java @@ -15,7 +15,7 @@ import static org.hamcrest.Matchers.not; import static org.junit.jupiter.api.Assertions.*; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisValidationException; import redis.clients.jedis.mcf.ClusterSwitchEventArgs; import redis.clients.jedis.mcf.SwitchReason; @@ -56,7 +56,7 @@ public static void setupAdminClients() throws IOException { @BeforeEach void setUp() { // Create a simple resilient client with mock endpoints for testing - MultiDatabaseConfig clientConfig = MultiDatabaseConfig.builder() + MultiDbConfig clientConfig = MultiDbConfig.builder() .endpoint(endpoint1.getHostAndPort(), 100.0f, endpoint1.getClientConfigBuilder().build()) .endpoint(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build()) .build(); @@ -121,7 +121,7 @@ void testSetActiveDatabase() { @Test void testBuilderWithMultipleEndpointTypes() { - MultiDatabaseConfig clientConfig = MultiDatabaseConfig.builder() + MultiDbConfig clientConfig = MultiDbConfig.builder() .endpoint(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build()) .endpoint(DatabaseConfig .builder(endpoint2.getHostAndPort(), DefaultJedisClientConfig.builder().build()) @@ -172,7 +172,7 @@ public void testForceActiveEndpointWithNonExistingEndpoint() { @Test public void testWithDatabaseSwitchListener() { - MultiDatabaseConfig endpointsConfig = MultiDatabaseConfig.builder() + MultiDbConfig endpointsConfig = MultiDbConfig.builder() .endpoint(DatabaseConfig .builder(endpoint1.getHostAndPort(), endpoint1.getClientConfigBuilder().build()) .weight(100.0f).build()) diff --git 
a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java index 3416f68b74..7cda9db8fd 100644 --- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java @@ -16,7 +16,7 @@ import redis.clients.jedis.EndpointConfig; import redis.clients.jedis.HostAndPorts; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.UnifiedJedis; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; @@ -180,13 +180,13 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID)); } - private List getDatabaseConfigs( + private List getDatabaseConfigs( JedisClientConfig clientConfig, EndpointConfig... 
endpoints) { int weight = endpoints.length; AtomicInteger weightCounter = new AtomicInteger(weight); return Arrays.stream(endpoints) - .map(e -> MultiDatabaseConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig) + .map(e -> MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig) .weight(1.0f / weightCounter.getAndIncrement()).healthCheckEnabled(false).build()) .collect(Collectors.toList()); } @@ -261,7 +261,7 @@ public void testManualFailoverInflightCommandsWithErrorsPropagateError() throws */ @Test public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOException { - MultiDatabaseConfig failoverConfig = new MultiDatabaseConfig.Builder(getDatabaseConfigs( + MultiDbConfig failoverConfig = new MultiDbConfig.Builder(getDatabaseConfigs( DefaultJedisClientConfig.builder().socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(), endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1) @@ -422,7 +422,7 @@ private MultiDatabaseConnectionProvider createProvider() { .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - MultiDatabaseConfig failoverConfig = new MultiDatabaseConfig.Builder( + MultiDbConfig failoverConfig = new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build(); @@ -435,12 +435,12 @@ private MultiDatabaseConnectionProvider createProvider() { * @return A configured provider */ private MultiDatabaseConnectionProvider createProvider( - Function configCustomizer) { + Function configCustomizer) { JedisClientConfig clientConfig = DefaultJedisClientConfig.builder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) 
.connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f); diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java index da295e837e..6fdc720cc9 100644 --- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java +++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java @@ -16,7 +16,7 @@ import eu.rekawek.toxiproxy.ToxiproxyClient; import eu.rekawek.toxiproxy.model.Toxic; import redis.clients.jedis.*; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.scenario.ActiveActiveFailoverTest; import redis.clients.jedis.scenario.MultiThreadedFakeApp; import redis.clients.jedis.scenario.RecommendedSettings; @@ -94,7 +94,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio "TESTING WITH PARAMETERS: fastFailover: {} numberOfThreads: {} minFailoverCompletionDuration: {} maxFailoverCompletionDuration: {] ", fastFailover, numberOfThreads, minFailoverCompletionDuration, maxFailoverCompletionDuration); - MultiDatabaseConfig.DatabaseConfig[] clusterConfig = new MultiDatabaseConfig.DatabaseConfig[2]; + MultiDbConfig.DatabaseConfig[] clusterConfig = new MultiDbConfig.DatabaseConfig[2]; JedisClientConfig config = endpoint1.getClientConfigBuilder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) @@ -105,7 +105,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio clusterConfig[1] = 
DatabaseConfig.builder(endpoint2.getHostAndPort(), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build(); - MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(clusterConfig); + MultiDbConfig.Builder builder = new MultiDbConfig.Builder(clusterConfig); builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java index bf7f702004..2e71b0f554 100644 --- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java @@ -16,8 +16,8 @@ import redis.clients.jedis.Connection; import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.MultiDatabaseConfig; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.Protocol; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; @@ -50,11 +50,11 @@ public void setup() throws Exception { .healthCheckEnabled(false).weight(0.5f).build() }; fakeDatabaseConfigs = databaseConfigs; - MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(databaseConfigs) + MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(databaseConfigs) .circuitBreakerFailureRateThreshold(50.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); - MultiDatabaseConfig mcc = cfgBuilder.build(); + MultiDbConfig mcc = cfgBuilder.build(); realProvider = new MultiDatabaseConnectionProvider(mcc); spyProvider = 
spy(realProvider); @@ -123,7 +123,7 @@ public void minFailuresAndRateExceeded_triggersFailover() { @Test public void rateBelowThreshold_doesNotFailover() throws Exception { // Use local provider with higher threshold (80%) and no retries - MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs) + MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs) .circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); MultiDatabaseConnectionProvider rp = new MultiDatabaseConnectionProvider(cfgBuilder.build()); @@ -162,10 +162,10 @@ public void rateBelowThreshold_doesNotFailover() throws Exception { @Test public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { - MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs); + MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs); cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10); - MultiDatabaseConfig mcc = cfgBuilder.build(); + MultiDbConfig mcc = cfgBuilder.build(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc); @@ -189,7 +189,7 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures, boolean expectFailoverOnNext) throws Exception { - MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig.builder(fakeDatabaseConfigs) + MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs) .circuitBreakerFailureRateThreshold(ratePercent).circuitBreakerMinNumOfFailures(minFailures) .circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1) .retryOnFailover(false); diff --git 
a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java index 251d69140c..a3e9317a98 100644 --- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java @@ -11,7 +11,7 @@ import org.junit.jupiter.params.provider.CsvSource; import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; /** @@ -100,13 +100,13 @@ public void rateBelowThreshold_doesNotFailover() { @Test public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { - MultiDatabaseConfig.Builder cfgBuilder = MultiDatabaseConfig - .builder(java.util.Arrays.asList(MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.Builder cfgBuilder = MultiDbConfig + .builder(java.util.Arrays.asList(MultiDbConfig.DatabaseConfig .builder(new HostAndPort("localhost", 6379), DefaultJedisClientConfig.builder().build()) .healthCheckEnabled(false).build())); cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10); - MultiDatabaseConfig mcc = cfgBuilder.build(); + MultiDbConfig mcc = cfgBuilder.build(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc); diff --git a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java index 51d0aa3ec2..e3e5f3f05e 100644 --- a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java +++ b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import 
redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; public class DefaultValuesTest { @@ -19,10 +19,10 @@ public class DefaultValuesTest { @Test void testDefaultValuesInConfig() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(fakeEndpoint, config).build(); - MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); + MultiDbConfig multiConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); // check for grace period assertEquals(60000, multiConfig.getGracePeriod()); diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java index c216e69317..7179c0c475 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java @@ -17,7 +17,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; @ExtendWith(MockitoExtension.class) class FailbackMechanismIntegrationTest { @@ -49,16 +49,16 @@ private MockedConstruction mockPool() { void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with different weights - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiDatabaseConfig.DatabaseConfig cluster2 = 
MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled .failbackCheckInterval(100) // Short interval for testing .build(); @@ -89,16 +89,16 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { void testFailbackToHigherWeightCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with different weights - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f) // Lower weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(100) // Short interval for testing .gracePeriod(100).build(); @@ -129,20 +129,20 @@ void testFailbackToHigherWeightCluster() throws InterruptedException { void testNoFailbackToLowerWeightCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { // Create three clusters with different 
weights to properly test no failback to lower weight - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f) // Lowest weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) // Medium weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(100).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { @@ -172,16 +172,16 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException { @Test void testFailbackToHigherWeightClusterImmediately() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - 
MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(100).gracePeriod(50).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { @@ -211,16 +211,16 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException @Test void testUnhealthyClusterCancelsFailback() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(200).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { @@ -256,20 +256,20 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException { @Test void testMultipleClusterFailbackPriority() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig 
.builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lowest // weight - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Medium // weight - MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { @@ -298,16 +298,16 @@ void testMultipleClusterFailbackPriority() throws InterruptedException { @Test void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( 
+ new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(100).gracePeriod(200) // 200ms grace // period .build(); @@ -332,16 +332,16 @@ void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException { @Test void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(50) // Short interval for testing .gracePeriod(100) // Short grace period for testing .build(); diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java index a200296e18..a69bdb9ee6 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; @ExtendWith(MockitoExtension.class) class FailbackMechanismUnitTest { @@ -26,17 +26,17 
@@ void setUp() { @Test void testFailbackCheckIntervalConfiguration() { // Test default value - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); - MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); + MultiDbConfig defaultConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); assertEquals(120000, defaultConfig.getFailbackCheckInterval()); // Test custom value - MultiDatabaseConfig customConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000) + MultiDbConfig customConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000) .build(); assertEquals(3000, customConfig.getFailbackCheckInterval()); @@ -44,18 +44,18 @@ void testFailbackCheckIntervalConfiguration() { @Test void testFailbackSupportedConfiguration() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test default (should be true) - MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); + MultiDbConfig defaultConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); assertTrue(defaultConfig.isFailbackSupported()); // Test disabled - MultiDatabaseConfig disabledConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false) + MultiDbConfig disabledConfig = new MultiDbConfig.Builder( + new 
MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false) .build(); assertFalse(disabledConfig.isFailbackSupported()); @@ -63,19 +63,19 @@ void testFailbackSupportedConfiguration() { @Test void testFailbackCheckIntervalValidation() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test zero interval (should be allowed) - MultiDatabaseConfig zeroConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0) + MultiDbConfig zeroConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0) .build(); assertEquals(0, zeroConfig.getFailbackCheckInterval()); // Test negative interval (should be allowed - implementation decision) - MultiDatabaseConfig negativeConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000) + MultiDbConfig negativeConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000) .build(); assertEquals(-1000, negativeConfig.getFailbackCheckInterval()); @@ -83,12 +83,12 @@ void testFailbackCheckIntervalValidation() { @Test void testBuilderChaining() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test that builder methods can be chained - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) 
.failbackCheckInterval(2000).retryOnFailover(true).build(); assertTrue(config.isFailbackSupported()); @@ -99,47 +99,47 @@ void testBuilderChaining() { @Test void testGracePeriodConfiguration() { // Test default value - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); - MultiDatabaseConfig defaultConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); + MultiDbConfig defaultConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); assertEquals(60000, defaultConfig.getGracePeriod()); // Test custom value - MultiDatabaseConfig customConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build(); + MultiDbConfig customConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build(); assertEquals(5000, customConfig.getGracePeriod()); } @Test void testGracePeriodValidation() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test zero grace period (should be allowed) - MultiDatabaseConfig zeroConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build(); + MultiDbConfig zeroConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build(); assertEquals(0, zeroConfig.getGracePeriod()); // Test negative grace period (should be allowed - implementation decision) - MultiDatabaseConfig negativeConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { 
clusterConfig }).gracePeriod(-1000).build(); + MultiDbConfig negativeConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(-1000).build(); assertEquals(-1000, negativeConfig.getGracePeriod()); } @Test void testGracePeriodBuilderChaining() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test that builder methods can be chained - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) .failbackCheckInterval(2000).gracePeriod(8000).retryOnFailover(true).build(); assertTrue(config.isFailbackSupported()); diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java index c43baf9933..d315616956 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java @@ -15,10 +15,10 @@ import redis.clients.jedis.EndpointConfig; import redis.clients.jedis.HostAndPorts; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.UnifiedJedis; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; -import redis.clients.jedis.MultiDatabaseConfig.StrategySupplier; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.StrategySupplier; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; import redis.clients.jedis.scenario.RecommendedSettings; @@ -43,7 +43,7 @@ public void 
testDisableHealthCheck() { @Test public void testDefaultStrategySupplier() { // Create a default strategy supplier that creates EchoStrategy instances - MultiDatabaseConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> { + MultiDbConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> { return new EchoStrategy(hostAndPort, jedisClientConfig); }; MultiDatabaseConnectionProvider customProvider = getMCCF(defaultSupplier); @@ -57,7 +57,7 @@ public void testDefaultStrategySupplier() { @Test public void testCustomStrategySupplier() { // Create a StrategySupplier that uses the JedisClientConfig when available - MultiDatabaseConfig.StrategySupplier strategySupplier = (hostAndPort, jedisClientConfig) -> { + MultiDbConfig.StrategySupplier strategySupplier = (hostAndPort, jedisClientConfig) -> { return new TestHealthCheckStrategy(HealthCheckStrategy.Config.builder().interval(500) .timeout(500).numProbes(1).policy(BuiltIn.ANY_SUCCESS).build(), (endpoint) -> { // Create connection per health check to avoid resource leak @@ -79,18 +79,18 @@ public void testCustomStrategySupplier() { } private MultiDatabaseConnectionProvider getMCCF( - MultiDatabaseConfig.StrategySupplier strategySupplier) { + MultiDbConfig.StrategySupplier strategySupplier) { Function modifier = builder -> strategySupplier == null ? 
builder.healthCheckEnabled(false) : builder.healthCheckStrategySupplier(strategySupplier); List databaseConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 }) .map(e -> modifier - .apply(MultiDatabaseConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)) + .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)) .build()) .collect(Collectors.toList()); - MultiDatabaseConfig mccf = new MultiDatabaseConfig.Builder(databaseConfigs).retryMaxAttempts(1) + MultiDbConfig mccf = new MultiDbConfig.Builder(databaseConfigs).retryMaxAttempts(1) .retryWaitDuration(1).circuitBreakerSlidingWindowSize(1) .circuitBreakerFailureRateThreshold(100).build(); diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java index a9a592de1f..7a23a6d88f 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java @@ -9,7 +9,7 @@ import redis.clients.jedis.Endpoint; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.UnifiedJedis; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; @@ -338,7 +338,7 @@ void testEchoStrategyCustomIntervalTimeout() { @Test void testEchoStrategyDefaultSupplier() { - MultiDatabaseConfig.StrategySupplier supplier = EchoStrategy.DEFAULT; + MultiDbConfig.StrategySupplier supplier = EchoStrategy.DEFAULT; HealthCheckStrategy strategy = supplier.get(testEndpoint, testConfig); assertInstanceOf(EchoStrategy.class, strategy); @@ -348,12 +348,12 @@ void testEchoStrategyDefaultSupplier() { @Test void testNewFieldLocations() { - // Test new field locations in DatabaseConfig and MultiDatabaseConfig - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + // Test new field locations in DatabaseConfig and 
MultiDbConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).weight(2.5f).build(); - MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true) + MultiDbConfig multiConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true) .failbackSupported(false).build(); assertEquals(2.5f, clusterConfig.getWeight()); @@ -364,7 +364,7 @@ void testNewFieldLocations() { @Test void testDefaultValues() { // Test default values in DatabaseConfig - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).build(); assertEquals(1.0f, clusterConfig.getWeight()); // Default weight @@ -374,9 +374,9 @@ void testDefaultValues() { // health // check) - // Test default values in MultiDatabaseConfig - MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { clusterConfig }).build(); + // Test default values in MultiDbConfig + MultiDbConfig multiConfig = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); assertFalse(multiConfig.isRetryOnFailover()); // Default is false assertTrue(multiConfig.isFailbackSupported()); // Default is true @@ -386,10 +386,10 @@ void testDefaultValues() { void testDatabaseConfigWithHealthCheckStrategy() { HealthCheckStrategy customStrategy = mock(HealthCheckStrategy.class); - MultiDatabaseConfig.StrategySupplier supplier = (hostAndPort, + MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> customStrategy; - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, 
testConfig).healthCheckStrategySupplier(supplier).build(); assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -400,11 +400,11 @@ void testDatabaseConfigWithHealthCheckStrategy() { @Test void testDatabaseConfigWithStrategySupplier() { - MultiDatabaseConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> { + MultiDbConfig.StrategySupplier customSupplier = (hostAndPort, jedisClientConfig) -> { return mock(HealthCheckStrategy.class); }; - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(customSupplier).build(); assertEquals(customSupplier, clusterConfig.getHealthCheckStrategySupplier()); @@ -412,21 +412,21 @@ void testDatabaseConfigWithStrategySupplier() { @Test void testDatabaseConfigWithEchoStrategy() { - MultiDatabaseConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> { + MultiDbConfig.StrategySupplier echoSupplier = (hostAndPort, jedisClientConfig) -> { return new EchoStrategy(hostAndPort, jedisClientConfig); }; - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(echoSupplier).build(); - MultiDatabaseConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier(); + MultiDbConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier(); assertNotNull(supplier); assertInstanceOf(EchoStrategy.class, supplier.get(testEndpoint, testConfig)); } @Test void testDatabaseConfigWithDefaultHealthCheck() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).build(); // Should use default 
EchoStrategy assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -435,7 +435,7 @@ void testDatabaseConfigWithDefaultHealthCheck() { @Test void testDatabaseConfigWithDisabledHealthCheck() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckEnabled(false).build(); assertNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -443,7 +443,7 @@ void testDatabaseConfigWithDisabledHealthCheck() { @Test void testDatabaseConfigHealthCheckEnabledExplicitly() { - MultiDatabaseConfig.DatabaseConfig clusterConfig = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckEnabled(true).build(); assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); @@ -515,7 +515,7 @@ void testHealthCheckIntegration() throws InterruptedException { @Test void testStrategySupplierPolymorphism() { // Test that the polymorphic design works correctly - MultiDatabaseConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> { + MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> { if (jedisClientConfig != null) { return new EchoStrategy(hostAndPort, jedisClientConfig, HealthCheckStrategy.Config.builder().interval(500).timeout(250).numProbes(1).build()); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java index 973c854f6c..41aac09723 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java @@ -11,8 +11,8 @@ import redis.clients.jedis.HostAndPort; import redis.clients.jedis.HostAndPorts; import redis.clients.jedis.JedisClientConfig; -import 
redis.clients.jedis.MultiDatabaseConfig; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisValidationException; import static org.junit.jupiter.api.Assertions.*; @@ -35,7 +35,7 @@ void setUp() { // Create initial provider with endpoint1 DatabaseConfig initialConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiDatabaseConfig multiConfig = new MultiDatabaseConfig.Builder( + MultiDbConfig multiConfig = new MultiDbConfig.Builder( new DatabaseConfig[] { initialConfig }).build(); provider = new MultiDatabaseConnectionProvider(multiConfig); @@ -82,7 +82,7 @@ void testRemoveExistingCluster() { // Create initial provider with endpoint1 DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiDatabaseConfig multiConfig = MultiDatabaseConfig + MultiDbConfig multiConfig = MultiDbConfig .builder(new DatabaseConfig[] { clusterConfig1 }).build(); try ( @@ -179,7 +179,7 @@ void testActiveClusterHandlingOnRemove() { // Create initial provider with endpoint1 DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiDatabaseConfig multiConfig = MultiDatabaseConfig + MultiDbConfig multiConfig = MultiDbConfig .builder(new DatabaseConfig[] { clusterConfig1 }).build(); try ( diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java index 2742084082..26e437b2d1 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java @@ -8,8 +8,8 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import 
redis.clients.jedis.MultiDatabaseConfig; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException; import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException; import redis.clients.jedis.util.ReflectionTestUtil; @@ -40,7 +40,7 @@ void setUp() throws Exception { DatabaseConfig.builder(endpoint1, clientCfg).weight(0.5f).healthCheckEnabled(false) .build() }; - MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(databaseConfigs); + MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs); // Use small values by default for tests unless overridden per-test via reflection setBuilderFailoverConfig(builder, /* maxAttempts */ 10, /* delayMs */ 12000); @@ -146,7 +146,7 @@ void maxNumFailoverAttempts_zeroDelay_leadsToPermanentAfterExceeding() throws Ex // ======== Test helper methods (reflection) ======== - private static void setBuilderFailoverConfig(MultiDatabaseConfig.Builder builder, int maxAttempts, + private static void setBuilderFailoverConfig(MultiDbConfig.Builder builder, int maxAttempts, int delayMs) throws Exception { ReflectionTestUtil.setField(builder, "maxNumFailoverAttempts", maxAttempts); @@ -154,9 +154,9 @@ private static void setBuilderFailoverConfig(MultiDatabaseConfig.Builder builder } private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception { - // Access the underlying MultiDatabaseConfig inside provider and adjust fields for this + // Access the underlying MultiDbConfig inside provider and adjust fields for this // test - Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig"); ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts); @@ -164,13 +164,13 @@ 
private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exce } private int getProviderMaxAttempts() throws Exception { - Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "MultiDbConfig"); return ReflectionTestUtil.getField(cfg, "maxNumFailoverAttempts"); } private int getProviderDelayMs() throws Exception { - Object cfg = ReflectionTestUtil.getField(provider, "multiDatabaseConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "MultiDbConfig"); return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts"); } diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java index 9aed74a5ea..780f3c2571 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java @@ -14,7 +14,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.exceptions.JedisValidationException; /** @@ -49,20 +49,20 @@ private MockedConstruction mockPool() { void testInitializationWithMixedHealthCheckConfiguration() { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with mixed health check configuration - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false) // No health // check .build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) 
.healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With // health // check .build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Should initialize successfully @@ -80,15 +80,15 @@ void testInitializationWithMixedHealthCheckConfiguration() { void testInitializationWithAllHealthChecksDisabled() { try (MockedConstruction mockedPool = mockPool()) { // Create clusters with no health checks - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(3.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Should select cluster2 (highest weight, no health checks) @@ -100,11 +100,11 @@ void testInitializationWithAllHealthChecksDisabled() { @Test void testInitializationWithSingleCluster() { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster = MultiDbConfig.DatabaseConfig .builder(endpoint1, 
clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster }).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { // Should select the only available cluster @@ -123,30 +123,30 @@ void testErrorHandlingWithNullConfiguration() { @Test void testErrorHandlingWithEmptyClusterArray() { assertThrows(JedisValidationException.class, () -> { - new MultiDatabaseConfig.Builder(new MultiDatabaseConfig.DatabaseConfig[0]).build(); + new MultiDbConfig.Builder(new MultiDbConfig.DatabaseConfig[0]).build(); }); } @Test void testErrorHandlingWithNullDatabaseConfig() { assertThrows(IllegalArgumentException.class, () -> { - new MultiDatabaseConfig.Builder(new MultiDatabaseConfig.DatabaseConfig[] { null }).build(); + new MultiDbConfig.Builder(new MultiDbConfig.DatabaseConfig[] { null }).build(); }); } @Test void testInitializationWithZeroWeights() { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(0.0f) // Zero weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.0f) // Zero weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); try (MultiDatabaseConnectionProvider provider = new 
MultiDatabaseConnectionProvider(config)) { // Should still initialize and select one of the clusters diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java index fa84564645..ad9bc150dd 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java @@ -5,7 +5,7 @@ import org.awaitility.Durations; import org.junit.jupiter.api.*; import redis.clients.jedis.*; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.exceptions.JedisValidationException; import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; @@ -43,7 +43,7 @@ public void setUp() { endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build(); provider = new MultiDatabaseConnectionProvider( - new MultiDatabaseConfig.Builder(databaseConfigs).build()); + new MultiDbConfig.Builder(databaseConfigs).build()); } @AfterEach @@ -109,7 +109,7 @@ public void testRunClusterFailoverPostProcessor() { DefaultJedisClientConfig.builder().build()) .weight(0.4f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder(databaseConfigs); + MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs); // Configures a single failed command to trigger an open circuit on the next subsequent failure builder.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(1) @@ -180,7 +180,7 @@ public void testConnectionPoolConfigApplied() { databaseConfigs[1] = new DatabaseConfig(endpointStandalone1.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build(), poolConfig); try (MultiDatabaseConnectionProvider customProvider = new 
MultiDatabaseConnectionProvider( - new MultiDatabaseConfig.Builder(databaseConfigs).build())) { + new MultiDbConfig.Builder(databaseConfigs).build())) { MultiDatabaseConnectionProvider.Database activeCluster = customProvider.getDatabase(); ConnectionPool connectionPool = activeCluster.getConnectionPool(); assertEquals(8, connectionPool.getMaxTotal()); @@ -210,7 +210,7 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException { .healthCheckStrategy(countingStrategy).build(); MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( - new MultiDatabaseConfig.Builder(Collections.singletonList(config)).build()); + new MultiDbConfig.Builder(Collections.singletonList(config)).build()); try { // Wait for some health checks to occur @@ -245,7 +245,7 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() { endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build(); MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( - new MultiDatabaseConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) + new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) .maxNumFailoverAttempts(2).retryMaxAttempts(1).build()); try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) { @@ -282,7 +282,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent // adjusted to get exact numbers of failures with exact exception types // and open to impact from other defaulted values withing the components in use. 
MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( - new MultiDatabaseConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) + new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5) .circuitBreakerFailureRateThreshold(60).build()) { }; diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java index 1496ee3815..54fc1e1f7a 100644 --- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java +++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java @@ -14,7 +14,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; @ExtendWith(MockitoExtension.class) class PeriodicFailbackTest { @@ -42,14 +42,14 @@ private MockedConstruction mockPool() { @Test void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) 
.failbackCheckInterval(100).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { @@ -75,14 +75,14 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException @Test void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(50).gracePeriod(100).build(); // Add // grace // period @@ -122,14 +122,14 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException { @Test void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new 
MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled .failbackCheckInterval(50).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { @@ -162,18 +162,18 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx try (MockedConstruction mockedPool = mockPool()) { HostAndPort endpoint3 = new HostAndPort("localhost", 6381); - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add // grace // period diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java index 25788fa916..c061c5c4e6 100644 --- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java +++ 
b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java @@ -47,10 +47,10 @@ public class AutomaticFailoverTest { private Jedis jedis2; - private List getDatabaseConfigs( + private List getDatabaseConfigs( JedisClientConfig clientConfig, HostAndPort... hostPorts) { return Arrays.stream(hostPorts) - .map(hp -> new MultiDatabaseConfig.DatabaseConfig(hp, clientConfig)) + .map(hp -> new MultiDbConfig.DatabaseConfig(hp, clientConfig)) .collect(Collectors.toList()); } @@ -69,7 +69,7 @@ public void cleanUp() { @Test public void pipelineWithSwitch() { MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( - new MultiDatabaseConfig.Builder( + new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .build()); @@ -89,7 +89,7 @@ public void pipelineWithSwitch() { @Test public void transactionWithSwitch() { MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( - new MultiDatabaseConfig.Builder( + new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .build()); @@ -112,7 +112,7 @@ public void commandFailoverUnresolvableHost() { int slidingWindowSize = 2; HostAndPort unresolvableHostAndPort = new HostAndPort("unresolvable", 6379); - MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort())) .retryWaitDuration(1).retryMaxAttempts(1) .circuitBreakerSlidingWindowSize(slidingWindowSize) @@ -152,7 +152,7 @@ public void commandFailover() { int slidingWindowSize = 6; int retryMaxAttempts = 3; - MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) 
.retryMaxAttempts(retryMaxAttempts) // Default // is @@ -194,7 +194,7 @@ public void commandFailover() { public void pipelineFailover() { int slidingWindowSize = 10; - MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .circuitBreakerSlidingWindowSize(slidingWindowSize) .fallbackExceptionList(Collections.singletonList(JedisConnectionException.class)); @@ -226,7 +226,7 @@ public void pipelineFailover() { public void failoverFromAuthError() { int slidingWindowSize = 10; - MultiDatabaseConfig.Builder builder = new MultiDatabaseConfig.Builder( + MultiDbConfig.Builder builder = new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(), workingEndpoint.getHostAndPort())).circuitBreakerSlidingWindowSize(slidingWindowSize) .fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class)); diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java index c3f974dcd5..ef14cc51ce 100644 --- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java +++ b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java @@ -14,7 +14,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; -import redis.clients.jedis.MultiDatabaseConfig; +import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.mcf.HealthStatus; import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper; @@ -52,13 +52,13 @@ private MockedConstruction mockConnectionPool() { void 
postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { // Create clusters without health checks - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { @@ -84,13 +84,13 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { @Test void postInit_nonActive_changes_do_not_switch_active() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); try 
(MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { @@ -131,14 +131,14 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception { @Test void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { @@ -158,11 +158,11 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception @Test void init_single_cluster_initializes_and_is_healthy() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1 }).build(); // This test verifies that the provider initializes correctly and doesn't lose events // In practice, with 
health checks disabled, no events should be generated during init @@ -183,17 +183,17 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception { @Test void postInit_two_hop_failover_chain_respected() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster3 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(0.2f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { @@ -227,14 +227,14 @@ void postInit_two_hop_failover_chain_respected() throws Exception { @Test void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Exception { try (MockedConstruction mockedPool = mockConnectionPool()) { - MultiDatabaseConfig.DatabaseConfig cluster1 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); - MultiDatabaseConfig.DatabaseConfig cluster2 = MultiDatabaseConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig cluster2 = 
MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build(); - MultiDatabaseConfig config = new MultiDatabaseConfig.Builder( - new MultiDatabaseConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder( + new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( config)) { diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java index 400fd65404..9250e90a3e 100644 --- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java +++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java @@ -9,7 +9,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import redis.clients.jedis.*; -import redis.clients.jedis.MultiDatabaseConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.mcf.ClusterSwitchEventArgs; import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; @@ -68,7 +68,7 @@ public void testFailover() { DatabaseConfig secondary = DatabaseConfig.builder(endpoint.getHostAndPort(1), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build(); - MultiDatabaseConfig multiConfig = MultiDatabaseConfig.builder() + MultiDbConfig multiConfig = MultiDbConfig.builder() .endpoint(primary) .endpoint(secondary) .circuitBreakerSlidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS From f88cb25bb376f5386e755b1e6143337b65dbba34 Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 14:32:30 +0300 Subject: [PATCH 04/18] Rename MultiDatabaseConnectionProvider to MultiDbConnectionProvider --- .../redis/clients/jedis/MultiDbClient.java | 38 +++++++-------- .../redis/clients/jedis/MultiDbConfig.java | 14 
+++--- .../redis/clients/jedis/UnifiedJedis.java | 12 ++--- .../jedis/builders/MultiDbClientBuilder.java | 7 ++- .../mcf/CircuitBreakerCommandExecutor.java | 4 +- .../jedis/mcf/CircuitBreakerFailoverBase.java | 6 +-- ...cuitBreakerFailoverConnectionProvider.java | 4 +- .../mcf/CircuitBreakerThresholdsAdapter.java | 4 +- .../jedis/mcf/ClusterSwitchEventArgs.java | 2 +- .../jedis/mcf/JedisFailoverException.java | 6 +-- .../jedis/mcf/MultiClusterPipeline.java | 4 +- .../jedis/mcf/MultiClusterTransaction.java | 6 +-- ...er.java => MultiDbConnectionProvider.java} | 22 ++++----- ...UnifiedJedisConstructorReflectionTest.java | 2 +- .../failover/FailoverIntegrationTest.java | 26 +++++----- .../mcf/ActiveActiveLocalFailoverTest.java | 2 +- .../mcf/CircuitBreakerThresholdsTest.java | 16 +++---- .../mcf/ClusterEvaluateThresholdsTest.java | 6 +-- .../mcf/FailbackMechanismIntegrationTest.java | 48 +++++++++---------- .../jedis/mcf/FailbackMechanismUnitTest.java | 12 ++--- .../jedis/mcf/HealthCheckIntegrationTest.java | 14 +++--- .../clients/jedis/mcf/HealthCheckTest.java | 3 +- .../MultiClusterDynamicEndpointUnitTest.java | 26 +++++----- ...ultiClusterFailoverAttemptsConfigTest.java | 18 ++++--- .../mcf/MultiClusterInitializationTest.java | 12 ++--- ...MultiDatabaseConnectionProviderHelper.java | 20 -------- .../mcf/MultiDbConnectionProviderHelper.java | 20 ++++++++ ...ava => MultiDbConnectionProviderTest.java} | 24 +++++----- .../jedis/mcf/PeriodicFailbackTest.java | 20 ++++---- .../jedis/misc/AutomaticFailoverTest.java | 20 ++++---- ...erProviderHealthStatusChangeEventTest.java | 38 +++++++-------- .../scenario/ActiveActiveFailoverTest.java | 4 +- 32 files changed, 219 insertions(+), 241 deletions(-) rename src/main/java/redis/clients/jedis/mcf/{MultiDatabaseConnectionProvider.java => MultiDbConnectionProvider.java} (98%) delete mode 100644 src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java create mode 100644 
src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java rename src/test/java/redis/clients/jedis/mcf/{MultiDatabaseConnectionProviderTest.java => MultiDbConnectionProviderTest.java} (93%) diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java index 2e008f5c98..bd65f119ff 100644 --- a/src/main/java/redis/clients/jedis/MultiDbClient.java +++ b/src/main/java/redis/clients/jedis/MultiDbClient.java @@ -9,7 +9,7 @@ import redis.clients.jedis.mcf.MultiClusterPipeline; import redis.clients.jedis.mcf.MultiClusterTransaction; import redis.clients.jedis.providers.ConnectionProvider; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; import java.util.Set; @@ -76,7 +76,7 @@ *

* @author Ivo Gaydazhiev * @since 5.2.0 - * @see MultiDatabaseConnectionProvider + * @see MultiDbConnectionProvider * @see CircuitBreakerCommandExecutor * @see MultiDbConfig */ @@ -91,7 +91,7 @@ public class MultiDbClient extends UnifiedJedis { * {@link #builder()} to create instances. *

* @param commandExecutor the command executor (typically CircuitBreakerCommandExecutor) - * @param connectionProvider the connection provider (typically MultiDatabaseConnectionProvider) + * @param connectionProvider the connection provider (typically MultiDbConnectionProvider) * @param commandObjects the command objects * @param redisProtocol the Redis protocol version * @param cache the client-side cache (may be null) @@ -102,16 +102,16 @@ public class MultiDbClient extends UnifiedJedis { } /** - * Returns the underlying MultiDatabaseConnectionProvider. + * Returns the underlying MultiDbConnectionProvider. *

* This provides access to multi-cluster specific operations like manual failover, health status * monitoring, and cluster switch event handling. *

* @return the multi-cluster connection provider - * @throws ClassCastException if the provider is not a MultiDatabaseConnectionProvider + * @throws ClassCastException if the provider is not a MultiDbConnectionProvider */ - private MultiDatabaseConnectionProvider getMultiDatabaseConnectionProvider() { - return (MultiDatabaseConnectionProvider) this.provider; + private MultiDbConnectionProvider getMultiDbConnectionProvider() { + return (MultiDbConnectionProvider) this.provider; } /** @@ -123,7 +123,7 @@ private MultiDatabaseConnectionProvider getMultiDatabaseConnectionProvider() { * @param endpoint the endpoint to switch to */ public void setActiveDatabase(Endpoint endpoint) { - getMultiDatabaseConnectionProvider().setActiveDatabase(endpoint); + getMultiDbConnectionProvider().setActiveDatabase(endpoint); } /** @@ -136,7 +136,7 @@ public void setActiveDatabase(Endpoint endpoint) { * @param databaseConfig the pre-configured database configuration */ public void addEndpoint(DatabaseConfig databaseConfig) { - getMultiDatabaseConnectionProvider().add(databaseConfig); + getMultiDbConnectionProvider().add(databaseConfig); } /** @@ -155,7 +155,7 @@ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clien DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight) .build(); - getMultiDatabaseConnectionProvider().add(databaseConfig); + getMultiDbConnectionProvider().add(databaseConfig); } /** @@ -166,7 +166,7 @@ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clien * @return the set of all configured endpoints */ public Set getEndpoints() { - return getMultiDatabaseConnectionProvider().getEndpoints(); + return getMultiDbConnectionProvider().getEndpoints(); } /** @@ -178,7 +178,7 @@ public Set getEndpoints() { * @return the health status of the endpoint */ public boolean isHealthy(Endpoint endpoint) { - return getMultiDatabaseConnectionProvider().isHealthy(endpoint); + return 
getMultiDbConnectionProvider().isHealthy(endpoint); } /** @@ -194,7 +194,7 @@ public boolean isHealthy(Endpoint endpoint) { * healthy clusters available */ public void removeEndpoint(Endpoint endpoint) { - getMultiDatabaseConnectionProvider().remove(endpoint); + getMultiDbConnectionProvider().remove(endpoint); } /** @@ -210,7 +210,7 @@ public void removeEndpoint(Endpoint endpoint) { * or doesn't exist */ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) { - getMultiDatabaseConnectionProvider().forceActiveDatabase(endpoint, forcedActiveDurationMs); + getMultiDbConnectionProvider().forceActiveDatabase(endpoint, forcedActiveDurationMs); } /** @@ -223,7 +223,7 @@ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) */ @Override public MultiClusterPipeline pipelined() { - return new MultiClusterPipeline(getMultiDatabaseConnectionProvider(), commandObjects); + return new MultiClusterPipeline(getMultiDbConnectionProvider(), commandObjects); } /** @@ -236,8 +236,7 @@ public MultiClusterPipeline pipelined() { */ @Override public MultiClusterTransaction multi() { - return new MultiClusterTransaction((MultiDatabaseConnectionProvider) provider, true, - commandObjects); + return new MultiClusterTransaction((MultiDbConnectionProvider) provider, true, commandObjects); } /** @@ -251,12 +250,11 @@ public MultiClusterTransaction transaction(boolean doMulti) { "It is not allowed to create Transaction from this " + getClass()); } - return new MultiClusterTransaction(getMultiDatabaseConnectionProvider(), doMulti, - commandObjects); + return new MultiClusterTransaction(getMultiDbConnectionProvider(), doMulti, commandObjects); } public Endpoint getActiveEndpoint() { - return getMultiDatabaseConnectionProvider().getDatabase().getEndpoint(); + return getMultiDbConnectionProvider().getDatabase().getEndpoint(); } /** diff --git a/src/main/java/redis/clients/jedis/MultiDbConfig.java b/src/main/java/redis/clients/jedis/MultiDbConfig.java 
index 5bde00c34a..7bafdd9f5e 100644 --- a/src/main/java/redis/clients/jedis/MultiDbConfig.java +++ b/src/main/java/redis/clients/jedis/MultiDbConfig.java @@ -21,7 +21,7 @@ * This configuration enables seamless failover between multiple Redis clusters, databases, or * endpoints by providing comprehensive settings for retry logic, circuit breaker behavior, health * checks, and failback mechanisms. It is designed to work with - * {@link redis.clients.jedis.mcf.MultiDatabaseConnectionProvider} to provide high availability and + * {@link redis.clients.jedis.mcf.MultiDbConnectionProvider} to provide high availability and * disaster recovery capabilities. *

*

@@ -61,14 +61,14 @@ * .gracePeriod(10000).build(); * * // Use with connection provider - * MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config); + * MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config); * } *

*

* The configuration leverages Resilience4j for * circuit breaker and retry implementations, providing battle-tested fault tolerance patterns. *

- * @see redis.clients.jedis.mcf.MultiDatabaseConnectionProvider + * @see redis.clients.jedis.mcf.MultiDbConnectionProvider * @see redis.clients.jedis.mcf.HealthCheckStrategy * @see redis.clients.jedis.mcf.EchoStrategy * @see redis.clients.jedis.mcf.LagAwareStrategy @@ -451,7 +451,7 @@ public static interface StrategySupplier { public MultiDbConfig(DatabaseConfig[] databaseConfigs) { if (databaseConfigs == null || databaseConfigs.length < 1) throw new JedisValidationException( - "DatabaseClientConfigs are required for MultiDatabaseConnectionProvider"); + "DatabaseClientConfigs are required for MultiDbConnectionProvider"); for (DatabaseConfig databaseConfig : databaseConfigs) { if (databaseConfig == null) @@ -976,8 +976,7 @@ public DatabaseConfig build() { } /** - * Builder class for creating MultiDbConfig instances with comprehensive configuration - * options. + * Builder class for creating MultiDbConfig instances with comprehensive configuration options. *

* The Builder provides a fluent API for configuring all aspects of multi-cluster failover * behavior, including retry logic, circuit breaker settings, and failback mechanisms. It uses @@ -1509,8 +1508,7 @@ public Builder delayInBetweenFailoverAttempts(int delayInBetweenFailoverAttempts */ public MultiDbConfig build() { - MultiDbConfig config = new MultiDbConfig( - this.databaseConfigs.toArray(new DatabaseConfig[0])); + MultiDbConfig config = new MultiDbConfig(this.databaseConfigs.toArray(new DatabaseConfig[0])); // Copy retry configuration config.retryMaxAttempts = this.retryMaxAttempts; diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java index ebe5a55f54..ccb9d37df6 100644 --- a/src/main/java/redis/clients/jedis/UnifiedJedis.java +++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java @@ -34,7 +34,7 @@ import redis.clients.jedis.json.JsonObjectMapper; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; import redis.clients.jedis.mcf.MultiClusterPipeline; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.mcf.MultiClusterTransaction; import redis.clients.jedis.params.*; import redis.clients.jedis.providers.*; @@ -240,7 +240,7 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo *

*/ @Experimental - public UnifiedJedis(MultiDatabaseConnectionProvider provider) { + public UnifiedJedis(MultiDbConnectionProvider provider) { this(new CircuitBreakerCommandExecutor(provider), provider); } @@ -5099,8 +5099,8 @@ public List tdigestByRevRank(String key, long... ranks) { public PipelineBase pipelined() { if (provider == null) { throw new IllegalStateException("It is not allowed to create Pipeline from this " + getClass()); - } else if (provider instanceof MultiDatabaseConnectionProvider) { - return new MultiClusterPipeline((MultiDatabaseConnectionProvider) provider, commandObjects); + } else if (provider instanceof MultiDbConnectionProvider) { + return new MultiClusterPipeline((MultiDbConnectionProvider) provider, commandObjects); } else { return new Pipeline(provider.getConnection(), true, commandObjects); } @@ -5120,8 +5120,8 @@ public AbstractTransaction multi() { public AbstractTransaction transaction(boolean doMulti) { if (provider == null) { throw new IllegalStateException("It is not allowed to create Transaction from this " + getClass()); - } else if (provider instanceof MultiDatabaseConnectionProvider) { - return new MultiClusterTransaction((MultiDatabaseConnectionProvider) provider, doMulti, commandObjects); + } else if (provider instanceof MultiDbConnectionProvider) { + return new MultiClusterTransaction((MultiDbConnectionProvider) provider, doMulti, commandObjects); } else { return new Transaction(provider.getConnection(), doMulti, true, commandObjects); } diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java index 252a931762..12cc2efefb 100644 --- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java +++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java @@ -7,7 +7,7 @@ import redis.clients.jedis.executors.CommandExecutor; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; import 
redis.clients.jedis.mcf.ClusterSwitchEventArgs; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.providers.ConnectionProvider; /** @@ -113,7 +113,7 @@ protected ConnectionProvider createDefaultConnectionProvider() { } // Create the multi-cluster connection provider - MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(multiDbConfig); + MultiDbConnectionProvider provider = new MultiDbConnectionProvider(multiDbConfig); // Set database switch listener if provided if (this.databaseSwitchListener != null) { @@ -126,8 +126,7 @@ protected ConnectionProvider createDefaultConnectionProvider() { @Override protected CommandExecutor createDefaultCommandExecutor() { // For multi-db clients, we always use CircuitBreakerCommandExecutor - return new CircuitBreakerCommandExecutor( - (MultiDatabaseConnectionProvider) this.connectionProvider); + return new CircuitBreakerCommandExecutor((MultiDbConnectionProvider) this.connectionProvider); } @Override diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java index 5a5f24e063..31d8d67a73 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java +++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java @@ -9,7 +9,7 @@ import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.executors.CommandExecutor; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; /** * @author Allen Terleto (aterleto) @@ -24,7 +24,7 @@ public class CircuitBreakerCommandExecutor extends CircuitBreakerFailoverBase implements CommandExecutor { - public CircuitBreakerCommandExecutor(MultiDatabaseConnectionProvider provider) 
{ + public CircuitBreakerCommandExecutor(MultiDbConnectionProvider provider) { super(provider); } diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java index cbe97f27a8..ba1ea98dec 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java +++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java @@ -6,7 +6,7 @@ import java.util.concurrent.locks.ReentrantLock; import redis.clients.jedis.annots.Experimental; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; import redis.clients.jedis.util.IOUtils; /** @@ -23,9 +23,9 @@ public class CircuitBreakerFailoverBase implements AutoCloseable { private final Lock lock = new ReentrantLock(true); - protected final MultiDatabaseConnectionProvider provider; + protected final MultiDbConnectionProvider provider; - public CircuitBreakerFailoverBase(MultiDatabaseConnectionProvider provider) { + public CircuitBreakerFailoverBase(MultiDbConnectionProvider provider) { this.provider = provider; } diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java index b45cd04c61..7dfd1ef527 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java @@ -6,7 +6,7 @@ import redis.clients.jedis.Connection; import redis.clients.jedis.annots.Experimental; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; /** * ConnectionProvider with built-in retry, circuit-breaker, and failover to another cluster/database @@ -16,7 +16,7 @@ @Experimental public class 
CircuitBreakerFailoverConnectionProvider extends CircuitBreakerFailoverBase { - public CircuitBreakerFailoverConnectionProvider(MultiDatabaseConnectionProvider provider) { + public CircuitBreakerFailoverConnectionProvider(MultiDbConnectionProvider provider) { super(provider); } diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java index ccdde074f3..3dfc65a1f5 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java +++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java @@ -9,10 +9,10 @@ *

* This adapter sets maximum values for failure rate (100%) and minimum calls (Integer.MAX_VALUE) to * effectively disable Resilience4j's automatic circuit breaker transitions, allowing - * {@link MultiDatabaseConnectionProvider.Database#evaluateThresholds(boolean)} to control when the + * {@link MultiDbConnectionProvider.Database#evaluateThresholds(boolean)} to control when the * circuit breaker opens based on both minimum failure count AND failure rate. *

- * @see MultiDatabaseConnectionProvider.Database#evaluateThresholds(boolean) + * @see MultiDbConnectionProvider.Database#evaluateThresholds(boolean) */ class CircuitBreakerThresholdsAdapter { /** Maximum failure rate threshold (100%) to disable Resilience4j evaluation */ diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java index 2c3e283445..a78c41864a 100644 --- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java +++ b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java @@ -1,7 +1,7 @@ package redis.clients.jedis.mcf; import redis.clients.jedis.Endpoint; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; public class ClusterSwitchEventArgs { diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java index 2d6a81b81a..c431764d42 100644 --- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java +++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java @@ -29,8 +29,7 @@ public JedisFailoverException() { *

* See the configuration properties * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and - * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more - * details. + * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more details. */ public static class JedisPermanentlyNotAvailableException extends JedisFailoverException { public JedisPermanentlyNotAvailableException(String s) { @@ -50,8 +49,7 @@ public JedisPermanentlyNotAvailableException() { *

* See the configuration properties * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and - * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more - * details. + * {@link redis.clients.jedis.MultiDbConfig#delayInBetweenFailoverAttempts} for more details. */ public static class JedisTemporarilyNotAvailableException extends JedisFailoverException { diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java index d23f56411f..d302768fad 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java @@ -21,7 +21,7 @@ public class MultiClusterPipeline extends PipelineBase implements Closeable { private final Queue>> commands = new LinkedList<>(); @Deprecated - public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider) { + public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider) { super(new CommandObjects()); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); @@ -32,7 +32,7 @@ public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider) { } } - public MultiClusterPipeline(MultiDatabaseConnectionProvider pooledProvider, + public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider, CommandObjects commandObjects) { super(commandObjects); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java index 6f634549e2..e4afa24887 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java @@ -39,7 +39,7 @@ public class MultiClusterTransaction extends TransactionBase { * @param provider */ @Deprecated - public 
MultiClusterTransaction(MultiDatabaseConnectionProvider provider) { + public MultiClusterTransaction(MultiDbConnectionProvider provider) { this(provider, true); } @@ -50,7 +50,7 @@ public MultiClusterTransaction(MultiDatabaseConnectionProvider provider) { * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI */ @Deprecated - public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean doMulti) { + public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMulti) { this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); try (Connection connection = failoverProvider.getConnection()) { @@ -68,7 +68,7 @@ public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI * @param commandObjects command objects */ - public MultiClusterTransaction(MultiDatabaseConnectionProvider provider, boolean doMulti, + public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMulti, CommandObjects commandObjects) { super(commandObjects); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java similarity index 98% rename from src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java index ba445ae5cb..c39d137616 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java @@ -57,7 +57,7 @@ *

*/ @Experimental -public class MultiDatabaseConnectionProvider implements ConnectionProvider { +public class MultiDbConnectionProvider implements ConnectionProvider { private final Logger log = LoggerFactory.getLogger(getClass()); @@ -69,8 +69,8 @@ public class MultiDatabaseConnectionProvider implements ConnectionProvider { /** * Indicates the actively used database endpoint (connection pool) amongst the pre-configured list - * which were provided at startup via the MultiDbConfig. All traffic will be routed with - * this database + * which were provided at startup via the MultiDbConfig. All traffic will be routed with this + * database */ private volatile Database activeDatabase; @@ -106,10 +106,10 @@ public class MultiDatabaseConnectionProvider implements ConnectionProvider { private AtomicLong failoverFreezeUntil = new AtomicLong(0); private AtomicInteger failoverAttemptCount = new AtomicInteger(0); - public MultiDatabaseConnectionProvider(MultiDbConfig multiDbConfig) { + public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) { if (multiDbConfig == null) throw new JedisValidationException( - "MultiDbConfig must not be NULL for MultiDatabaseConnectionProvider"); + "MultiDbConfig must not be NULL for MultiDbConnectionProvider"); this.multiDbConfig = multiDbConfig; @@ -134,8 +134,7 @@ public MultiDatabaseConnectionProvider(MultiDbConfig multiDbConfig) { CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom(); - CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter( - multiDbConfig); + CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(multiDbConfig); circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls()); circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold()); circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize()); @@ -410,7 +409,7 @@ private Database waitForInitialHealthyCluster(StatusTracker 
statusTracker) { // All clusters are unhealthy throw new JedisConnectionException( - "All configured clusters are unhealthy. Cannot initialize MultiDatabaseConnectionProvider."); + "All configured clusters are unhealthy. Cannot initialize MultiDbConnectionProvider."); } /** @@ -734,8 +733,8 @@ public CircuitBreaker getDatabaseCircuitBreaker() { /** * Indicates the final cluster/database endpoint (connection pool), according to the - * pre-configured list provided at startup via the MultiDbConfig, is unavailable and - * therefore no further failover is possible. Users can manually failback to an available cluster + * pre-configured list provided at startup via the MultiDbConfig, is unavailable and therefore no + * further failover is possible. Users can manually failback to an available cluster */ public boolean canIterateFrom(Database iterateFrom) { Map.Entry e = findWeightedHealthyClusterToIterate(iterateFrom); @@ -785,8 +784,7 @@ private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry } private Database(Endpoint endpoint, TrackingConnectionPool connectionPool, Retry retry, - HealthCheck hc, CircuitBreaker circuitBreaker, float weight, - MultiDbConfig multiDbConfig) { + HealthCheck hc, CircuitBreaker circuitBreaker, float weight, MultiDbConfig multiDbConfig) { this.endpoint = endpoint; this.connectionPool = connectionPool; diff --git a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java index 39c76b6338..cd2ca8e4c5 100644 --- a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java +++ b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java @@ -184,7 +184,7 @@ private static boolean clusterConstructorThatShouldBeDeprecatedAndRemoved(Constr private static boolean multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient( Constructor ctor) { 
Class[] types = ctor.getParameterTypes(); - return types.length == 1 && types[0].getSimpleName().equals("MultiDatabaseConnectionProvider"); + return types.length == 1 && types[0].getSimpleName().equals("MultiDbConnectionProvider"); } private static String prettySignature(Constructor ctor) { diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java index 7cda9db8fd..b3c19fdda5 100644 --- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java @@ -19,7 +19,7 @@ import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.UnifiedJedis; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.scenario.RecommendedSettings; import java.io.IOException; @@ -57,7 +57,7 @@ public class FailoverIntegrationTest { private static UnifiedJedis jedis2; private static String JEDIS1_ID = ""; private static String JEDIS2_ID = ""; - private MultiDatabaseConnectionProvider provider; + private MultiDbConnectionProvider provider; private UnifiedJedis failoverClient; @BeforeAll @@ -180,8 +180,8 @@ public void testManualFailoverNewCommandsAreSentToActiveCluster() throws Interru assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS2_ID)); } - private List getDatabaseConfigs( - JedisClientConfig clientConfig, EndpointConfig... endpoints) { + private List getDatabaseConfigs(JedisClientConfig clientConfig, + EndpointConfig... 
endpoints) { int weight = endpoints.length; AtomicInteger weightCounter = new AtomicInteger(weight); @@ -269,7 +269,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc .circuitBreakerFailureRateThreshold(50f) // %50 failure rate .build(); - MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(failoverConfig); + MultiDbConnectionProvider provider = new MultiDbConnectionProvider(failoverConfig); try (UnifiedJedis client = new UnifiedJedis(provider)) { // Verify initial connection to first endpoint assertThat(getNodeId(client.info("server")), equalTo(JEDIS1_ID)); @@ -315,7 +315,7 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc @Test public void testInflightCommandsAreRetriedAfterFailover() throws Exception { - MultiDatabaseConnectionProvider customProvider = createProvider( + MultiDbConnectionProvider customProvider = createProvider( builder -> builder.retryOnFailover(true)); // Create a custom client with retryOnFailover enabled for this specific test @@ -357,7 +357,7 @@ public void testInflightCommandsAreRetriedAfterFailover() throws Exception { @Test public void testInflightCommandsAreNotRetriedAfterFailover() throws Exception { // Create a custom provider and client with retry disabled for this specific test - MultiDatabaseConnectionProvider customProvider = createProvider( + MultiDbConnectionProvider customProvider = createProvider( builder -> builder.retryOnFailover(false)); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { @@ -414,10 +414,10 @@ private static String generateTestValue(int byteSize) { } /** - * Creates a MultiDatabaseConnectionProvider with standard configuration + * Creates a MultiDbConnectionProvider with standard configuration * @return A configured provider */ - private MultiDatabaseConnectionProvider createProvider() { + private MultiDbConnectionProvider createProvider() { JedisClientConfig clientConfig = 
DefaultJedisClientConfig.builder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); @@ -427,14 +427,14 @@ private MultiDatabaseConnectionProvider createProvider() { .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build(); - return new MultiDatabaseConnectionProvider(failoverConfig); + return new MultiDbConnectionProvider(failoverConfig); } /** - * Creates a MultiDatabaseConnectionProvider with standard configuration + * Creates a MultiDbConnectionProvider with standard configuration * @return A configured provider */ - private MultiDatabaseConnectionProvider createProvider( + private MultiDbConnectionProvider createProvider( Function configCustomizer) { JedisClientConfig clientConfig = DefaultJedisClientConfig.builder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) @@ -449,6 +449,6 @@ private MultiDatabaseConnectionProvider createProvider( builder = configCustomizer.apply(builder); } - return new MultiDatabaseConnectionProvider(builder.build()); + return new MultiDbConnectionProvider(builder.build()); } } diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java index 6fdc720cc9..f42030cd52 100644 --- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java +++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java @@ -162,7 +162,7 @@ public void accept(ClusterSwitchEventArgs e) { ensureEndpointAvailability(endpoint2.getHostAndPort(), config); // Create the connection provider - MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(builder.build()); + MultiDbConnectionProvider provider = new MultiDbConnectionProvider(builder.build()); FailoverReporter reporter = new FailoverReporter(); 
provider.setDatabaseSwitchListener(reporter); provider.setActiveDatabase(endpoint1.getHostAndPort()); diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java index 2e71b0f554..7d439c6d46 100644 --- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java @@ -20,7 +20,7 @@ import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.Protocol; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; import redis.clients.jedis.util.ReflectionTestUtil; /** @@ -30,8 +30,8 @@ */ public class CircuitBreakerThresholdsTest { - private MultiDatabaseConnectionProvider realProvider; - private MultiDatabaseConnectionProvider spyProvider; + private MultiDbConnectionProvider realProvider; + private MultiDbConnectionProvider spyProvider; private Database cluster; private CircuitBreakerCommandExecutor executor; private CommandObject dummyCommand; @@ -56,7 +56,7 @@ public void setup() throws Exception { MultiDbConfig mcc = cfgBuilder.build(); - realProvider = new MultiDatabaseConnectionProvider(mcc); + realProvider = new MultiDbConnectionProvider(mcc); spyProvider = spy(realProvider); cluster = spyProvider.getDatabase(); @@ -126,8 +126,8 @@ public void rateBelowThreshold_doesNotFailover() throws Exception { MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs) .circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3) .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); - MultiDatabaseConnectionProvider rp = new MultiDatabaseConnectionProvider(cfgBuilder.build()); - MultiDatabaseConnectionProvider sp = spy(rp); + MultiDbConnectionProvider rp = new 
MultiDbConnectionProvider(cfgBuilder.build()); + MultiDbConnectionProvider sp = spy(rp); Database c = sp.getDatabase(); try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(sp)) { CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING), @@ -194,8 +194,8 @@ public void thresholdMatrix(int minFailures, float ratePercent, int successes, i .circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1) .retryOnFailover(false); - MultiDatabaseConnectionProvider real = new MultiDatabaseConnectionProvider(cfgBuilder.build()); - MultiDatabaseConnectionProvider spy = spy(real); + MultiDbConnectionProvider real = new MultiDbConnectionProvider(cfgBuilder.build()); + MultiDbConnectionProvider spy = spy(real); Database c = spy.getDatabase(); try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(spy)) { diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java index a3e9317a98..8a6fd466c0 100644 --- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java @@ -12,7 +12,7 @@ import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.MultiDbConfig; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; /** * Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures @@ -21,14 +21,14 @@ */ public class ClusterEvaluateThresholdsTest { - private MultiDatabaseConnectionProvider provider; + private MultiDbConnectionProvider provider; private Database cluster; private CircuitBreaker circuitBreaker; private CircuitBreaker.Metrics metrics; @BeforeEach public void setup() { - provider = 
mock(MultiDatabaseConnectionProvider.class); + provider = mock(MultiDbConnectionProvider.class); cluster = mock(Database.class); circuitBreaker = mock(CircuitBreaker.class); diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java index 7179c0c475..34e521683e 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java @@ -62,19 +62,19 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { .failbackCheckInterval(100) // Short interval for testing .build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to force failover to cluster1 - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster1 (only healthy option) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster2 healthy again (higher weight - would normally trigger failback) - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait longer than failback interval @@ -102,19 +102,19 @@ void testFailbackToHigherWeightCluster() throws InterruptedException { .failbackCheckInterval(100) // Short interval for testing .gracePeriod(100).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) 
{ + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to force failover to cluster2 - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (lower weight, but only healthy option) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check interval + some buffer @@ -145,12 +145,12 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException { new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(100).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight) assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); // Make cluster3 unhealthy to force failover to cluster2 (medium weight) - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (highest weight among healthy clusters) @@ -158,7 +158,7 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException { // Make cluster1 (lowest weight) healthy - this should NOT trigger failback // since we don't failback to lower weight 
clusters - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check interval @@ -184,19 +184,19 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(100).gracePeriod(50).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to force failover to cluster2 - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (only healthy option) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check @@ -223,26 +223,26 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException { new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(200).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster1 should be active (highest weight) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Make cluster1 unhealthy to 
force failover to cluster2 - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (only healthy option) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster1 healthy again (should trigger failback attempt) - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait a bit Thread.sleep(100); // Make cluster1 unhealthy again before failback completes - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Wait past the original failback interval @@ -272,19 +272,19 @@ void testMultipleClusterFailbackPriority() throws InterruptedException { new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) .failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight) assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); // Make cluster3 unhealthy to force failover to cluster2 (next highest weight) - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should now be on cluster2 (highest weight among healthy clusters) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster3 healthy again - 
MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback @@ -312,12 +312,12 @@ void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException { // period .build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Now make cluster2 unhealthy - it should be disabled for grace period - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should failover to cluster1 @@ -346,12 +346,12 @@ void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException { .gracePeriod(100) // Short grace period for testing .build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); // Make cluster2 unhealthy to start grace period and force failover - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Should failover to cluster1 @@ -361,7 +361,7 @@ void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException { assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); // Make cluster2 healthy again while it's still in grace period - 
MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Should still be on cluster1 because cluster2 is in grace period diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java index a69bdb9ee6..ad251975c2 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java @@ -36,8 +36,7 @@ void testFailbackCheckIntervalConfiguration() { // Test custom value MultiDbConfig customConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000) - .build(); + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000).build(); assertEquals(3000, customConfig.getFailbackCheckInterval()); } @@ -55,8 +54,7 @@ void testFailbackSupportedConfiguration() { // Test disabled MultiDbConfig disabledConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false) - .build(); + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false).build(); assertFalse(disabledConfig.isFailbackSupported()); } @@ -68,15 +66,13 @@ void testFailbackCheckIntervalValidation() { // Test zero interval (should be allowed) MultiDbConfig zeroConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0) - .build(); + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0).build(); assertEquals(0, zeroConfig.getFailbackCheckInterval()); // Test negative interval (should be allowed - implementation decision) MultiDbConfig negativeConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000) - 
.build(); + new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000).build(); assertEquals(-1000, negativeConfig.getFailbackCheckInterval()); } diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java index d315616956..ce12cde8a7 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java @@ -32,7 +32,7 @@ public class HealthCheckIntegrationTest { @Test public void testDisableHealthCheck() { // No health check strategy supplier means health check is disabled - MultiDatabaseConnectionProvider customProvider = getMCCF(null); + MultiDbConnectionProvider customProvider = getMCCF(null); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -46,7 +46,7 @@ public void testDefaultStrategySupplier() { MultiDbConfig.StrategySupplier defaultSupplier = (hostAndPort, jedisClientConfig) -> { return new EchoStrategy(hostAndPort, jedisClientConfig); }; - MultiDatabaseConnectionProvider customProvider = getMCCF(defaultSupplier); + MultiDbConnectionProvider customProvider = getMCCF(defaultSupplier); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -70,7 +70,7 @@ public void testCustomStrategySupplier() { }); }; - MultiDatabaseConnectionProvider customProvider = getMCCF(strategySupplier); + MultiDbConnectionProvider customProvider = getMCCF(strategySupplier); try (UnifiedJedis customClient = new UnifiedJedis(customProvider)) { // Verify that the client can connect and execute commands String result = customClient.ping(); @@ -78,23 +78,21 @@ public void testCustomStrategySupplier() { } } - private MultiDatabaseConnectionProvider getMCCF( 
- MultiDbConfig.StrategySupplier strategySupplier) { + private MultiDbConnectionProvider getMCCF(MultiDbConfig.StrategySupplier strategySupplier) { Function modifier = builder -> strategySupplier == null ? builder.healthCheckEnabled(false) : builder.healthCheckStrategySupplier(strategySupplier); List databaseConfigs = Arrays.stream(new EndpointConfig[] { endpoint1 }) .map(e -> modifier - .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)) - .build()) + .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)).build()) .collect(Collectors.toList()); MultiDbConfig mccf = new MultiDbConfig.Builder(databaseConfigs).retryMaxAttempts(1) .retryWaitDuration(1).circuitBreakerSlidingWindowSize(1) .circuitBreakerFailureRateThreshold(100).build(); - return new MultiDatabaseConnectionProvider(mccf); + return new MultiDbConnectionProvider(mccf); } // ========== Probe Logic Integration Tests ========== diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java index 7a23a6d88f..b83ecb8981 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java @@ -386,8 +386,7 @@ void testDefaultValues() { void testDatabaseConfigWithHealthCheckStrategy() { HealthCheckStrategy customStrategy = mock(HealthCheckStrategy.class); - MultiDbConfig.StrategySupplier supplier = (hostAndPort, - jedisClientConfig) -> customStrategy; + MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> customStrategy; MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckStrategySupplier(supplier).build(); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java index 41aac09723..6ddca7ae12 100644 --- 
a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java @@ -23,7 +23,7 @@ public class MultiClusterDynamicEndpointUnitTest { - private MultiDatabaseConnectionProvider provider; + private MultiDbConnectionProvider provider; private JedisClientConfig clientConfig; private final EndpointConfig endpoint1 = HostAndPorts.getRedisEndpoint("standalone0"); private final EndpointConfig endpoint2 = HostAndPorts.getRedisEndpoint("standalone1"); @@ -35,10 +35,10 @@ void setUp() { // Create initial provider with endpoint1 DatabaseConfig initialConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiDbConfig multiConfig = new MultiDbConfig.Builder( - new DatabaseConfig[] { initialConfig }).build(); + MultiDbConfig multiConfig = new MultiDbConfig.Builder(new DatabaseConfig[] { initialConfig }) + .build(); - provider = new MultiDatabaseConnectionProvider(multiConfig); + provider = new MultiDbConnectionProvider(multiConfig); } // Helper method to create cluster configurations @@ -82,12 +82,11 @@ void testRemoveExistingCluster() { // Create initial provider with endpoint1 DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiDbConfig multiConfig = MultiDbConfig - .builder(new DatabaseConfig[] { clusterConfig1 }).build(); + MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { clusterConfig1 }) + .build(); - try ( - MultiDatabaseConnectionProvider providerWithMockedPool = new MultiDatabaseConnectionProvider( - multiConfig)) { + try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider( + multiConfig)) { // Add endpoint2 as second cluster DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); @@ -179,12 +178,11 @@ void testActiveClusterHandlingOnRemove() { // Create initial provider with endpoint1 DatabaseConfig clusterConfig1 = 
createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiDbConfig multiConfig = MultiDbConfig - .builder(new DatabaseConfig[] { clusterConfig1 }).build(); + MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { clusterConfig1 }) + .build(); - try ( - MultiDatabaseConnectionProvider providerWithMockedPool = new MultiDatabaseConnectionProvider( - multiConfig)) { + try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider( + multiConfig)) { // Add endpoint2 as second cluster DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java index 26e437b2d1..076eade442 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java @@ -22,14 +22,14 @@ /** * Tests for how getMaxNumFailoverAttempts and getDelayInBetweenFailoverAttempts impact - * MultiDatabaseConnectionProvider behaviour when no healthy clusters are available. + * MultiDbConnectionProvider behaviour when no healthy clusters are available. 
*/ public class MultiClusterFailoverAttemptsConfigTest { private HostAndPort endpoint0 = new HostAndPort("purposefully-incorrect", 0000); private HostAndPort endpoint1 = new HostAndPort("purposefully-incorrect", 0001); - private MultiDatabaseConnectionProvider provider; + private MultiDbConnectionProvider provider; @BeforeEach void setUp() throws Exception { @@ -45,7 +45,7 @@ void setUp() throws Exception { // Use small values by default for tests unless overridden per-test via reflection setBuilderFailoverConfig(builder, /* maxAttempts */ 10, /* delayMs */ 12000); - provider = new MultiDatabaseConnectionProvider(builder.build()); + provider = new MultiDbConnectionProvider(builder.build()); // Disable both clusters to force handleNoHealthyCluster path provider.getDatabase(endpoint0).setDisabled(true); @@ -69,9 +69,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex // First call: should throw temporary and start the freeze window, incrementing attempt count to // 1 - assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getDatabase())); + assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper + .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase())); int afterFirst = getProviderAttemptCount(); assertEquals(1, afterFirst); @@ -79,7 +78,7 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex // and should NOT increment the attempt count beyond 1 for (int i = 0; i < 50; i++) { assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, + () -> MultiDbConnectionProviderHelper.switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase())); assertEquals(1, getProviderAttemptCount()); } @@ -97,9 +96,8 @@ void 
delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr // First call: should throw temporary and start the freeze window, incrementing attempt count to // 1 - assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getDatabase())); + assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper + .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase())); int afterFirst = getProviderAttemptCount(); assertEquals(1, afterFirst); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java index 780f3c2571..e01af8f8f4 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java @@ -18,7 +18,7 @@ import redis.clients.jedis.exceptions.JedisValidationException; /** - * Tests for MultiDatabaseConnectionProvider initialization edge cases + * Tests for MultiDbConnectionProvider initialization edge cases */ @ExtendWith(MockitoExtension.class) public class MultiClusterInitializationTest { @@ -64,7 +64,7 @@ void testInitializationWithMixedHealthCheckConfiguration() { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Should initialize successfully assertNotNull(provider.getDatabase()); @@ -90,7 +90,7 @@ void testInitializationWithAllHealthChecksDisabled() { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDatabaseConnectionProvider provider = new 
MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Should select cluster2 (highest weight, no health checks) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); } @@ -106,7 +106,7 @@ void testInitializationWithSingleCluster() { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster }).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Should select the only available cluster assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); } @@ -116,7 +116,7 @@ void testInitializationWithSingleCluster() { @Test void testErrorHandlingWithNullConfiguration() { assertThrows(JedisValidationException.class, () -> { - new MultiDatabaseConnectionProvider(null); + new MultiDbConnectionProvider(null); }); } @@ -148,7 +148,7 @@ void testInitializationWithZeroWeights() { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Should still initialize and select one of the clusters assertNotNull(provider.getDatabase()); } diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java deleted file mode 100644 index a88e53feed..0000000000 --- a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderHelper.java +++ /dev/null @@ -1,20 +0,0 @@ -package redis.clients.jedis.mcf; - -import redis.clients.jedis.Endpoint; - -public class MultiDatabaseConnectionProviderHelper { - - public static void 
onHealthStatusChange(MultiDatabaseConnectionProvider provider, - Endpoint endpoint, HealthStatus oldStatus, HealthStatus newStatus) { - provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus)); - } - - public static void periodicFailbackCheck(MultiDatabaseConnectionProvider provider) { - provider.periodicFailbackCheck(); - } - - public static Endpoint switchToHealthyCluster(MultiDatabaseConnectionProvider provider, - SwitchReason reason, MultiDatabaseConnectionProvider.Database iterateFrom) { - return provider.switchToHealthyDatabase(reason, iterateFrom); - } -} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java new file mode 100644 index 0000000000..4ae061c9f5 --- /dev/null +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java @@ -0,0 +1,20 @@ +package redis.clients.jedis.mcf; + +import redis.clients.jedis.Endpoint; + +public class MultiDbConnectionProviderHelper { + + public static void onHealthStatusChange(MultiDbConnectionProvider provider, Endpoint endpoint, + HealthStatus oldStatus, HealthStatus newStatus) { + provider.onHealthStatusChange(new HealthStatusChangeEvent(endpoint, oldStatus, newStatus)); + } + + public static void periodicFailbackCheck(MultiDbConnectionProvider provider) { + provider.periodicFailbackCheck(); + } + + public static Endpoint switchToHealthyCluster(MultiDbConnectionProvider provider, + SwitchReason reason, MultiDbConnectionProvider.Database iterateFrom) { + return provider.switchToHealthyDatabase(reason, iterateFrom); + } +} diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java similarity index 93% rename from src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java rename to 
src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java index ad9bc150dd..9af53002e0 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDatabaseConnectionProviderTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java @@ -8,7 +8,7 @@ import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.exceptions.JedisValidationException; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider.Database; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; import redis.clients.jedis.mcf.ProbingPolicy.BuiltIn; import redis.clients.jedis.mcf.JedisFailoverException.JedisPermanentlyNotAvailableException; import redis.clients.jedis.mcf.JedisFailoverException.JedisTemporarilyNotAvailableException; @@ -23,15 +23,15 @@ import static org.junit.jupiter.api.Assertions.*; /** - * @see MultiDatabaseConnectionProvider + * @see MultiDbConnectionProvider */ @Tag("integration") -public class MultiDatabaseConnectionProviderTest { +public class MultiDbConnectionProviderTest { private final EndpointConfig endpointStandalone0 = HostAndPorts.getRedisEndpoint("standalone0"); private final EndpointConfig endpointStandalone1 = HostAndPorts.getRedisEndpoint("standalone1"); - private MultiDatabaseConnectionProvider provider; + private MultiDbConnectionProvider provider; @BeforeEach public void setUp() { @@ -42,8 +42,7 @@ public void setUp() { databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(), endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build(); - provider = new MultiDatabaseConnectionProvider( - new MultiDbConfig.Builder(databaseConfigs).build()); + provider = new MultiDbConnectionProvider(new MultiDbConfig.Builder(databaseConfigs).build()); } @AfterEach @@ -117,8 +116,7 @@ public void testRunClusterFailoverPostProcessor() { AtomicBoolean isValidTest = new AtomicBoolean(false); - 
MultiDatabaseConnectionProvider localProvider = new MultiDatabaseConnectionProvider( - builder.build()); + MultiDbConnectionProvider localProvider = new MultiDbConnectionProvider(builder.build()); localProvider.setDatabaseSwitchListener(a -> { isValidTest.set(true); }); @@ -179,9 +177,9 @@ public void testConnectionPoolConfigApplied() { endpointStandalone0.getClientConfigBuilder().build(), poolConfig); databaseConfigs[1] = new DatabaseConfig(endpointStandalone1.getHostAndPort(), endpointStandalone0.getClientConfigBuilder().build(), poolConfig); - try (MultiDatabaseConnectionProvider customProvider = new MultiDatabaseConnectionProvider( + try (MultiDbConnectionProvider customProvider = new MultiDbConnectionProvider( new MultiDbConfig.Builder(databaseConfigs).build())) { - MultiDatabaseConnectionProvider.Database activeCluster = customProvider.getDatabase(); + MultiDbConnectionProvider.Database activeCluster = customProvider.getDatabase(); ConnectionPool connectionPool = activeCluster.getConnectionPool(); assertEquals(8, connectionPool.getMaxTotal()); assertEquals(4, connectionPool.getMaxIdle()); @@ -209,7 +207,7 @@ void testHealthChecksStopAfterProviderClose() throws InterruptedException { endpointStandalone0.getClientConfigBuilder().build()) .healthCheckStrategy(countingStrategy).build(); - MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider( new MultiDbConfig.Builder(Collections.singletonList(config)).build()); try { @@ -244,7 +242,7 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() { databaseConfigs[1] = DatabaseConfig.builder(endpointStandalone1.getHostAndPort(), endpointStandalone1.getClientConfigBuilder().build()).weight(0.3f).build(); - MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider( new 
MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) .maxNumFailoverAttempts(2).retryMaxAttempts(1).build()); @@ -281,7 +279,7 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent // ATTENTION: these configuration settings are not random and // adjusted to get exact numbers of failures with exact exception types // and open to impact from other defaulted values withing the components in use. - MultiDatabaseConnectionProvider testProvider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider( new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100) .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5) .circuitBreakerFailureRateThreshold(60).build()) { diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java index 54fc1e1f7a..f58df34e0c 100644 --- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java +++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java @@ -2,7 +2,7 @@ import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.*; -import static redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper.onHealthStatusChange; +import static redis.clients.jedis.mcf.MultiDbConnectionProviderHelper.onHealthStatusChange; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -52,7 +52,7 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) .failbackCheckInterval(100).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) 
assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); @@ -64,7 +64,7 @@ void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException provider.switchToHealthyDatabase(SwitchReason.FORCED, provider.getDatabase(endpoint2)); // Manually trigger periodic check - MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); // Should still be on cluster1 (cluster2 is in grace period) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); @@ -87,7 +87,7 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException { // grace // period - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); @@ -104,14 +104,14 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException { onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Trigger periodic check immediately - should still be on cluster1 - MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Wait for grace period to expire Thread.sleep(150); // Trigger periodic check after grace period expires - MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); // Should have failed back to cluster2 (higher weight, grace period expired) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); @@ -132,7 +132,7 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException new 
MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled .failbackCheckInterval(50).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); @@ -149,7 +149,7 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException Thread.sleep(100); // Trigger periodic check - MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); // Should still be on cluster1 (failback disabled) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); @@ -178,7 +178,7 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx // grace // period - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider(config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Initially, cluster3 should be active (highest weight: 3.0f vs 2.0f vs 1.0f) assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); @@ -202,7 +202,7 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx Thread.sleep(150); // Trigger periodic check - MultiDatabaseConnectionProviderHelper.periodicFailbackCheck(provider); + MultiDbConnectionProviderHelper.periodicFailbackCheck(provider); // Should have failed back to cluster3 (highest weight, grace period expired) assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java index c061c5c4e6..e4b95d2e52 100644 --- 
a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java +++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java @@ -18,8 +18,8 @@ import redis.clients.jedis.exceptions.JedisAccessControlException; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.mcf.ClusterSwitchEventArgs; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper; import redis.clients.jedis.mcf.SwitchReason; import redis.clients.jedis.util.IOUtils; @@ -68,7 +68,7 @@ public void cleanUp() { @Test public void pipelineWithSwitch() { - MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider provider = new MultiDbConnectionProvider( new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .build()); @@ -77,7 +77,7 @@ public void pipelineWithSwitch() { AbstractPipeline pipe = client.pipelined(); pipe.set("pstr", "foobar"); pipe.hset("phash", "foo", "bar"); - MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, + MultiDbConnectionProviderHelper.switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase()); pipe.sync(); } @@ -88,7 +88,7 @@ public void pipelineWithSwitch() { @Test public void transactionWithSwitch() { - MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider provider = new MultiDbConnectionProvider( new MultiDbConfig.Builder( getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort())) .build()); @@ -97,7 +97,7 @@ public void transactionWithSwitch() { AbstractTransaction tx = client.multi(); tx.set("tstr", "foobar"); tx.hset("thash", "foo", "bar"); - 
MultiDatabaseConnectionProviderHelper.switchToHealthyCluster(provider, + MultiDbConnectionProviderHelper.switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase()); assertEquals(Arrays.asList("OK", 1L), tx.exec()); } @@ -119,7 +119,7 @@ public void commandFailoverUnresolvableHost() { .circuitBreakerMinNumOfFailures(slidingWindowMinFails); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiDatabaseConnectionProvider connectionProvider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider( builder.build()); connectionProvider.setDatabaseSwitchListener(failoverReporter); @@ -162,7 +162,7 @@ public void commandFailover() { .circuitBreakerSlidingWindowSize(slidingWindowSize); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiDatabaseConnectionProvider connectionProvider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider( builder.build()); connectionProvider.setDatabaseSwitchListener(failoverReporter); @@ -200,7 +200,7 @@ public void pipelineFailover() { .fallbackExceptionList(Collections.singletonList(JedisConnectionException.class)); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiDatabaseConnectionProvider cacheProvider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider cacheProvider = new MultiDbConnectionProvider( builder.build()); cacheProvider.setDatabaseSwitchListener(failoverReporter); @@ -232,7 +232,7 @@ public void failoverFromAuthError() { .fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class)); RedisFailoverReporter failoverReporter = new RedisFailoverReporter(); - MultiDatabaseConnectionProvider cacheProvider = new MultiDatabaseConnectionProvider( + MultiDbConnectionProvider cacheProvider = new MultiDbConnectionProvider( builder.build()); 
cacheProvider.setDatabaseSwitchListener(failoverReporter); diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java index ef14cc51ce..6d1b14009e 100644 --- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java +++ b/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java @@ -16,11 +16,11 @@ import redis.clients.jedis.JedisClientConfig; import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.mcf.HealthStatus; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProviderHelper; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper; /** - * Tests for MultiDatabaseConnectionProvider event handling behavior during initialization and + * Tests for MultiDbConnectionProvider event handling behavior during initialization and * throughout its lifecycle with HealthStatusChangeEvents. 
*/ @ExtendWith(MockitoExtension.class) @@ -60,7 +60,7 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( config)) { assertFalse(provider.getDatabase(endpoint1).isInGracePeriod()); @@ -68,7 +68,7 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { // This should process immediately since initialization is complete assertDoesNotThrow(() -> { - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); }, "Post-initialization events should be processed immediately"); @@ -92,14 +92,14 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( config)) { // Verify initial state assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(), "Should start with endpoint1 active"); // Simulate multiple rapid events for the same endpoint (post-init behavior) - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // After first UNHEALTHY on active cluster: it enters grace period and provider fails over @@ -108,7 +108,7 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception { assertEquals(provider.getDatabase(endpoint2), 
provider.getDatabase(), "Should fail over to endpoint2"); - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Healthy event for non-active cluster should not immediately revert active cluster @@ -117,7 +117,7 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception { assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Grace period should still be in effect"); - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Further UNHEALTHY for non-active cluster is a no-op @@ -140,7 +140,7 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( config)) { // This test verifies that multiple endpoints are properly initialized @@ -166,7 +166,7 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception { // This test verifies that the provider initializes correctly and doesn't lose events // In practice, with health checks disabled, no events should be generated during init - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( config)) { // Verify successful initialization assertNotNull(provider.getDatabase(), "Provider should have initialized successfully"); @@ -195,11 +195,11 @@ void postInit_two_hop_failover_chain_respected() throws Exception { MultiDbConfig config = new MultiDbConfig.Builder( new 
MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build(); - try (MultiDatabaseConnectionProvider provider = new MultiDatabaseConnectionProvider( + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( config)) { // First event: endpoint1 (active) becomes UNHEALTHY -> failover to endpoint2, endpoint1 // enters grace - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Endpoint1 should be in grace after unhealthy"); @@ -207,7 +207,7 @@ void postInit_two_hop_failover_chain_respected() throws Exception { "Should have failed over to endpoint2"); // Second event: endpoint2 (now active) becomes UNHEALTHY -> failover to endpoint3 - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); assertTrue(provider.getDatabase(endpoint2).isInGracePeriod(), "Endpoint2 should be in grace after unhealthy"); @@ -216,7 +216,7 @@ void postInit_two_hop_failover_chain_respected() throws Exception { // Third event: endpoint1 becomes HEALTHY again -> no immediate switch due to grace period // behavior - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(), "Active cluster should remain endpoint3"); @@ -236,18 +236,18 @@ void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Excepti MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDatabaseConnectionProvider provider = new 
MultiDatabaseConnectionProvider( + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( config)) { // Verify initial state assertEquals(HealthStatus.HEALTHY, provider.getDatabase(endpoint1).getHealthStatus(), "Should start as HEALTHY"); // Send rapid sequence of events post-init - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // triggers failover and grace - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // non-active cluster becomes healthy - MultiDatabaseConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, + MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // still non-active and in grace; no change // Final expectations: endpoint1 is in grace, provider remains on endpoint2 diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java index 9250e90a3e..596a10ff46 100644 --- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java +++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java @@ -12,7 +12,7 @@ import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisConnectionException; import redis.clients.jedis.mcf.ClusterSwitchEventArgs; -import redis.clients.jedis.mcf.MultiDatabaseConnectionProvider; +import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.util.ClientTestUtil; import java.io.IOException; @@ -208,7 +208,7 @@ public void accept(ClusterSwitchEventArgs e) { throw new RuntimeException(e); } - MultiDatabaseConnectionProvider provider = 
ClientTestUtil.getConnectionProvider(client); + MultiDbConnectionProvider provider = ClientTestUtil.getConnectionProvider(client); ConnectionPool pool1 = provider.getDatabase(endpoint.getHostAndPort(0)).getConnectionPool(); ConnectionPool pool2 = provider.getDatabase(endpoint.getHostAndPort(1)).getConnectionPool(); From d8b1fba1bac8921af682a61f7b1834e420a2f9a3 Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 14:43:16 +0300 Subject: [PATCH 05/18] Rename ClusterSwitchEventArgs to DatabaseSwitchEvent --- .../jedis/builders/MultiDbClientBuilder.java | 6 ++-- .../jedis/mcf/ClusterSwitchEventArgs.java | 31 ------------------- .../jedis/mcf/DatabaseSwitchEvent.java | 30 ++++++++++++++++++ .../jedis/mcf/MultiDbConnectionProvider.java | 6 ++-- .../clients/jedis/MultiDbClientTest.java | 6 ++-- .../mcf/ActiveActiveLocalFailoverTest.java | 8 ++--- .../jedis/misc/AutomaticFailoverTest.java | 8 ++--- .../scenario/ActiveActiveFailoverTest.java | 10 +++--- 8 files changed, 52 insertions(+), 53 deletions(-) delete mode 100644 src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java create mode 100644 src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java index 12cc2efefb..3758d5c52b 100644 --- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java +++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java @@ -6,7 +6,7 @@ import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.executors.CommandExecutor; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; -import redis.clients.jedis.mcf.ClusterSwitchEventArgs; +import redis.clients.jedis.mcf.DatabaseSwitchEvent; import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.providers.ConnectionProvider; @@ -68,7 +68,7 @@ public abstract class MultiDbClientBuilder // Multi-db 
specific configuration fields private MultiDbConfig multiDbConfig = null; - private Consumer databaseSwitchListener = null; + private Consumer databaseSwitchListener = null; /** * Sets the multi-database configuration. @@ -94,7 +94,7 @@ public MultiDbClientBuilder multiDbConfig(MultiDbConfig config) { * @param listener the database switch event listener * @return this builder */ - public MultiDbClientBuilder databaseSwitchListener(Consumer listener) { + public MultiDbClientBuilder databaseSwitchListener(Consumer listener) { this.databaseSwitchListener = listener; return this; } diff --git a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java b/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java deleted file mode 100644 index a78c41864a..0000000000 --- a/src/main/java/redis/clients/jedis/mcf/ClusterSwitchEventArgs.java +++ /dev/null @@ -1,31 +0,0 @@ -package redis.clients.jedis.mcf; - -import redis.clients.jedis.Endpoint; -import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; - -public class ClusterSwitchEventArgs { - - private final SwitchReason reason; - private final String ClusterName; - private final Endpoint Endpoint; - - public ClusterSwitchEventArgs(SwitchReason reason, Endpoint endpoint, Database database) { - this.reason = reason; - // TODO: @ggivo do we need cluster name? 
- this.ClusterName = database.getCircuitBreaker().getName(); - this.Endpoint = endpoint; - } - - public SwitchReason getReason() { - return reason; - } - - public String getClusterName() { - return ClusterName; - } - - public Endpoint getEndpoint() { - return Endpoint; - } - -} diff --git a/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java b/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java new file mode 100644 index 0000000000..6cc233cd7d --- /dev/null +++ b/src/main/java/redis/clients/jedis/mcf/DatabaseSwitchEvent.java @@ -0,0 +1,30 @@ +package redis.clients.jedis.mcf; + +import redis.clients.jedis.Endpoint; +import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; + +public class DatabaseSwitchEvent { + + private final SwitchReason reason; + private final String databaseName; + private final Endpoint endpoint; + + public DatabaseSwitchEvent(SwitchReason reason, Endpoint endpoint, Database database) { + this.reason = reason; + this.databaseName = database.getCircuitBreaker().getName(); + this.endpoint = endpoint; + } + + public SwitchReason getReason() { + return reason; + } + + public String getDatabaseName() { + return databaseName; + } + + public Endpoint getEndpoint() { + return endpoint; + } + +} diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java index c39d137616..8d515627f3 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java @@ -80,7 +80,7 @@ public class MultiDbConnectionProvider implements ConnectionProvider { * Functional interface for listening to cluster switch events. The event args contain the reason * for the switch, the endpoint, and the cluster. 
*/ - private Consumer databaseSwitchListener; + private Consumer databaseSwitchListener; private List> fallbackExceptionList; @@ -743,12 +743,12 @@ public boolean canIterateFrom(Database iterateFrom) { public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Database database) { if (databaseSwitchListener != null) { - ClusterSwitchEventArgs eventArgs = new ClusterSwitchEventArgs(reason, endpoint, database); + DatabaseSwitchEvent eventArgs = new DatabaseSwitchEvent(reason, endpoint, database); databaseSwitchListener.accept(eventArgs); } } - public void setDatabaseSwitchListener(Consumer databaseSwitchListener) { + public void setDatabaseSwitchListener(Consumer databaseSwitchListener) { this.databaseSwitchListener = databaseSwitchListener; } diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java index 5325f0e742..43673da1ed 100644 --- a/src/test/java/redis/clients/jedis/MultiDbClientTest.java +++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java @@ -17,7 +17,7 @@ import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisValidationException; -import redis.clients.jedis.mcf.ClusterSwitchEventArgs; +import redis.clients.jedis.mcf.DatabaseSwitchEvent; import redis.clients.jedis.mcf.SwitchReason; import java.io.IOException; @@ -181,8 +181,8 @@ public void testWithDatabaseSwitchListener() { .weight(50.0f).build()) .build(); - Consumer eventConsumer; - List events = new ArrayList<>(); + Consumer eventConsumer; + List events = new ArrayList<>(); eventConsumer = events::add; try (MultiDbClient testClient = MultiDbClient.builder().databaseSwitchListener(eventConsumer) diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java index f42030cd52..a5aae5e9bf 100644 --- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java +++ 
b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java @@ -122,7 +122,7 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio // Use the parameterized fastFailover setting builder.fastFailover(fastFailover); - class FailoverReporter implements Consumer { + class FailoverReporter implements Consumer { String currentClusterName = "not set"; @@ -139,10 +139,10 @@ public String getCurrentClusterName() { } @Override - public void accept(ClusterSwitchEventArgs e) { - this.currentClusterName = e.getClusterName(); + public void accept(DatabaseSwitchEvent e) { + this.currentClusterName = e.getDatabaseName(); log.info("\n\n===={}=== \nJedis switching to cluster: {}\n====End of log===\n", - e.getReason(), e.getClusterName()); + e.getReason(), e.getDatabaseName()); if ((e.getReason() == SwitchReason.CIRCUIT_BREAKER || e.getReason() == SwitchReason.HEALTH_CHECK)) { failoverHappened = true; diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java index e4b95d2e52..ac74738226 100644 --- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java +++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java @@ -17,7 +17,7 @@ import redis.clients.jedis.*; import redis.clients.jedis.exceptions.JedisAccessControlException; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.ClusterSwitchEventArgs; +import redis.clients.jedis.mcf.DatabaseSwitchEvent; import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper; import redis.clients.jedis.mcf.SwitchReason; @@ -250,13 +250,13 @@ public void failoverFromAuthError() { jedis.close(); } - static class RedisFailoverReporter implements Consumer { + static class RedisFailoverReporter implements Consumer { boolean failedOver = false; @Override - public void accept(ClusterSwitchEventArgs 
e) { - log.info("Jedis fail over to cluster: " + e.getClusterName()); + public void accept(DatabaseSwitchEvent e) { + log.info("Jedis fail over to cluster: " + e.getDatabaseName()); failedOver = true; } } diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java index 596a10ff46..e6ebc42b8d 100644 --- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java +++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java @@ -11,7 +11,7 @@ import redis.clients.jedis.*; import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisConnectionException; -import redis.clients.jedis.mcf.ClusterSwitchEventArgs; +import redis.clients.jedis.mcf.DatabaseSwitchEvent; import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.util.ClientTestUtil; @@ -82,7 +82,7 @@ public void testFailover() { .fastFailover(true) .retryOnFailover(false) .build(); - class FailoverReporter implements Consumer { + class FailoverReporter implements Consumer { String currentClusterName = "not set"; @@ -99,10 +99,10 @@ public String getCurrentClusterName() { } @Override - public void accept(ClusterSwitchEventArgs e) { - this.currentClusterName = e.getClusterName(); + public void accept(DatabaseSwitchEvent e) { + this.currentClusterName = e.getDatabaseName(); log.info("\n\n====FailoverEvent=== \nJedis failover to cluster: {}\n====FailoverEvent===\n\n", - e.getClusterName()); + e.getDatabaseName()); if (failoverHappened) { failbackHappened = true; From 0eed3a6d151296be26de5e7578aef40d3dd9fa00 Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 15:04:00 +0300 Subject: [PATCH 06/18] Fix error in test after renaming multiDbConfig --- .../jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java index 076eade442..857f6dc32a 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java @@ -154,7 +154,7 @@ private static void setBuilderFailoverConfig(MultiDbConfig.Builder builder, int private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exception { // Access the underlying MultiDbConfig inside provider and adjust fields for this // test - Object cfg = ReflectionTestUtil.getField(provider, "MultiDbConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig"); ReflectionTestUtil.setField(cfg, "maxNumFailoverAttempts", maxAttempts); @@ -162,13 +162,13 @@ private void setProviderFailoverConfig(int maxAttempts, int delayMs) throws Exce } private int getProviderMaxAttempts() throws Exception { - Object cfg = ReflectionTestUtil.getField(provider, "MultiDbConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig"); return ReflectionTestUtil.getField(cfg, "maxNumFailoverAttempts"); } private int getProviderDelayMs() throws Exception { - Object cfg = ReflectionTestUtil.getField(provider, "MultiDbConfig"); + Object cfg = ReflectionTestUtil.getField(provider, "multiDbConfig"); return ReflectionTestUtil.getField(cfg, "delayInBetweenFailoverAttempts"); } From df049cfb71d7295ab38c4cb1d17e1f6d42728be0 Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 16:51:15 +0300 Subject: [PATCH 07/18] Rename MultiClusterPipeline to MultiDbPipeline --- src/main/java/redis/clients/jedis/MultiDbClient.java | 8 ++++---- src/main/java/redis/clients/jedis/UnifiedJedis.java | 4 ++-- ...{MultiClusterPipeline.java => MultiDbPipeline.java} | 10 ++++------ 3 files changed, 10 insertions(+), 12 deletions(-) rename 
src/main/java/redis/clients/jedis/mcf/{MultiClusterPipeline.java => MultiDbPipeline.java} (86%) diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java index bd65f119ff..a753739cec 100644 --- a/src/main/java/redis/clients/jedis/MultiDbClient.java +++ b/src/main/java/redis/clients/jedis/MultiDbClient.java @@ -6,7 +6,7 @@ import redis.clients.jedis.csc.Cache; import redis.clients.jedis.executors.CommandExecutor; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; -import redis.clients.jedis.mcf.MultiClusterPipeline; +import redis.clients.jedis.mcf.MultiDbPipeline; import redis.clients.jedis.mcf.MultiClusterTransaction; import redis.clients.jedis.providers.ConnectionProvider; import redis.clients.jedis.mcf.MultiDbConnectionProvider; @@ -219,11 +219,11 @@ public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) * The returned pipeline supports the same resilience features as the main client, including * automatic failover during batch execution. *

- * @return a new MultiClusterPipeline instance + * @return a new MultiDbPipeline instance */ @Override - public MultiClusterPipeline pipelined() { - return new MultiClusterPipeline(getMultiDbConnectionProvider(), commandObjects); + public MultiDbPipeline pipelined() { + return new MultiDbPipeline(getMultiDbConnectionProvider(), commandObjects); } /** diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java index ccb9d37df6..11e063b7c2 100644 --- a/src/main/java/redis/clients/jedis/UnifiedJedis.java +++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java @@ -33,7 +33,7 @@ import redis.clients.jedis.resps.RawVector; import redis.clients.jedis.json.JsonObjectMapper; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; -import redis.clients.jedis.mcf.MultiClusterPipeline; +import redis.clients.jedis.mcf.MultiDbPipeline; import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.mcf.MultiClusterTransaction; import redis.clients.jedis.params.*; @@ -5100,7 +5100,7 @@ public PipelineBase pipelined() { if (provider == null) { throw new IllegalStateException("It is not allowed to create Pipeline from this " + getClass()); } else if (provider instanceof MultiDbConnectionProvider) { - return new MultiClusterPipeline((MultiDbConnectionProvider) provider, commandObjects); + return new MultiDbPipeline((MultiDbConnectionProvider) provider, commandObjects); } else { return new Pipeline(provider.getConnection(), true, commandObjects); } diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java similarity index 86% rename from src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java index d302768fad..defb97d1a2 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterPipeline.java +++ 
b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java @@ -11,17 +11,16 @@ /** * This is high memory dependent solution as all the appending commands will be hold in memory until - * {@link MultiClusterPipeline#sync() SYNC} (or {@link MultiClusterPipeline#close() CLOSE}) gets - * called. + * {@link MultiDbPipeline#sync() SYNC} (or {@link MultiDbPipeline#close() CLOSE}) gets called. */ @Experimental -public class MultiClusterPipeline extends PipelineBase implements Closeable { +public class MultiDbPipeline extends PipelineBase implements Closeable { private final CircuitBreakerFailoverConnectionProvider failoverProvider; private final Queue>> commands = new LinkedList<>(); @Deprecated - public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider) { + public MultiDbPipeline(MultiDbConnectionProvider pooledProvider) { super(new CommandObjects()); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); @@ -32,8 +31,7 @@ public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider) { } } - public MultiClusterPipeline(MultiDbConnectionProvider pooledProvider, - CommandObjects commandObjects) { + public MultiDbPipeline(MultiDbConnectionProvider pooledProvider, CommandObjects commandObjects) { super(commandObjects); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); } From 571b8092767edadc3d58ae72799493de9e1f12d6 Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 16:52:46 +0300 Subject: [PATCH 08/18] Rename MultiClusterTransaction to MultiDbTransaction --- src/main/java/redis/clients/jedis/MultiDbClient.java | 12 ++++++------ src/main/java/redis/clients/jedis/UnifiedJedis.java | 4 ++-- ...usterTransaction.java => MultiDbTransaction.java} | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) rename src/main/java/redis/clients/jedis/mcf/{MultiClusterTransaction.java => MultiDbTransaction.java} (94%) diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java
b/src/main/java/redis/clients/jedis/MultiDbClient.java index a753739cec..ef6d2ca252 100644 --- a/src/main/java/redis/clients/jedis/MultiDbClient.java +++ b/src/main/java/redis/clients/jedis/MultiDbClient.java @@ -7,7 +7,7 @@ import redis.clients.jedis.executors.CommandExecutor; import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; import redis.clients.jedis.mcf.MultiDbPipeline; -import redis.clients.jedis.mcf.MultiClusterTransaction; +import redis.clients.jedis.mcf.MultiDbTransaction; import redis.clients.jedis.providers.ConnectionProvider; import redis.clients.jedis.mcf.MultiDbConnectionProvider; @@ -232,11 +232,11 @@ public MultiDbPipeline pipelined() { * The returned transaction supports the same resilience features as the main client, including * automatic failover during transaction execution. *

- * @return a new MultiClusterTransaction instance + * @return a new MultiDbTransaction instance */ @Override - public MultiClusterTransaction multi() { - return new MultiClusterTransaction((MultiDbConnectionProvider) provider, true, commandObjects); + public MultiDbTransaction multi() { + return new MultiDbTransaction((MultiDbConnectionProvider) provider, true, commandObjects); } /** @@ -244,13 +244,13 @@ public MultiClusterTransaction multi() { * @return transaction object */ @Override - public MultiClusterTransaction transaction(boolean doMulti) { + public MultiDbTransaction transaction(boolean doMulti) { if (provider == null) { throw new IllegalStateException( "It is not allowed to create Transaction from this " + getClass()); } - return new MultiClusterTransaction(getMultiDbConnectionProvider(), doMulti, commandObjects); + return new MultiDbTransaction(getMultiDbConnectionProvider(), doMulti, commandObjects); } public Endpoint getActiveEndpoint() { diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java index 11e063b7c2..8a59657ff6 100644 --- a/src/main/java/redis/clients/jedis/UnifiedJedis.java +++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java @@ -35,7 +35,7 @@ import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; import redis.clients.jedis.mcf.MultiDbPipeline; import redis.clients.jedis.mcf.MultiDbConnectionProvider; -import redis.clients.jedis.mcf.MultiClusterTransaction; +import redis.clients.jedis.mcf.MultiDbTransaction; import redis.clients.jedis.params.*; import redis.clients.jedis.providers.*; import redis.clients.jedis.resps.*; @@ -5121,7 +5121,7 @@ public AbstractTransaction transaction(boolean doMulti) { if (provider == null) { throw new IllegalStateException("It is not allowed to create Transaction from this " + getClass()); } else if (provider instanceof MultiDbConnectionProvider) { - return new MultiClusterTransaction((MultiDbConnectionProvider) provider, doMulti, 
commandObjects); + return new MultiDbTransaction((MultiDbConnectionProvider) provider, doMulti, commandObjects); } else { return new Transaction(provider.getConnection(), doMulti, true, commandObjects); } diff --git a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java similarity index 94% rename from src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java index e4afa24887..ccfe9a0cd6 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiClusterTransaction.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java @@ -20,7 +20,7 @@ * This is high memory dependent solution as all the appending commands will be hold in memory. */ @Experimental -public class MultiClusterTransaction extends TransactionBase { +public class MultiDbTransaction extends TransactionBase { private static final Builder NO_OP_BUILDER = BuilderFactory.RAW_OBJECT; @@ -39,7 +39,7 @@ public class MultiClusterTransaction extends TransactionBase { * @param provider */ @Deprecated - public MultiClusterTransaction(MultiDbConnectionProvider provider) { + public MultiDbTransaction(MultiDbConnectionProvider provider) { this(provider, true); } @@ -50,7 +50,7 @@ public MultiClusterTransaction(MultiDbConnectionProvider provider) { * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI */ @Deprecated - public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMulti) { + public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti) { this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); try (Connection connection = failoverProvider.getConnection()) { @@ -68,7 +68,7 @@ public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMul * @param doMulti {@code false} should be set to enable manual WATCH, UNWATCH and MULTI * @param 
commandObjects command objects */ - public MultiClusterTransaction(MultiDbConnectionProvider provider, boolean doMulti, + public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti, CommandObjects commandObjects) { super(commandObjects); this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); From 6da8ceb63624921e35df4c9cafba30727f6bc2fa Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 17:07:56 +0300 Subject: [PATCH 09/18] Rename MultiClusterTransaction to MultiDbTransaction --- .../java/redis/clients/jedis/MultiDbClient.java | 8 ++++---- .../java/redis/clients/jedis/UnifiedJedis.java | 14 +------------- .../jedis/builders/MultiDbClientBuilder.java | 8 ++++---- ...ndExecutor.java => MultiDbCommandExecutor.java} | 5 ++--- ...a => MultiDbCommandExecutorThresholdsTest.java} | 8 ++++---- 5 files changed, 15 insertions(+), 28 deletions(-) rename src/main/java/redis/clients/jedis/mcf/{CircuitBreakerCommandExecutor.java => MultiDbCommandExecutor.java} (94%) rename src/test/java/redis/clients/jedis/mcf/{CircuitBreakerThresholdsTest.java => MultiDbCommandExecutorThresholdsTest.java} (97%) diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java index ef6d2ca252..3ffeaf93aa 100644 --- a/src/main/java/redis/clients/jedis/MultiDbClient.java +++ b/src/main/java/redis/clients/jedis/MultiDbClient.java @@ -5,7 +5,7 @@ import redis.clients.jedis.builders.MultiDbClientBuilder; import redis.clients.jedis.csc.Cache; import redis.clients.jedis.executors.CommandExecutor; -import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; +import redis.clients.jedis.mcf.MultiDbCommandExecutor; import redis.clients.jedis.mcf.MultiDbPipeline; import redis.clients.jedis.mcf.MultiDbTransaction; import redis.clients.jedis.providers.ConnectionProvider; @@ -75,9 +75,9 @@ * resilience features. *

* @author Ivo Gaydazhiev - * @since 5.2.0 + * @since 7.0.0 * @see MultiDbConnectionProvider - * @see CircuitBreakerCommandExecutor + * @see MultiDbCommandExecutor * @see MultiDbConfig */ @Experimental @@ -90,7 +90,7 @@ public class MultiDbClient extends UnifiedJedis { * the builder pattern for advanced configurations. For most use cases, prefer using * {@link #builder()} to create instances. *

- * @param commandExecutor the command executor (typically CircuitBreakerCommandExecutor) + * @param commandExecutor the command executor (typically MultiDbCommandExecutor) * @param connectionProvider the connection provider (typically MultiDbConnectionProvider) * @param commandObjects the command objects * @param redisProtocol the Redis protocol version diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java index 8a59657ff6..18616c2b71 100644 --- a/src/main/java/redis/clients/jedis/UnifiedJedis.java +++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java @@ -32,7 +32,7 @@ import redis.clients.jedis.params.VSimParams; import redis.clients.jedis.resps.RawVector; import redis.clients.jedis.json.JsonObjectMapper; -import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; +import redis.clients.jedis.mcf.MultiDbCommandExecutor; import redis.clients.jedis.mcf.MultiDbPipeline; import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.mcf.MultiDbTransaction; @@ -232,18 +232,6 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo this(new RetryableCommandExecutor(provider, maxAttempts, maxTotalRetriesDuration), provider); } - /** - * Constructor which supports multiple cluster/database endpoints each with their own isolated connection pool. - *

- * With this Constructor users can seamlessly failover to Disaster Recovery (DR), Backup, and Active-Active cluster(s) - * by using simple configuration which is passed through from Resilience4j - https://resilience4j.readme.io/docs - *

- */ - @Experimental - public UnifiedJedis(MultiDbConnectionProvider provider) { - this(new CircuitBreakerCommandExecutor(provider), provider); - } - /** * The constructor to use a custom {@link CommandExecutor}. *

diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java index 3758d5c52b..002de51666 100644 --- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java +++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java @@ -5,7 +5,7 @@ import redis.clients.jedis.MultiDbConfig; import redis.clients.jedis.annots.Experimental; import redis.clients.jedis.executors.CommandExecutor; -import redis.clients.jedis.mcf.CircuitBreakerCommandExecutor; +import redis.clients.jedis.mcf.MultiDbCommandExecutor; import redis.clients.jedis.mcf.DatabaseSwitchEvent; import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.providers.ConnectionProvider; @@ -60,7 +60,7 @@ * * @param the client type that this builder creates * @author Ivo Gaydazhiev - * @since 5.2.0 + * @since 7.0.0 */ @Experimental public abstract class MultiDbClientBuilder @@ -125,8 +125,8 @@ protected ConnectionProvider createDefaultConnectionProvider() { @Override protected CommandExecutor createDefaultCommandExecutor() { - // For multi-db clients, we always use CircuitBreakerCommandExecutor - return new CircuitBreakerCommandExecutor((MultiDbConnectionProvider) this.connectionProvider); + // For multi-db clients, we always use MultiDbCommandExecutor + return new MultiDbCommandExecutor((MultiDbConnectionProvider) this.connectionProvider); } @Override diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java similarity index 94% rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java index 31d8d67a73..815266df53 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerCommandExecutor.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java @@ -21,10 
+21,9 @@ *

*/ @Experimental -public class CircuitBreakerCommandExecutor extends CircuitBreakerFailoverBase - implements CommandExecutor { +public class MultiDbCommandExecutor extends CircuitBreakerFailoverBase implements CommandExecutor { - public CircuitBreakerCommandExecutor(MultiDbConnectionProvider provider) { + public MultiDbCommandExecutor(MultiDbConnectionProvider provider) { super(provider); } diff --git a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java similarity index 97% rename from src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java index 7d439c6d46..46d041f9db 100644 --- a/src/test/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java @@ -33,7 +33,7 @@ public class CircuitBreakerThresholdsTest { private MultiDbConnectionProvider realProvider; private MultiDbConnectionProvider spyProvider; private Database cluster; - private CircuitBreakerCommandExecutor executor; + private MultiDbCommandExecutor executor; private CommandObject dummyCommand; private TrackingConnectionPool poolMock; private HostAndPort fakeEndpoint = new HostAndPort("fake", 6379); @@ -61,7 +61,7 @@ public void setup() throws Exception { cluster = spyProvider.getDatabase(); - executor = new CircuitBreakerCommandExecutor(spyProvider); + executor = new MultiDbCommandExecutor(spyProvider); dummyCommand = new CommandObject<>(new CommandArguments(Protocol.Command.PING), BuilderFactory.STRING); @@ -129,7 +129,7 @@ public void rateBelowThreshold_doesNotFailover() throws Exception { MultiDbConnectionProvider rp = new MultiDbConnectionProvider(cfgBuilder.build()); MultiDbConnectionProvider sp = spy(rp); Database c = sp.getDatabase(); - try (CircuitBreakerCommandExecutor ex = new 
CircuitBreakerCommandExecutor(sp)) { + try (MultiDbCommandExecutor ex = new MultiDbCommandExecutor(sp)) { CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING), BuilderFactory.STRING); @@ -197,7 +197,7 @@ public void thresholdMatrix(int minFailures, float ratePercent, int successes, i MultiDbConnectionProvider real = new MultiDbConnectionProvider(cfgBuilder.build()); MultiDbConnectionProvider spy = spy(real); Database c = spy.getDatabase(); - try (CircuitBreakerCommandExecutor ex = new CircuitBreakerCommandExecutor(spy)) { + try (MultiDbCommandExecutor ex = new MultiDbCommandExecutor(spy)) { CommandObject cmd = new CommandObject<>(new CommandArguments(Protocol.Command.PING), BuilderFactory.STRING); From 07dd63af85d637f0fe5725fae8e11d4c434447f3 Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 17:13:19 +0300 Subject: [PATCH 10/18] Rename MultiClusterTransaction to MultiDbTransaction --- ...nnectionProvider.java => MultiDbConnectionSupplier.java} | 4 ++-- src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java | 6 +++--- .../java/redis/clients/jedis/mcf/MultiDbTransaction.java | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) rename src/main/java/redis/clients/jedis/mcf/{CircuitBreakerFailoverConnectionProvider.java => MultiDbConnectionSupplier.java} (91%) diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java similarity index 91% rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java index 7dfd1ef527..7310ef8d63 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java @@ -14,9 +14,9 @@ * Active-Active cluster(s) by using simple configuration */ @Experimental -public class 
CircuitBreakerFailoverConnectionProvider extends CircuitBreakerFailoverBase { +public class MultiDbConnectionSupplier extends CircuitBreakerFailoverBase { - public CircuitBreakerFailoverConnectionProvider(MultiDbConnectionProvider provider) { + public MultiDbConnectionSupplier(MultiDbConnectionProvider provider) { super(provider); } diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java index defb97d1a2..bc0d950a6a 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbPipeline.java @@ -16,14 +16,14 @@ @Experimental public class MultiDbPipeline extends PipelineBase implements Closeable { - private final CircuitBreakerFailoverConnectionProvider failoverProvider; + private final MultiDbConnectionSupplier failoverProvider; private final Queue>> commands = new LinkedList<>(); @Deprecated public MultiDbPipeline(MultiDbConnectionProvider pooledProvider) { super(new CommandObjects()); - this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); + this.failoverProvider = new MultiDbConnectionSupplier(pooledProvider); try (Connection connection = failoverProvider.getConnection()) { RedisProtocol proto = connection.getRedisProtocol(); @@ -33,7 +33,7 @@ public MultiDbPipeline(MultiDbConnectionProvider pooledProvider) { public MultiDbPipeline(MultiDbConnectionProvider pooledProvider, CommandObjects commandObjects) { super(commandObjects); - this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(pooledProvider); + this.failoverProvider = new MultiDbConnectionSupplier(pooledProvider); } @Override diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java index ccfe9a0cd6..1688a2c635 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java +++ 
b/src/main/java/redis/clients/jedis/mcf/MultiDbTransaction.java @@ -26,7 +26,7 @@ public class MultiDbTransaction extends TransactionBase { private static final String GRAPH_COMMANDS_NOT_SUPPORTED_MESSAGE = "Graph commands are not supported."; - private final CircuitBreakerFailoverConnectionProvider failoverProvider; + private final MultiDbConnectionSupplier failoverProvider; private final AtomicInteger extraCommandCount = new AtomicInteger(); private final Queue>> commands = new LinkedList<>(); @@ -51,7 +51,7 @@ public MultiDbTransaction(MultiDbConnectionProvider provider) { */ @Deprecated public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti) { - this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); + this.failoverProvider = new MultiDbConnectionSupplier(provider); try (Connection connection = failoverProvider.getConnection()) { RedisProtocol proto = connection.getRedisProtocol(); @@ -71,7 +71,7 @@ public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti) { public MultiDbTransaction(MultiDbConnectionProvider provider, boolean doMulti, CommandObjects commandObjects) { super(commandObjects); - this.failoverProvider = new CircuitBreakerFailoverConnectionProvider(provider); + this.failoverProvider = new MultiDbConnectionSupplier(provider); if (doMulti) multi(); } From 61056e38538a8276c6fd462eab0a12edfd8a0bb7 Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 17:16:06 +0300 Subject: [PATCH 11/18] Rename CircuitBreakerFailoverBase to MultiDbFailoverBase --- .../java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java | 2 +- .../redis/clients/jedis/mcf/MultiDbConnectionSupplier.java | 2 +- ...rcuitBreakerFailoverBase.java => MultiDbFailoverBase.java} | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) rename src/main/java/redis/clients/jedis/mcf/{CircuitBreakerFailoverBase.java => MultiDbFailoverBase.java} (96%) diff --git 
a/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java index 815266df53..d3b7c48e2e 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java @@ -21,7 +21,7 @@ *

*/ @Experimental -public class MultiDbCommandExecutor extends CircuitBreakerFailoverBase implements CommandExecutor { +public class MultiDbCommandExecutor extends MultiDbFailoverBase implements CommandExecutor { public MultiDbCommandExecutor(MultiDbConnectionProvider provider) { super(provider); diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java index 7310ef8d63..9bd1f35440 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java @@ -14,7 +14,7 @@ * Active-Active cluster(s) by using simple configuration */ @Experimental -public class MultiDbConnectionSupplier extends CircuitBreakerFailoverBase { +public class MultiDbConnectionSupplier extends MultiDbFailoverBase { public MultiDbConnectionSupplier(MultiDbConnectionProvider provider) { super(provider); diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java similarity index 96% rename from src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java rename to src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java index ba1ea98dec..3e9d5f2d39 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerFailoverBase.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java @@ -20,12 +20,12 @@ *

*/ @Experimental -public class CircuitBreakerFailoverBase implements AutoCloseable { +public class MultiDbFailoverBase implements AutoCloseable { private final Lock lock = new ReentrantLock(true); protected final MultiDbConnectionProvider provider; - public CircuitBreakerFailoverBase(MultiDbConnectionProvider provider) { + public MultiDbFailoverBase(MultiDbConnectionProvider provider) { this.provider = provider; } From ee588cc8f1a5c1d1f19cdbd56857b40a788a8204 Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 17:41:26 +0300 Subject: [PATCH 12/18] Rename tests 'Cluster*' --- .../redis/clients/jedis/UnifiedJedis.java | 1 - ...va => DatabaseEvaluateThresholdsTest.java} | 36 +++++------ ... MultiDbCircuitBreakerThresholdsTest.java} | 2 +- ...ctionProviderDynamicEndpointUnitTest.java} | 47 +++++++------- ...onProviderFailoverAttemptsConfigTest.java} | 6 +- ...ConnectionProviderInitializationTest.java} | 62 +++++++++---------- 6 files changed, 73 insertions(+), 81 deletions(-) rename src/test/java/redis/clients/jedis/mcf/{ClusterEvaluateThresholdsTest.java => DatabaseEvaluateThresholdsTest.java} (87%) rename src/test/java/redis/clients/jedis/mcf/{MultiDbCommandExecutorThresholdsTest.java => MultiDbCircuitBreakerThresholdsTest.java} (99%) rename src/test/java/redis/clients/jedis/mcf/{MultiClusterDynamicEndpointUnitTest.java => MultiDbConnectionProviderDynamicEndpointUnitTest.java} (84%) rename src/test/java/redis/clients/jedis/mcf/{MultiClusterFailoverAttemptsConfigTest.java => MultiDbConnectionProviderFailoverAttemptsConfigTest.java} (97%) rename src/test/java/redis/clients/jedis/mcf/{MultiClusterInitializationTest.java => MultiDbConnectionProviderInitializationTest.java} (62%) diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java index 18616c2b71..548380aa9d 100644 --- a/src/main/java/redis/clients/jedis/UnifiedJedis.java +++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java @@ -32,7 +32,6 @@ 
import redis.clients.jedis.params.VSimParams; import redis.clients.jedis.resps.RawVector; import redis.clients.jedis.json.JsonObjectMapper; -import redis.clients.jedis.mcf.MultiDbCommandExecutor; import redis.clients.jedis.mcf.MultiDbPipeline; import redis.clients.jedis.mcf.MultiDbConnectionProvider; import redis.clients.jedis.mcf.MultiDbTransaction; diff --git a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java similarity index 87% rename from src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java rename to src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java index 8a6fd466c0..2892005cb4 100644 --- a/src/test/java/redis/clients/jedis/mcf/ClusterEvaluateThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java @@ -17,29 +17,29 @@ /** * Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures * must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the - * provider and cluster wiring to avoid network I/O. + * provider and {@link Database} wiring to avoid network I/O. 
*/ -public class ClusterEvaluateThresholdsTest { +public class DatabaseEvaluateThresholdsTest { private MultiDbConnectionProvider provider; - private Database cluster; + private Database database; private CircuitBreaker circuitBreaker; private CircuitBreaker.Metrics metrics; @BeforeEach public void setup() { provider = mock(MultiDbConnectionProvider.class); - cluster = mock(Database.class); + database = mock(Database.class); circuitBreaker = mock(CircuitBreaker.class); metrics = mock(CircuitBreaker.Metrics.class); - when(cluster.getCircuitBreaker()).thenReturn(circuitBreaker); + when(database.getCircuitBreaker()).thenReturn(circuitBreaker); when(circuitBreaker.getMetrics()).thenReturn(metrics); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); // Configure the mock to call the real evaluateThresholds method - doCallRealMethod().when(cluster).evaluateThresholds(anyBoolean()); + doCallRealMethod().when(database).evaluateThresholds(anyBoolean()); } @@ -50,13 +50,13 @@ public void setup() { */ @Test public void belowMinFailures_doesNotFailover() { - when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3); + when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3); when(metrics.getNumberOfFailedCalls()).thenReturn(1); // +1 becomes 2, still < 3 when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0); - when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f); + when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); - cluster.evaluateThresholds(false); + database.evaluateThresholds(false); verify(circuitBreaker, never()).transitionToOpenState(); verify(provider, never()).switchToHealthyDatabase(any(), any()); } @@ -68,13 +68,13 @@ public void belowMinFailures_doesNotFailover() { */ @Test public void minFailuresAndRateExceeded_triggersOpenState() { - when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3); + 
when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3); when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, reaching minFailures when(metrics.getNumberOfSuccessfulCalls()).thenReturn(0); - when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f); + when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(50.0f); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); - cluster.evaluateThresholds(false); + database.evaluateThresholds(false); verify(circuitBreaker, times(1)).transitionToOpenState(); } @@ -86,13 +86,13 @@ public void minFailuresAndRateExceeded_triggersOpenState() { */ @Test public void rateBelowThreshold_doesNotFailover() { - when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(3); + when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(3); when(metrics.getNumberOfSuccessfulCalls()).thenReturn(3); when(metrics.getNumberOfFailedCalls()).thenReturn(2); // +1 becomes 3, rate = 3/(3+3) = 50% - when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(80.0f); + when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(80.0f); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); - cluster.evaluateThresholds(false); + database.evaluateThresholds(false); verify(circuitBreaker, never()).transitionToOpenState(); verify(provider, never()).switchToHealthyDatabase(any(), any()); @@ -165,13 +165,13 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { public void thresholdMatrix(int minFailures, float ratePercent, int successes, int failures, boolean lastFailRecorded, boolean expectOpenState) { - when(cluster.getCircuitBreakerMinNumOfFailures()).thenReturn(minFailures); + when(database.getCircuitBreakerMinNumOfFailures()).thenReturn(minFailures); when(metrics.getNumberOfSuccessfulCalls()).thenReturn(successes); when(metrics.getNumberOfFailedCalls()).thenReturn(failures); - 
when(cluster.getCircuitBreakerFailureRateThreshold()).thenReturn(ratePercent); + when(database.getCircuitBreakerFailureRateThreshold()).thenReturn(ratePercent); when(circuitBreaker.getState()).thenReturn(CircuitBreaker.State.CLOSED); - cluster.evaluateThresholds(lastFailRecorded); + database.evaluateThresholds(lastFailRecorded); if (expectOpenState) { verify(circuitBreaker, times(1)).transitionToOpenState(); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java similarity index 99% rename from src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java index 46d041f9db..7a0f4319c6 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbCommandExecutorThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java @@ -28,7 +28,7 @@ * must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the * provider and cluster wiring to avoid network I/O. 
*/ -public class CircuitBreakerThresholdsTest { +public class MultiDbCircuitBreakerThresholdsTest { private MultiDbConnectionProvider realProvider; private MultiDbConnectionProvider spyProvider; diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java similarity index 84% rename from src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java index 6ddca7ae12..ceb5cc021c 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterDynamicEndpointUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java @@ -5,7 +5,6 @@ import org.mockito.MockedConstruction; import redis.clients.jedis.Connection; -import redis.clients.jedis.ConnectionPool; import redis.clients.jedis.DefaultJedisClientConfig; import redis.clients.jedis.EndpointConfig; import redis.clients.jedis.HostAndPort; @@ -21,7 +20,7 @@ import static org.mockito.Mockito.mockConstruction; import static org.mockito.Mockito.when; -public class MultiClusterDynamicEndpointUnitTest { +public class MultiDbConnectionProviderDynamicEndpointUnitTest { private MultiDbConnectionProvider provider; private JedisClientConfig clientConfig; @@ -41,7 +40,7 @@ void setUp() { provider = new MultiDbConnectionProvider(multiConfig); } - // Helper method to create cluster configurations + // Helper method to create database configurations private DatabaseConfig createDatabaseConfig(HostAndPort hostAndPort, float weight) { // Disable health check for unit tests to avoid real connections return DatabaseConfig.builder(hostAndPort, clientConfig).weight(weight) @@ -49,18 +48,18 @@ private DatabaseConfig createDatabaseConfig(HostAndPort hostAndPort, float weigh } @Test - void testAddNewCluster() { + void testAddNewDatabase() { DatabaseConfig 
newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); // Should not throw exception assertDoesNotThrow(() -> provider.add(newConfig)); - // Verify the cluster was added by checking it can be retrieved + // Verify the database was added by checking it can be retrieved assertNotNull(provider.getDatabase(endpoint2.getHostAndPort())); } @Test - void testAddDuplicateCluster() { + void testAddDuplicateDatabase() { DatabaseConfig duplicateConfig = createDatabaseConfig(endpoint1.getHostAndPort(), 2.0f); // Should throw validation exception for duplicate endpoint @@ -80,19 +79,18 @@ void testRemoveExistingCluster() { try (MockedConstruction mockedPool = mockPool(mockConnection)) { // Create initial provider with endpoint1 - DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); + DatabaseConfig dbConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); - MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { clusterConfig1 }) - .build(); + MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { dbConfig1 }).build(); try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider( multiConfig)) { - // Add endpoint2 as second cluster + // Add endpoint2 as second database DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); providerWithMockedPool.add(newConfig); - // Now remove endpoint1 (original cluster) + // Now remove endpoint1 (original database) assertDoesNotThrow(() -> providerWithMockedPool.remove(endpoint1.getHostAndPort())); // Verify endpoint1 was removed @@ -119,8 +117,8 @@ void testRemoveNonExistentCluster() { } @Test - void testRemoveLastRemainingCluster() { - // Should throw validation exception when trying to remove the last cluster + void testRemoveLastRemainingDatabase() { + // Should throw validation exception when trying to remove the last database assertThrows(JedisValidationException.class, () -> 
provider.remove(endpoint1.getHostAndPort())); } @@ -132,7 +130,7 @@ void testRemoveNullEndpoint() { @Test void testAddAndRemoveMultipleClusters() { - // Add endpoint2 as second cluster + // Add endpoint2 as second database DatabaseConfig config2 = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); // Create a third endpoint for this test @@ -142,7 +140,7 @@ void testAddAndRemoveMultipleClusters() { provider.add(config2); provider.add(config3); - // Verify all clusters exist + // Verify all databases exist assertNotNull(provider.getDatabase(endpoint1.getHostAndPort())); assertNotNull(provider.getDatabase(endpoint2.getHostAndPort())); assertNotNull(provider.getDatabase(endpoint3)); @@ -150,7 +148,7 @@ void testAddAndRemoveMultipleClusters() { // Remove endpoint2 provider.remove(endpoint2.getHostAndPort()); - // Verify correct cluster was removed + // Verify correct database was removed assertNull(provider.getDatabase(endpoint2.getHostAndPort())); assertNotNull(provider.getDatabase(endpoint1.getHostAndPort())); assertNotNull(provider.getDatabase(endpoint3)); @@ -158,14 +156,14 @@ void testAddAndRemoveMultipleClusters() { @Test void testActiveClusterHandlingOnAdd() { - // The initial cluster should be active + // The initial database should be active assertNotNull(provider.getDatabase()); // Add endpoint2 with higher weight DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 5.0f); provider.add(newConfig); - // Active cluster should still be valid (implementation may or may not switch) + // Active database should still be valid (implementation may or may not switch) assertNotNull(provider.getDatabase()); } @@ -176,26 +174,25 @@ void testActiveClusterHandlingOnRemove() { try (MockedConstruction mockedPool = mockPool(mockConnection)) { // Create initial provider with endpoint1 - DatabaseConfig clusterConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 1.0f); + DatabaseConfig dbConfig1 = createDatabaseConfig(endpoint1.getHostAndPort(), 
1.0f); - MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { clusterConfig1 }) - .build(); + MultiDbConfig multiConfig = MultiDbConfig.builder(new DatabaseConfig[] { dbConfig1 }).build(); try (MultiDbConnectionProvider providerWithMockedPool = new MultiDbConnectionProvider( multiConfig)) { - // Add endpoint2 as second cluster + // Add endpoint2 as second database DatabaseConfig newConfig = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); providerWithMockedPool.add(newConfig); - // Get current active cluster + // Get current active database Object initialActiveCluster = providerWithMockedPool.getDatabase(); assertNotNull(initialActiveCluster); - // Remove endpoint1 (original cluster, might be active) + // Remove endpoint1 (original database, might be active) providerWithMockedPool.remove(endpoint1.getHostAndPort()); - // Should still have an active cluster + // Should still have an active database assertNotNull(providerWithMockedPool.getDatabase()); } } diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java similarity index 97% rename from src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java index 857f6dc32a..0b062e4298 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterFailoverAttemptsConfigTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java @@ -22,9 +22,9 @@ /** * Tests for how getMaxNumFailoverAttempts and getDelayInBetweenFailoverAttempts impact - * MultiDbConnectionProvider behaviour when no healthy clusters are available. + * MultiDbConnectionProvider behaviour when no healthy databases are available. 
*/ -public class MultiClusterFailoverAttemptsConfigTest { +public class MultiDbConnectionProviderFailoverAttemptsConfigTest { private HostAndPort endpoint0 = new HostAndPort("purposefully-incorrect", 0000); private HostAndPort endpoint1 = new HostAndPort("purposefully-incorrect", 0001); @@ -47,7 +47,7 @@ void setUp() throws Exception { provider = new MultiDbConnectionProvider(builder.build()); - // Disable both clusters to force handleNoHealthyCluster path + // Disable both databases to force handleNoHealthyCluster path provider.getDatabase(endpoint0).setDisabled(true); provider.getDatabase(endpoint1).setDisabled(true); } diff --git a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java similarity index 62% rename from src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java rename to src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java index e01af8f8f4..1935647d46 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiClusterInitializationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java @@ -15,13 +15,14 @@ import redis.clients.jedis.HostAndPort; import redis.clients.jedis.JedisClientConfig; import redis.clients.jedis.MultiDbConfig; +import redis.clients.jedis.MultiDbConfig.DatabaseConfig; import redis.clients.jedis.exceptions.JedisValidationException; /** * Tests for MultiDbConnectionProvider initialization edge cases */ @ExtendWith(MockitoExtension.class) -public class MultiClusterInitializationTest { +public class MultiDbConnectionProviderInitializationTest { private HostAndPort endpoint1; private HostAndPort endpoint2; @@ -48,28 +49,26 @@ private MockedConstruction mockPool() { @Test void testInitializationWithMixedHealthCheckConfiguration() { try (MockedConstruction mockedPool = mockPool()) { - // Create clusters with mixed health check 
configuration - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig - .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false) // No health - // check + // Create databases with mixed health check configuration + DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f) + .healthCheckEnabled(false) // No health + // check .build(); - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig - .builder(endpoint2, clientConfig).weight(2.0f) + DatabaseConfig db2 = DatabaseConfig.builder(endpoint2, clientConfig).weight(2.0f) .healthCheckStrategySupplier(EchoStrategy.DEFAULT) // With // health // check .build(); - MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db2 }).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Should initialize successfully assertNotNull(provider.getDatabase()); - // Should select cluster1 (no health check, assumed healthy) or cluster2 based on weight - // Since cluster2 has higher weight and health checks, it should be selected if healthy + // Should select db1 (no health check, assumed healthy) or db2 based on weight + // Since db2 has higher weight and health checks, it should be selected if healthy assertTrue(provider.getDatabase() == provider.getDatabase(endpoint1) || provider.getDatabase() == provider.getDatabase(endpoint2)); } @@ -79,19 +78,18 @@ void testInitializationWithMixedHealthCheckConfiguration() { @Test void testInitializationWithAllHealthChecksDisabled() { try (MockedConstruction mockedPool = mockPool()) { - // Create clusters with no health checks - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig - .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); + // Create databases with no health checks + DatabaseConfig db1 = 
DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f) + .healthCheckEnabled(false).build(); - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig - .builder(endpoint2, clientConfig).weight(3.0f) // Higher weight + DatabaseConfig db22 = DatabaseConfig.builder(endpoint2, clientConfig).weight(3.0f) // Higher + // weight .healthCheckEnabled(false).build(); - MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db22 }).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Should select cluster2 (highest weight, no health checks) + // Should select db22 (highest weight, no health checks) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); } } @@ -100,14 +98,13 @@ void testInitializationWithAllHealthChecksDisabled() { @Test void testInitializationWithSingleCluster() { try (MockedConstruction mockedPool = mockPool()) { - MultiDbConfig.DatabaseConfig cluster = MultiDbConfig.DatabaseConfig - .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); + DatabaseConfig db = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f) + .healthCheckEnabled(false).build(); - MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster }).build(); + MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db }).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Should select the only available cluster + // Should select the only available db assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); } } @@ -123,33 +120,32 @@ void testErrorHandlingWithNullConfiguration() { @Test void testErrorHandlingWithEmptyClusterArray() { assertThrows(JedisValidationException.class, () -> { - new MultiDbConfig.Builder(new 
MultiDbConfig.DatabaseConfig[0]).build(); + new MultiDbConfig.Builder(new DatabaseConfig[0]).build(); }); } @Test void testErrorHandlingWithNullDatabaseConfig() { assertThrows(IllegalArgumentException.class, () -> { - new MultiDbConfig.Builder(new MultiDbConfig.DatabaseConfig[] { null }).build(); + new MultiDbConfig.Builder(new DatabaseConfig[] { null }).build(); }); } @Test void testInitializationWithZeroWeights() { try (MockedConstruction mockedPool = mockPool()) { - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig - .builder(endpoint1, clientConfig).weight(0.0f) // Zero weight + DatabaseConfig db1 = DatabaseConfig.builder(endpoint1, clientConfig).weight(0.0f) // Zero + // weight .healthCheckEnabled(false).build(); - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig - .builder(endpoint2, clientConfig).weight(0.0f) // Zero weight + DatabaseConfig db2 = DatabaseConfig.builder(endpoint2, clientConfig).weight(0.0f) // Zero + // weight .healthCheckEnabled(false).build(); - MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); + MultiDbConfig config = new MultiDbConfig.Builder(new DatabaseConfig[] { db1, db2 }).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Should still initialize and select one of the clusters + // Should still initialize and select one of the databases assertNotNull(provider.getDatabase()); } } From 263472d611549aea0b33812e378b93623718a60b Mon Sep 17 00:00:00 2001 From: ggivo Date: Mon, 6 Oct 2025 18:24:52 +0300 Subject: [PATCH 13/18] Revert removed UnifiedJedis(MultiDbConnectionProvider provider) constructor --- src/main/java/redis/clients/jedis/UnifiedJedis.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/main/java/redis/clients/jedis/UnifiedJedis.java b/src/main/java/redis/clients/jedis/UnifiedJedis.java index 548380aa9d..895d6280c6 100644 --- 
a/src/main/java/redis/clients/jedis/UnifiedJedis.java +++ b/src/main/java/redis/clients/jedis/UnifiedJedis.java @@ -28,6 +28,7 @@ import redis.clients.jedis.json.JsonSetParams; import redis.clients.jedis.json.Path; import redis.clients.jedis.json.Path2; +import redis.clients.jedis.mcf.MultiDbCommandExecutor; import redis.clients.jedis.params.VAddParams; import redis.clients.jedis.params.VSimParams; import redis.clients.jedis.resps.RawVector; @@ -231,6 +232,18 @@ public UnifiedJedis(ConnectionProvider provider, int maxAttempts, Duration maxTo this(new RetryableCommandExecutor(provider, maxAttempts, maxTotalRetriesDuration), provider); } + /** + * Constructor which supports multiple cluster/database endpoints each with their own isolated connection pool. + *

+ * With this Constructor users can seamlessly failover to Disaster Recovery (DR), Backup, and Active-Active cluster(s) + * by using simple configuration which is passed through from Resilience4j - https://resilience4j.readme.io/docs + *

+ */ + @Experimental + public UnifiedJedis(MultiDbConnectionProvider provider) { + this(new MultiDbCommandExecutor(provider), provider); + } + /** * The constructor to use a custom {@link CommandExecutor}. *

From ba5485953b8bfcce449d8f02790e22bb722a79ed Mon Sep 17 00:00:00 2001 From: ggivo Date: Tue, 7 Oct 2025 11:08:28 +0300 Subject: [PATCH 14/18] Address review comments and fix leftovers in docs and error messages --- docs/failover.md | 164 +++++++++--------- .../jedis/mcf/MultiDbConnectionProvider.java | 108 ++++++------ .../MultiClusterPooledConnectionProvider.java | 0 ...UnifiedJedisConstructorReflectionTest.java | 4 +- ...ectionProviderDynamicEndpointUnitTest.java | 2 +- .../mcf/MultiDbConnectionProviderTest.java | 28 ++- ...ultiDbProviderHealthStatusChangeTest.java} | 24 +-- 7 files changed, 156 insertions(+), 174 deletions(-) delete mode 100644 src/main/java/redis/clients/jedis/providers/MultiClusterPooledConnectionProvider.java rename src/test/java/redis/clients/jedis/providers/{MultiClusterProviderHealthStatusChangeEventTest.java => MultiDbProviderHealthStatusChangeTest.java} (97%) diff --git a/docs/failover.md b/docs/failover.md index 632fba1a82..ef95aa2184 100644 --- a/docs/failover.md +++ b/docs/failover.md @@ -38,67 +38,60 @@ If `redis-east` becomes unavailable, you want your application to connect to `re Let's look at one way of configuring Jedis for this scenario. -First, create an array of `ClusterConfig` objects, one for each Redis database. +First, start by defining the initial configuration for each Redis database available and prioritize them using weights. 
```java -JedisClientConfig config = DefaultJedisClientConfig.builder().user("cache").password("secret") - .socketTimeoutMillis(5000).connectionTimeoutMillis(5000).build(); + JedisClientConfig config = DefaultJedisClientConfig.builder().user("cache").password("secret") + .socketTimeoutMillis(5000).connectionTimeoutMillis(5000).build(); +// Custom pool config per database can be provided ConnectionPoolConfig poolConfig = new ConnectionPoolConfig(); -poolConfig.setMaxTotal(8); -poolConfig.setMaxIdle(8); -poolConfig.setMinIdle(0); -poolConfig.setBlockWhenExhausted(true); -poolConfig.setMaxWait(Duration.ofSeconds(1)); -poolConfig.setTestWhileIdle(true); -poolConfig.setTimeBetweenEvictionRuns(Duration.ofSeconds(1)); - -MultiClusterClientConfig.ClusterConfig[] clusterConfig = new MultiClusterClientConfig.ClusterConfig[2]; -HostAndPort east = new HostAndPort("redis-east.example.com", 14000); -clusterConfig[0] = ClusterConfig.builder(east, config).connectionPoolConfig(poolConfig).weight(1.0f).build(); + poolConfig.setMaxTotal(8); + poolConfig.setMaxIdle(8); + poolConfig.setMinIdle(0); + poolConfig.setBlockWhenExhausted(true); + poolConfig.setMaxWait(Duration.ofSeconds(1)); + poolConfig.setTestWhileIdle(true); + poolConfig.setTimeBetweenEvictionRuns(Duration.ofSeconds(1)); +HostAndPort east = new HostAndPort("redis-east.example.com", 14000); HostAndPort west = new HostAndPort("redis-west.example.com", 14000); -clusterConfig[1] = ClusterConfig.builder(west, config).connectionPoolConfig(poolConfig).weight(0.5f).build(); + +MultiDbConfig.Builder multiConfig = MultiDbConfig.builder() + .endpoint(DatabaseConfig.builder(east, config).connectionPoolConfig(poolConfig).weight(1.0f).build()) + .endpoint(DatabaseConfig.builder(west, config).connectionPoolConfig(poolConfig).weight(0.5f).build()); ``` The configuration above represents your two Redis deployments: `redis-east` and `redis-west`. 
-You'll use this array of configuration objects to create a connection provider that supports failover. -Use the `MultiClusterClientConfig` builder to set your preferred retry and failover configuration, passing in the client configs you just created. -Then build a `MultiClusterPooledConnectionProvider`. +Continue using the `MultiDbConfig.Builder` builder to set your preferred retry and failover configuration. +Then build a `MultiDbClient`. ```java -MultiClusterClientConfig.Builder builder = new MultiClusterClientConfig.Builder(clientConfigs); -builder.circuitBreakerSlidingWindowSize(2); // Sliding window size in number of calls -builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit breaker +multiDbBuilder.circuitBreakerSlidingWindowSize(2) // Sliding window size in number of calls + .circuitBreakerFailureRateThreshold(10.0f) // percentage of failures to trigger circuit breaker + .circuitBreakerMinNumOfFailures(1000) // Minimum number of failures before circuit breaker is tripped -builder.failbackSupported(true); // Enable failback -builder.failbackCheckInterval(1000); // Check every second the unhealthy cluster to see if it has recovered -builder.gracePeriod(10000); // Keep cluster disabled for 10 seconds after it becomes unhealthy + .failbackSupported(true) // Enable failback + .failbackCheckInterval(1000) // Check every second the unhealthy cluster to see if it has recovered + .gracePeriod(10000) // Keep cluster disabled for 10 seconds after it becomes unhealthy // Optional: configure retry settings -builder.retryMaxAttempts(3); // Maximum number of retry attempts (including the initial call) -builder.retryWaitDuration(500); // Number of milliseconds to wait between retry attempts -builder.retryWaitDurationExponentialBackoffMultiplier(2); // Exponential backoff factor multiplied against wait duration between retries + .retryMaxAttempts(3) // Maximum number of retry attempts (including the initial call) + 
.retryWaitDuration(500) // Number of milliseconds to wait between retry attempts + .retryWaitDurationExponentialBackoffMultiplier(2) // Exponential backoff factor multiplied against wait duration between retries // Optional: configure fast failover -builder.fastFailover(true); // Force closing connections to unhealthy cluster on failover -builder.retryOnFailover(false); // Do not retry failed commands during failover + .fastFailover(true) // Force closing connections to unhealthy cluster on failover + .retryOnFailover(false); // Do not retry failed commands during failover -MultiClusterPooledConnectionProvider provider = new MultiClusterPooledConnectionProvider(builder.build()); +MultiDbClient multiDbClient = multiDbBuilder.build(); ``` -Internally, the connection provider uses a [highly configurable circuit breaker and retry implementation](https://resilience4j.readme.io/docs/circuitbreaker) to determine when to fail over. In the configuration here, we've set a sliding window size of 10 and a failure rate threshold of 50%. This means that a failover will be triggered if 5 out of any 10 calls to Redis fail. -Once you've configured and created a `MultiClusterPooledConnectionProvider`, instantiate a `UnifiedJedis` instance for your application, passing in the provider you just created: - -```java -UnifiedJedis jedis = new UnifiedJedis(provider); -``` - -You can now use this `UnifiedJedis` instance, and the connection management and failover will be handled transparently. +You can now use this `MultiDbClient` instance, and the connection management and failover will be handled transparently. ## Configuration options @@ -108,7 +101,7 @@ a fault-tolerance library that implements [retry](https://resilience4j.readme.io Once you configure Jedis for failover using the `MultiClusterPooledConnectionProvider`, each call to Redis is decorated with a resilience4j retry and circuit breaker. 
By default, any call that throws a `JedisConnectionException` will be retried up to 3 times. -If the call continues to fail after the maximum number of retry attempts, then the circuit breaker will record a failure. +If the call fails, then the circuit breaker will record a failure. The circuit breaker maintains a record of failures in a sliding window data structure. If the failure rate reaches a configured threshold (e.g., when 50% of the last 10 calls have failed), @@ -147,7 +140,7 @@ Jedis uses the following circuit breaker settings: ### Health Check Configuration and Customization -The `MultiClusterPooledConnectionProvider` includes a comprehensive health check system that continuously monitors the availability of Redis clusters to enable automatic failover and failback. +The `MultiDbClient` includes a comprehensive health check system that continuously monitors the availability of Redis clusters to enable automatic failover and failback. The health check system serves several critical purposes in the failover architecture: @@ -190,29 +183,29 @@ The `LagAwareStrategy` is designed specifically for Redis Enterprise Active-Acti **Example Configuration:** ```java -BiFunction, MultiClusterClientConfig.StrategySupplier> healthCheckStrategySupplier = -(HostAndPort clusterHostPort, Supplier credentialsSupplier) -> { - LagAwareStrategy.Config lagConfig = LagAwareStrategy.Config.builder(clusterHostPort, credentialsSupplier) - .interval(5000) // Check every 5 seconds - .timeout(3000) // 3 second timeout - .extendedCheckEnabled(true) - .build(); - - return (hostAndPort, jedisClientConfig) -> new LagAwareStrategy(lagConfig); -}; +BiFunction, MultiDbConfig.StrategySupplier> healthCheckStrategySupplier = + (HostAndPort clusterHostPort, Supplier credentialsSupplier) -> { + LagAwareStrategy.Config lagConfig = LagAwareStrategy.Config.builder(clusterHostPort, credentialsSupplier) + .interval(5000) // Check every 5 seconds + .timeout(3000) // 3 second timeout + 
.extendedCheckEnabled(true) + .build(); + + return (hostAndPort, jedisClientConfig) -> new LagAwareStrategy(lagConfig); + }; // Configure REST API endpoint and credentials -Endpoint restEndpoint = new HostAndPort("redis-enterprise-cluster-fqdn", 9443); -Supplier credentialsSupplier = () -> - new DefaultRedisCredentials("rest-api-user", "pwd"); +HostAndPort restEndpoint = new HostAndPort("redis-enterprise-cluster-fqdn", 9443); +Supplier credentialsSupplier = () -> + new DefaultRedisCredentials("rest-api-user", "pwd"); -MultiClusterClientConfig.StrategySupplier lagawareStrategySupplier = healthCheckStrategySupplier.apply( - restEndpoint, credentialsSupplier); +MultiDbConfig.StrategySupplier lagawareStrategySupplier = healthCheckStrategySupplier.apply( + restEndpoint, credentialsSupplier); -MultiClusterClientConfig.ClusterConfig clusterConfig = - MultiClusterClientConfig.ClusterConfig.builder(hostAndPort, clientConfig) - .healthCheckStrategySupplier(lagawareStrategySupplier) - .build(); +MultiDbConfig.DatabaseConfig clusterConfig = + MultiDbConfig.DatabaseConfig.builder(hostAndPort, clientConfig) + .healthCheckStrategySupplier(lagawareStrategySupplier) + .build(); ``` ##### 3. Custom Health Check Strategies @@ -289,9 +282,9 @@ MultiClusterClientConfig.ClusterConfig clusterConfig = Use the `healthCheckEnabled(false)` method to completely disable health checks: ```java -clusterConfig[0] = ClusterConfig.builder(east, config) - .healthCheckEnabled(false) // Disable health checks entirely - .build(); +DatabaseConfig dbConfig = DatabaseConfig.builder(east, config) + .healthCheckEnabled(false) // Disable health checks entirely + .build(); ``` ### Fallback configuration @@ -306,36 +299,38 @@ Jedis uses the following fallback settings: In the event that Jedis fails over, you may wish to take some action. This might include logging a warning, recording a metric, or externally persisting the cluster connection state, to name just a few examples. 
For this reason, -`MultiPooledConnectionProvider` lets you register a custom callback that will be called whenever Jedis +`MultiDbClient` lets you register a custom callback that will be called whenever Jedis fails over to a new cluster. To use this feature, you'll need to design a class that implements `java.util.function.Consumer`. This class must implement the `accept` method, as you can see below. ```java -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.function.Consumer; - -public class FailoverReporter implements Consumer { - + public class FailoverReporter implements Consumer { + @Override - public void accept(ClusterSwitchEventArgs e) { - Logger logger = LoggerFactory.getLogger(FailoverReporter.class); - logger.warn("Jedis failover to cluster: " + e.getClusterName() + " due to " + e.getReason()); + public void accept(DatabaseSwitchEvent e) { + System.out.println("Jedis failover to cluster: " + e.getDatabaseName() + " due to " + e.getReason()); } } ``` -You can then pass an instance of this class to your `MultiPooledConnectionProvider`. +DatabaseSwitchEvent consumer can be registered as follows: ``` -FailoverReporter reporter = new FailoverReporter(); -provider.setClusterSwitchListener(reporter); + FailoverReporter reporter = new FailoverReporter(); + MultiDbClient client = MultiDbClient.builder() + .databaseSwitchListener(reporter) + .build(); ``` - The provider will call your `accept` whenever a failover occurs. +or directly using lambda expression: +``` + MultiDbClient client = MultiDbClient.builder() + .databaseSwitchListener(event -> System.out.println("Switched to: " + event.getEndpoint())) + .build(); +``` + ## Failing back @@ -368,17 +363,18 @@ The automatic failback process works as follows: Once you've determined that it's safe to fail back to a previously-unavailable cluster, you need to decide how to trigger the failback. 
There are two ways to accomplish this: -`MultiClusterPooledConnectionProvider` exposes a method that you can use to manually select which cluster Jedis should use. -To select a different cluster to use, pass the cluster's `HostAndPort` to `setActiveCluster()`: +`MultiDbClient` exposes a method that you can use to manually select which cluster Jedis should use. +To select a different cluster to use, pass the cluster's `HostAndPort` to `setActiveDatabase()`: ``` -provider.setActiveCluster(west); + Endpoint endpoint = new HostAndPort("redis-east.example.com", 14000); + client.setActiveDatabase(endpoint); ``` This method is thread-safe. If you decide to implement manual failback, you will need a way for external systems to trigger this method in your application. For example, if your application exposes a REST API, you might consider creating a REST endpoint -to call `setActiveCluster` and fail back the application. +to call `setActiveDatabase` and fail back the application. ## Troubleshooting Failover and Failback Issues @@ -418,9 +414,9 @@ HealthCheckStrategy.Config config = HealthCheckStrategy.Config.builder() .build(); // Adjust failback timing -MultiClusterClientConfig multiConfig = new MultiClusterClientConfig.Builder(clusterConfigs) - .gracePeriod(5000) // Shorter grace period - .build(); +MultiDbConfig multiConfig = new MultiDbConfig.Builder() + .gracePeriod(5000) // Shorter grace period + .build(); ``` ## Need help or have questions? diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java index 8d515627f3..01b194524e 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java @@ -47,9 +47,9 @@ /** * @author Allen Terleto (aterleto) *

- * ConnectionProvider which supports multiple cluster/database endpoints each with their own + * ConnectionProvider which supports multiple database endpoints each with their own * isolated connection pool. With this ConnectionProvider users can seamlessly failover to - * Disaster Recovery (DR), Backup, and Active-Active cluster(s) by using simple + * Disaster Recovery (DR), Backup, and Active-Active database(s) by using simple * configuration which is passed through from Resilience4j - * docs *

@@ -77,8 +77,8 @@ public class MultiDbConnectionProvider implements ConnectionProvider { private final Lock activeDatabaseChangeLock = new ReentrantLock(true); /** - * Functional interface for listening to cluster switch events. The event args contain the reason - * for the switch, the endpoint, and the cluster. + * Functional interface for listening to database switch events. The event args contain the reason + * for the switch, the endpoint, and the database. */ private Consumer databaseSwitchListener; @@ -169,8 +169,8 @@ public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) { // Initialize StatusTracker for waiting on health check results StatusTracker statusTracker = new StatusTracker(healthStatusManager); - // Wait for initial health check results and select active cluster based on weights - activeDatabase = waitForInitialHealthyCluster(statusTracker); + // Wait for initial health check results and select active database based on weights + activeDatabase = waitForInitialHealthyDatabase(statusTracker); // Mark initialization as complete - handleHealthStatusChange can now process events initializationComplete = true; @@ -181,7 +181,7 @@ public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) { // is set to true. // Simple rule is to never assign value of 'activeDatabase' outside of // 'activeDatabaseChangeLock' once the 'initializationComplete' is done. - waitForInitialHealthyCluster(statusTracker); + waitForInitialHealthyDatabase(statusTracker); switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, temp); } this.fallbackExceptionList = multiDbConfig.getFallbackExceptionList(); @@ -195,7 +195,7 @@ public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) { } /** - * Adds a new cluster endpoint to the provider. + * Adds a new database endpoint to the provider. 
* @param databaseConfig the configuration for the new database * @throws JedisValidationException if the endpoint already exists */ @@ -219,7 +219,7 @@ public void add(DatabaseConfig databaseConfig) { } /** - * Removes a cluster endpoint from the provider. + * Removes a database endpoint from the provider. * @param endpoint the endpoint to remove * @throws JedisValidationException if the endpoint doesn't exist or is the last remaining * endpoint @@ -246,18 +246,18 @@ public void remove(Endpoint endpoint) { boolean isActiveDatabase = (activeDatabase == databaseToRemove); if (isActiveDatabase) { - log.info("Active cluster is being removed. Finding a new active cluster..."); + log.info("Active database is being removed. Finding a new active database..."); Map.Entry candidate = findWeightedHealthyClusterToIterate( databaseToRemove); if (candidate != null) { Database selectedCluster = candidate.getValue(); if (setActiveDatabase(selectedCluster, true)) { - log.info("New active cluster set to {}", candidate.getKey()); + log.info("New active database set to {}", candidate.getKey()); notificationData = candidate; } } else { throw new JedisException( - "Database can not be removed due to no healthy cluster available to switch!"); + "Database can not be removed due to no healthy database available to switch!"); } } @@ -265,10 +265,10 @@ public void remove(Endpoint endpoint) { healthStatusManager.unregisterListener(endpoint, this::onHealthStatusChange); healthStatusManager.remove(endpoint); - // Remove from cluster map + // Remove from database map databaseMap.remove(endpoint); - // Close the cluster resources + // Close the database resources if (databaseToRemove != null) { databaseToRemove.setDisabled(true); databaseToRemove.close(); @@ -291,16 +291,16 @@ private void addClusterInternal(MultiDbConfig multiDbConfig, DatabaseConfig conf "Endpoint " + config.getEndpoint() + " already exists in the provider"); } - String clusterId = "database:" + config.getEndpoint(); + String 
databaseId = "database:" + config.getEndpoint(); - Retry retry = RetryRegistry.of(retryConfig).retry(clusterId); + Retry retry = RetryRegistry.of(retryConfig).retry(databaseId); Retry.EventPublisher retryPublisher = retry.getEventPublisher(); retryPublisher.onRetry(event -> log.warn(String.valueOf(event))); retryPublisher.onError(event -> log.error(String.valueOf(event))); CircuitBreaker circuitBreaker = CircuitBreakerRegistry.of(circuitBreakerConfig) - .circuitBreaker(clusterId); + .circuitBreaker(databaseId); CircuitBreaker.EventPublisher circuitBreakerEventPublisher = circuitBreaker.getEventPublisher(); circuitBreakerEventPublisher.onCallNotPermitted(event -> log.error(String.valueOf(event))); @@ -317,7 +317,7 @@ private void addClusterInternal(MultiDbConfig multiDbConfig, DatabaseConfig conf if (strategySupplier != null) { HealthCheckStrategy hcs = strategySupplier.get(hostPort(config.getEndpoint()), config.getJedisClientConfig()); - // Register listeners BEFORE adding clusters to avoid missing events + // Register listeners BEFORE adding databases to avoid missing events healthStatusManager.registerListener(config.getEndpoint(), this::onHealthStatusChange); HealthCheck hc = healthStatusManager.add(config.getEndpoint(), hcs); database = new Database(config.getEndpoint(), pool, retry, hc, circuitBreaker, @@ -341,8 +341,8 @@ private HostAndPort hostPort(Endpoint endpoint) { } /** - * Handles health status changes for clusters. This method is called by the health status manager - * when the health status of a cluster changes. + * Handles health status changes for databases. This method is called by the health status manager + * when the health status of a database changes. 
*/ @VisibleForTesting void onHealthStatusChange(HealthStatusChangeEvent eventArgs) { @@ -350,37 +350,37 @@ void onHealthStatusChange(HealthStatusChangeEvent eventArgs) { HealthStatus newStatus = eventArgs.getNewStatus(); log.debug("Health status changed for {} from {} to {}", endpoint, eventArgs.getOldStatus(), newStatus); - Database clusterWithHealthChange = databaseMap.get(endpoint); + Database databaseWithHealthChange = databaseMap.get(endpoint); - if (clusterWithHealthChange == null) return; + if (databaseWithHealthChange == null) return; if (initializationComplete) { - if (!newStatus.isHealthy() && clusterWithHealthChange == activeDatabase) { - clusterWithHealthChange.setGracePeriod(); - switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, clusterWithHealthChange); + if (!newStatus.isHealthy() && databaseWithHealthChange == activeDatabase) { + databaseWithHealthChange.setGracePeriod(); + switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, databaseWithHealthChange); } } } /** - * Waits for initial health check results and selects the first healthy cluster based on weight - * priority. Blocks until at least one cluster becomes healthy or all clusters are determined to + * Waits for initial health check results and selects the first healthy database based on weight + * priority. Blocks until at least one database becomes healthy or all databases are determined to * be unhealthy. 
* @param statusTracker the status tracker to use for waiting on health check results - * @return the first healthy cluster found, ordered by weight (highest first) - * @throws JedisConnectionException if all clusters are unhealthy + * @return the first healthy database found, ordered by weight (highest first) + * @throws JedisConnectionException if all databases are unhealthy */ - private Database waitForInitialHealthyCluster(StatusTracker statusTracker) { - // Sort clusters by weight in descending order - List> sortedClusters = databaseMap.entrySet().stream() + private Database waitForInitialHealthyDatabase(StatusTracker statusTracker) { + // Sort databases by weight in descending order + List> sortedDatabases = databaseMap.entrySet().stream() .sorted(Map.Entry. comparingByValue( Comparator.comparing(Database::getWeight).reversed())) .collect(Collectors.toList()); - log.info("Selecting initial cluster from {} configured clusters", sortedClusters.size()); + log.info("Selecting initial database from {} configured databases", sortedDatabases.size()); - // Select cluster in weight order - for (Map.Entry entry : sortedClusters) { + // Select database in weight order + for (Map.Entry entry : sortedDatabases) { Endpoint endpoint = entry.getKey(); Database database = entry.getValue(); @@ -407,9 +407,9 @@ private Database waitForInitialHealthyCluster(StatusTracker statusTracker) { } } - // All clusters are unhealthy + // All databases are unhealthy throw new JedisConnectionException( - "All configured clusters are unhealthy. Cannot initialize MultiDbConnectionProvider."); + "All configured databases are unhealthy. 
Cannot initialize MultiDbConnectionProvider."); } /** @@ -418,7 +418,7 @@ private Database waitForInitialHealthyCluster(StatusTracker statusTracker) { @VisibleForTesting void periodicFailbackCheck() { try { - // Find the best candidate cluster for failback + // Find the best candidate database for failback Map.Entry bestCandidate = null; float bestWeight = activeDatabase.getWeight(); @@ -445,7 +445,7 @@ void periodicFailbackCheck() { // Perform failback if we found a better candidate if (bestCandidate != null) { Database selectedCluster = bestCandidate.getValue(); - log.info("Performing failback from {} to {} (higher weight cluster available)", + log.info("Performing failback from {} to {} (higher weight database available)", activeDatabase.getCircuitBreaker().getName(), selectedCluster.getCircuitBreaker().getName()); if (setActiveDatabase(selectedCluster, true)) { @@ -475,7 +475,7 @@ Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) { private void handleNoHealthyCluster() { int max = multiDbConfig.getMaxNumFailoverAttempts(); - log.error("No healthy cluster available to switch to"); + log.error("No healthy database available to switch to"); if (failoverAttemptCount.get() > max) { throw new JedisPermanentlyNotAvailableException(); } @@ -502,12 +502,12 @@ private boolean markAsFreeze() { } /** - * Asserts that the active cluster is operable. If not, throws an exception. + * Asserts that the active database is operable. If not, throws an exception. *

* This method is called by the circuit breaker command executor before executing a command. - * @throws JedisPermanentlyNotAvailableException if the there is no operable cluster and the max + * @throws JedisPermanentlyNotAvailableException if there is no operable database and the max * number of failover attempts has been exceeded. - * @throws JedisTemporarilyNotAvailableException if the there is no operable cluster and the max + * @throws JedisTemporarilyNotAvailableException if there is no operable database and the max * number of failover attempts has not been exceeded. */ @VisibleForTesting @@ -531,7 +531,7 @@ private Map.Entry findWeightedHealthyClusterToIterate(Databa /** * Design decision was made to defer responsibility for cross-replication validation to the user. - * Alternatively there was discussion to handle cross-cluster replication validation by setting a + * Alternatively there was discussion to handle cross-database replication validation by setting a * key/value pair per hashslot in the active connection (with a TTL) and subsequently reading it * from the target connection. */ @@ -614,7 +614,7 @@ public void forceActiveDatabase(Endpoint endpoint, long forcedActiveDuration) { private boolean setActiveDatabase(Database database, boolean validateConnection) { // Database database = clusterEntry.getValue(); // Field-level synchronization is used to avoid the edge case in which - // incrementActiveMultiClusterIndex() is called at the same time + // setActiveDatabase() is called at the same time activeDatabaseChangeLock.lock(); Database oldCluster; try { @@ -667,7 +667,7 @@ public void close() { Thread.currentThread().interrupt(); } - // Close all cluster connection pools + // Close all database connection pools for (Database database : databaseMap.values()) { database.close(); } @@ -707,7 +707,7 @@ public Database getDatabase(Endpoint endpoint) { *

* Active endpoint is the one which is currently being used for all operations. It can change at any time due to health checks, failover, failback, etc. - * @return the active cluster endpoint + * @return the active database endpoint */ public Endpoint getActiveEndpoint() { return activeDatabase.getEndpoint(); } @@ -732,9 +732,9 @@ public CircuitBreaker getDatabaseCircuitBreaker() { } /** - * Indicates the final cluster/database endpoint (connection pool), according to the - * pre-configured list provided at startup via the MultiDbConfig, is unavailable and therefore no - * further failover is possible. Users can manually failback to an available cluster + * Indicates the final database endpoint (connection pool), according to the pre-configured list + * provided at startup via the MultiDbConfig, is unavailable and therefore no further failover is + * possible. Users can manually failback to an available database */ public boolean canIterateFrom(Database iterateFrom) { Map.Entry e = findWeightedHealthyClusterToIterate(iterateFrom); @@ -825,7 +825,7 @@ public HealthStatus getHealthStatus() { } /** - * Assigned weight for this cluster + * Assigned weight for this database */ public float getWeight() { return weight; @@ -865,14 +865,14 @@ public void setDisabled(boolean disabled) { } /** - * Checks if the cluster is currently in grace period + * Checks if the database is currently in grace period */ public boolean isInGracePeriod() { return System.currentTimeMillis() < gracePeriodEndsAt; } /** - * Sets the grace period for this cluster + * Sets the grace period for this database */ public void setGracePeriod() { setGracePeriod(multiDbConfig.getGracePeriod()); @@ -931,7 +931,7 @@ private static boolean isThresholdsExceeded(Database database, boolean lastFailR public String toString() { return circuitBreaker.getName() + "{" + "connectionPool=" + connectionPool + ", retry=" + retry + ", circuitBreaker=" + circuitBreaker + ", weight=" + weight + ", healthStatus=" - + 
getHealthStatus() + ", multiClusterClientConfig=" + multiDbConfig + '}'; + + getHealthStatus() + ", multiDbConfig=" + multiDbConfig + '}'; } } diff --git a/src/main/java/redis/clients/jedis/providers/MultiClusterPooledConnectionProvider.java b/src/main/java/redis/clients/jedis/providers/MultiClusterPooledConnectionProvider.java deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java index cd2ca8e4c5..591c3ed942 100644 --- a/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java +++ b/src/test/java/redis/clients/jedis/builders/UnifiedJedisConstructorReflectionTest.java @@ -44,7 +44,7 @@ void testConstructorParameterCoverageReport() { for (Constructor ctor : ctors) { if (isUnsafeConstructor(ctor) || clusterConstructorThatShouldBeDeprecatedAndRemoved(ctor) || retriesConstructorThatShouldBeIncorporatedIntoBuilderAsDefault(ctor) - || multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient(ctor)) { + || multiDbConnectionProviderShouldBeReplacedWithMultiDbClient(ctor)) { // Exclude unsafe constructors from analysis as requested continue; } @@ -181,7 +181,7 @@ private static boolean clusterConstructorThatShouldBeDeprecatedAndRemoved(Constr } // FIXME: Remove this when we add convince class and builder for ResilientClient - private static boolean multiClusterPooledConnectionProviderShouldBeReplacedWithResilientClient( + private static boolean multiDbConnectionProviderShouldBeReplacedWithMultiDbClient( Constructor ctor) { Class[] types = ctor.getParameterTypes(); return types.length == 1 && types[0].getSimpleName().equals("MultiDbConnectionProvider"); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java 
index ceb5cc021c..bc418f8682 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java @@ -129,7 +129,7 @@ void testRemoveNullEndpoint() { } @Test - void testAddAndRemoveMultipleClusters() { + void testAddAndRemoveMultipleDatabases() { // Add endpoint2 as second database DatabaseConfig config2 = createDatabaseConfig(endpoint2.getHostAndPort(), 2.0f); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java index 9af53002e0..8c2da1e61a 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java @@ -68,8 +68,8 @@ public void testCircuitBreakerForcedTransitions() { } @Test - public void testIterateActiveCluster() throws InterruptedException { - waitForClustersToGetHealthy(provider.getDatabase(endpointStandalone0.getHostAndPort()), + public void testSwitchToHealthyDatabase() throws InterruptedException { + waitForDatabaseToGetHealthy(provider.getDatabase(endpointStandalone0.getHostAndPort()), provider.getDatabase(endpointStandalone1.getHostAndPort())); Endpoint e2 = provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, @@ -80,7 +80,7 @@ public void testIterateActiveCluster() throws InterruptedException { @Test public void testCanIterateOnceMore() { Endpoint endpoint0 = endpointStandalone0.getHostAndPort(); - waitForClustersToGetHealthy(provider.getDatabase(endpoint0), + waitForDatabaseToGetHealthy(provider.getDatabase(endpoint0), provider.getDatabase(endpointStandalone1.getHostAndPort())); provider.setActiveDatabase(endpoint0); @@ -90,14 +90,14 @@ public void testCanIterateOnceMore() { assertFalse(provider.canIterateFrom(provider.getDatabase())); } - private void waitForClustersToGetHealthy(Database... 
clusters) { + private void waitForDatabaseToGetHealthy(Database... databases) { Awaitility.await().pollInterval(Durations.ONE_HUNDRED_MILLISECONDS) .atMost(Durations.TWO_SECONDS) - .until(() -> Arrays.stream(clusters).allMatch(Database::isHealthy)); + .until(() -> Arrays.stream(databases).allMatch(Database::isHealthy)); } @Test - public void testRunClusterFailoverPostProcessor() { + public void testDatabaseSwitchListener() { DatabaseConfig[] databaseConfigs = new DatabaseConfig[2]; databaseConfigs[0] = DatabaseConfig .builder(new HostAndPort("purposefully-incorrect", 0000), @@ -136,7 +136,7 @@ public void testRunClusterFailoverPostProcessor() { } @Test - public void testSetActiveMultiClusterIndexEqualsZero() { + public void testSetActiveDatabaseIndexEqualsZero() { assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should // throw // an @@ -144,15 +144,7 @@ public void testSetActiveMultiClusterIndexEqualsZero() { } @Test - public void testSetActiveMultiClusterIndexLessThanZero() { - assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should - // throw - // an - // exception - } - - @Test - public void testSetActiveMultiClusterIndexOutOfRange() { + public void testSetActiveDatabaseByMissingEndpoint() { assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(new Endpoint() { @Override public String getHost() { @@ -249,7 +241,7 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() { try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) { jedis.get("foo"); - // Disable both clusters so any attempt to switch results in 'no healthy cluster' path + // Disable both databases so any attempt to switch results in 'no healthy database' path testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true); testProvider.getDatabase(endpointStandalone1.getHostAndPort()).setDisabled(true); @@ -288,7 +280,7 @@ public void 
userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) { jedis.get("foo"); - // disable most weighted cluster so that it will fail on initial requests + // disable most weighted database so that it will fail on initial requests testProvider.getDatabase(endpointStandalone0.getHostAndPort()).setDisabled(true); Exception e = assertThrows(JedisConnectionException.class, () -> jedis.get("foo")); diff --git a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java b/src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java similarity index 97% rename from src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java rename to src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java index 6d1b14009e..ed874c816c 100644 --- a/src/test/java/redis/clients/jedis/providers/MultiClusterProviderHealthStatusChangeEventTest.java +++ b/src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java @@ -20,11 +20,11 @@ import redis.clients.jedis.mcf.MultiDbConnectionProviderHelper; /** - * Tests for MultiDbConnectionProvider event handling behavior during initialization and - * throughout its lifecycle with HealthStatusChangeEvents. + * Tests for MultiDbConnectionProvider event handling behavior during initialization and throughout + * its lifecycle with HealthStatusChangeEvents. 
*/ @ExtendWith(MockitoExtension.class) -public class MultiClusterProviderHealthStatusChangeEventTest { +public class MultiDbProviderHealthStatusChangeTest { private HostAndPort endpoint1; private HostAndPort endpoint2; @@ -60,8 +60,7 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { assertFalse(provider.getDatabase(endpoint1).isInGracePeriod()); assertEquals(provider.getDatabase(), provider.getDatabase(endpoint1)); @@ -92,8 +91,7 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Verify initial state assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(), "Should start with endpoint1 active"); @@ -140,8 +138,7 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // This test verifies that multiple endpoints are properly initialized // Verify both clusters are initialized properly @@ -166,8 +163,7 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception { // This test verifies that the provider initializes correctly and doesn't lose events // In practice, with health checks disabled, no 
events should be generated during init - try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Verify successful initialization assertNotNull(provider.getDatabase(), "Provider should have initialized successfully"); assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(), @@ -195,8 +191,7 @@ void postInit_two_hop_failover_chain_respected() throws Exception { MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build(); - try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // First event: endpoint1 (active) becomes UNHEALTHY -> failover to endpoint2, endpoint1 // enters grace MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, @@ -236,8 +231,7 @@ void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Excepti MultiDbConfig config = new MultiDbConfig.Builder( new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build(); - try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider( - config)) { + try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { // Verify initial state assertEquals(HealthStatus.HEALTHY, provider.getDatabase(endpoint1).getHealthStatus(), "Should start as HEALTHY"); From b3ab862f186e047d1fe613bb5b6b7d0bcb1a7b4d Mon Sep 17 00:00:00 2001 From: ggivo Date: Tue, 7 Oct 2025 11:17:48 +0300 Subject: [PATCH 15/18] more leftovers in failover.md & MultiDbConnectionProvider --- docs/failover.md | 56 +++++++++---------- .../jedis/mcf/MultiDbConnectionProvider.java | 16 +++--- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/docs/failover.md b/docs/failover.md index ef95aa2184..29e5551306 100644 --- a/docs/failover.md +++ b/docs/failover.md @@ -19,7 
+19,7 @@ The remainder of this guide describes: * A basic failover and health check configuration * Supported retry and circuit breaker settings -* Failback and the cluster selection API +* Failback and the database selection API We recommend that you read this guide carefully and understand the configuration settings before enabling Jedis failover in production. @@ -73,8 +73,8 @@ multiDbBuilder.circuitBreakerSlidingWindowSize(2) // Sliding window size in numb .circuitBreakerMinNumOfFailures(1000) // Minimum number of failures before circuit breaker is tripped .failbackSupported(true) // Enable failback - .failbackCheckInterval(1000) // Check every second the unhealthy cluster to see if it has recovered - .gracePeriod(10000) // Keep cluster disabled for 10 seconds after it becomes unhealthy + .failbackCheckInterval(1000) // Check every second the unhealthy database to see if it has recovered + .gracePeriod(10000) // Keep database disabled for 10 seconds after it becomes unhealthy // Optional: configure retry settings .retryMaxAttempts(3) // Maximum number of retry attempts (including the initial call) @@ -82,7 +82,7 @@ multiDbBuilder.circuitBreakerSlidingWindowSize(2) // Sliding window size in numb .retryWaitDurationExponentialBackoffMultiplier(2) // Exponential backoff factor multiplied against wait duration between retries // Optional: configure fast failover - .fastFailover(true) // Force closing connections to unhealthy cluster on failover + .fastFailover(true) // Force closing connections to unhealthy database on failover .retryOnFailover(false); // Do not retry failed commands during failover MultiDbClient multiDbClient = multiDbBuilder.build(); @@ -140,16 +140,16 @@ Jedis uses the following circuit breaker settings: ### Health Check Configuration and Customization -The `MultiDbClient` includes a comprehensive health check system that continuously monitors the availability of Redis clusters to enable automatic failover and failback. 
+The `MultiDbClient` includes a comprehensive health check system that continuously monitors the availability of Redis databases to enable automatic failover and failback. The health check system serves several critical purposes in the failover architecture: -1. **Proactive Monitoring**: Continuously monitors passive clusters that aren't currently receiving traffic -2. **Failback Detection**: Determines when a previously failed cluster has recovered and is ready to accept traffic -3. **Circuit Breaker Integration**: Works with the circuit breaker pattern to manage cluster state transitions +1. **Proactive Monitoring**: Continuously monitors passive databases that aren't currently receiving traffic +2. **Failback Detection**: Determines when a previously failed database has recovered and is ready to accept traffic +3. **Circuit Breaker Integration**: Works with the circuit breaker pattern to manage database state transitions 4. **Customizable Strategies**: Supports pluggable health check implementations for different deployment scenarios -The health check system operates independently of your application traffic, running background checks at configurable intervals to assess cluster health without impacting performance. +The health check system operates independently of your application traffic, running background checks at configurable intervals to assess database health without impacting performance. 
#### Available Health Check Types @@ -184,8 +184,8 @@ The `LagAwareStrategy` is designed specifically for Redis Enterprise Active-Acti **Example Configuration:** ```java BiFunction, MultiDbConfig.StrategySupplier> healthCheckStrategySupplier = - (HostAndPort clusterHostPort, Supplier credentialsSupplier) -> { - LagAwareStrategy.Config lagConfig = LagAwareStrategy.Config.builder(clusterHostPort, credentialsSupplier) + (HostAndPort dbHostPort, Supplier credentialsSupplier) -> { + LagAwareStrategy.Config lagConfig = LagAwareStrategy.Config.builder(dbHostPort, credentialsSupplier) .interval(5000) // Check every 5 seconds .timeout(3000) // 3 second timeout .extendedCheckEnabled(true) @@ -195,14 +195,14 @@ BiFunction, MultiDbConfig.StrategySuppli }; // Configure REST API endpoint and credentials -HostAndPort restEndpoint = new HostAndPort("redis-enterprise-cluster-fqdn", 9443); +HostAndPort restEndpoint = new HostAndPort("redis-enterprise-db-fqdn", 9443); Supplier credentialsSupplier = () -> new DefaultRedisCredentials("rest-api-user", "pwd"); MultiDbConfig.StrategySupplier lagawareStrategySupplier = healthCheckStrategySupplier.apply( restEndpoint, credentialsSupplier); -MultiDbConfig.DatabaseConfig clusterConfig = +MultiDbConfig.DatabaseConfig dbConfig = MultiDbConfig.DatabaseConfig.builder(hostAndPort, clientConfig) .healthCheckStrategySupplier(lagawareStrategySupplier) .build(); @@ -227,7 +227,7 @@ MultiClusterClientConfig.StrategySupplier customStrategy = return new MyCustomHealthCheckStrategy(hostAndPort, jedisClientConfig); }; -MultiClusterClientConfig.ClusterConfig clusterConfig = +MultiClusterClientConfig.ClusterConfig dbConfig = MultiClusterClientConfig.ClusterConfig.builder(hostAndPort, clientConfig) .healthCheckStrategySupplier(customStrategy) .weight(1.0f) @@ -271,7 +271,7 @@ MultiClusterClientConfig.StrategySupplier pingStrategy = (hostAndPort, jedisClie }; }; -MultiClusterClientConfig.ClusterConfig clusterConfig = +MultiClusterClientConfig.ClusterConfig 
dbConfig = MultiClusterClientConfig.ClusterConfig.builder(hostAndPort, clientConfig) .healthCheckStrategySupplier(pingStrategy) .build(); @@ -298,9 +298,9 @@ Jedis uses the following fallback settings: ### Failover callbacks In the event that Jedis fails over, you may wish to take some action. This might include logging a warning, recording -a metric, or externally persisting the cluster connection state, to name just a few examples. For this reason, +a metric, or externally persisting the database connection state, to name just a few examples. For this reason, `MultiDbClient` lets you register a custom callback that will be called whenever Jedis -fails over to a new cluster. +fails over to a new database. To use this feature, you'll need to design a class that implements `java.util.function.Consumer`. This class must implement the `accept` method, as you can see below. @@ -310,7 +310,7 @@ This class must implement the `accept` method, as you can see below. @Override public void accept(DatabaseSwitchEvent e) { - System.out.println("Jedis failover to cluster: " + e.getDatabaseName() + " due to " + e.getReason()); + System.out.println("Jedis failover to database: " + e.getDatabaseName() + " due to " + e.getReason()); } } ``` @@ -334,7 +334,7 @@ or directly using lambda expression: ## Failing back -Jedis supports automatic failback based on health checks or manual failback using the cluster selection API. +Jedis supports automatic failback based on health checks or manual failback using the database selection API. ## Failback scenario @@ -350,21 +350,21 @@ You will likely want to fail your application back to `redis-east`. ### Automatic failback based on health checks -When health checks are enabled, Jedis automatically monitors the health of all configured clusters, including those that are currently inactive due to previous failures. 
+When health checks are enabled, Jedis automatically monitors the health of all configured databases, including those that are currently inactive due to previous failures. The automatic failback process works as follows: -1. **Continuous Monitoring**: Health checks run continuously for all clusters, regardless of their current active status -2. **Recovery Detection**: When a previously failed cluster passes the required number of consecutive health checks, it's marked as healthy -3. **Weight-Based Failback**: If automatic failback is enabled and a recovered cluster has a higher weight than the currently active cluster, Jedis will automatically switch to the recovered cluster -4. **Grace Period Respect**: Failback only occurs after the configured grace period has elapsed since the cluster was marked as unhealthy +1. **Continuous Monitoring**: Health checks run continuously for all databases, regardless of their current active status +2. **Recovery Detection**: When a previously failed database passes the required number of consecutive health checks, it's marked as healthy +3. **Weight-Based Failback**: If automatic failback is enabled and a recovered database has a higher weight than the currently active database, Jedis will automatically switch to the recovered database +4. **Grace Period Respect**: Failback only occurs after the configured grace period has elapsed since the database was marked as unhealthy -## Manual Failback using the cluster selection API +## Manual Failback using the database selection API -Once you've determined that it's safe to fail back to a previously-unavailable cluster, +Once you've determined that it's safe to fail back to a previously-unavailable database, you need to decide how to trigger the failback. There are two ways to accomplish this: -`MultiDbClient` exposes a method that you can use to manually select which cluster Jedis should use. 
-To select a different cluster to use, pass the cluster's `HostAndPort` to `setActiveDatabase()`: +`MultiDbClient` exposes a method that you can use to manually select which database Jedis should use. +To select a different database to use, pass the database's `HostAndPort` to `setActiveDatabase()`: ``` Endpoint endpoint = new HostAndPort("redis-east.example.com", 14000); client.setActiveDatabase(endpoint); diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java index 01b194524e..97f7fa658f 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java @@ -163,7 +163,7 @@ public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) { // Now add databases - health checks will start but events will be queued for (DatabaseConfig config : databaseConfigs) { - addClusterInternal(multiDbConfig, config); + addDatabaseInternal(multiDbConfig, config); } // Initialize StatusTracker for waiting on health check results @@ -212,7 +212,7 @@ public void add(DatabaseConfig databaseConfig) { activeDatabaseChangeLock.lock(); try { - addClusterInternal(multiDbConfig, databaseConfig); + addDatabaseInternal(multiDbConfig, databaseConfig); } finally { activeDatabaseChangeLock.unlock(); } @@ -277,7 +277,7 @@ public void remove(Endpoint endpoint) { activeDatabaseChangeLock.unlock(); } if (notificationData != null) { - onClusterSwitch(SwitchReason.FORCED, notificationData.getKey(), notificationData.getValue()); + onDatabaseSwitch(SwitchReason.FORCED, notificationData.getKey(), notificationData.getValue()); } } @@ -285,7 +285,7 @@ public void remove(Endpoint endpoint) { * Internal method to add a database configuration. This method is not thread-safe and should be * called within appropriate locks. 
*/ - private void addClusterInternal(MultiDbConfig multiDbConfig, DatabaseConfig config) { + private void addDatabaseInternal(MultiDbConfig multiDbConfig, DatabaseConfig config) { if (databaseMap.containsKey(config.getEndpoint())) { throw new JedisValidationException( "Endpoint " + config.getEndpoint() + " already exists in the provider"); @@ -449,7 +449,7 @@ void periodicFailbackCheck() { activeDatabase.getCircuitBreaker().getName(), selectedCluster.getCircuitBreaker().getName()); if (setActiveDatabase(selectedCluster, true)) { - onClusterSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedCluster); + onDatabaseSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedCluster); } } } catch (Exception e) { @@ -469,7 +469,7 @@ Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) { boolean changed = setActiveDatabase(database, false); if (!changed) return null; failoverAttemptCount.set(0); - onClusterSwitch(reason, databaseToIterate.getKey(), database); + onDatabaseSwitch(reason, databaseToIterate.getKey(), database); return databaseToIterate.getKey(); } @@ -586,7 +586,7 @@ public void setActiveDatabase(Endpoint endpoint) { + "the configured endpoints. 
Please use one from the configuration"); } if (setActiveDatabase(database, true)) { - onClusterSwitch(SwitchReason.FORCED, endpoint, database); + onDatabaseSwitch(SwitchReason.FORCED, endpoint, database); } } @@ -741,7 +741,7 @@ public boolean canIterateFrom(Database iterateFrom) { return e != null; } - public void onClusterSwitch(SwitchReason reason, Endpoint endpoint, Database database) { + public void onDatabaseSwitch(SwitchReason reason, Endpoint endpoint, Database database) { if (databaseSwitchListener != null) { DatabaseSwitchEvent eventArgs = new DatabaseSwitchEvent(reason, endpoint, database); databaseSwitchListener.accept(eventArgs); From cfac214a436a25952f0807034721596f641fc64c Mon Sep 17 00:00:00 2001 From: ggivo Date: Tue, 7 Oct 2025 11:23:12 +0300 Subject: [PATCH 16/18] more leftovers in test names --- .../java/redis/clients/jedis/mcf/MultiDbFailoverBase.java | 2 +- .../MultiDbConnectionProviderDynamicEndpointUnitTest.java | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java index 3e9d5f2d39..e9fc874a2d 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java @@ -43,7 +43,7 @@ protected void clusterFailover(Database database) { CircuitBreaker circuitBreaker = database.getCircuitBreaker(); try { - // Check state to handle race conditions since iterateActiveCluster() is + // Check state to handle race conditions since switchToHealthyDatabase() is // non-idempotent if (!CircuitBreaker.State.FORCED_OPEN.equals(circuitBreaker.getState())) { diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java index bc418f8682..663f33529e 100644 --- 
a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java @@ -73,7 +73,7 @@ void testAddNullDatabaseConfig() { } @Test - void testRemoveExistingCluster() { + void testRemoveExistingDatabase() { Connection mockConnection = mock(Connection.class); when(mockConnection.ping()).thenReturn(true); @@ -155,7 +155,7 @@ void testAddAndRemoveMultipleDatabases() { } @Test - void testActiveClusterHandlingOnAdd() { + void testActiveDatabaseHandlingOnAdd() { // The initial database should be active assertNotNull(provider.getDatabase()); @@ -186,8 +186,8 @@ void testActiveClusterHandlingOnRemove() { providerWithMockedPool.add(newConfig); // Get current active database - Object initialActiveCluster = providerWithMockedPool.getDatabase(); - assertNotNull(initialActiveCluster); + Object initialActiveDb = providerWithMockedPool.getDatabase(); + assertNotNull(initialActiveDb); // Remove endpoint1 (original database, might be active) providerWithMockedPool.remove(endpoint1.getHostAndPort()); From 22d7b31f744f705a3eb37d4119cdeedc416bc2fd Mon Sep 17 00:00:00 2001 From: ggivo Date: Tue, 7 Oct 2025 11:26:13 +0300 Subject: [PATCH 17/18] fix spell checks --- .github/wordlist.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/wordlist.txt b/.github/wordlist.txt index a0ba60937b..8f4a29819d 100644 --- a/.github/wordlist.txt +++ b/.github/wordlist.txt @@ -355,4 +355,5 @@ entraid EntraID ACR AMR -Entra \ No newline at end of file +Entra +DatabaseSwitchEvent \ No newline at end of file From 8bd8fc17e5f1a265e6ed7764f5798723f8881e20 Mon Sep 17 00:00:00 2001 From: ggivo Date: Tue, 7 Oct 2025 11:57:40 +0300 Subject: [PATCH 18/18] nit test rename --- .../clients/jedis/mcf/MultiDbConnectionProviderTest.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git 
a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java index 8c2da1e61a..841896aa34 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java @@ -136,11 +136,8 @@ public void testDatabaseSwitchListener() { } @Test - public void testSetActiveDatabaseIndexEqualsZero() { - assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); // Should - // throw - // an - // exception + public void testSetActiveDatabaseNull() { + assertThrows(JedisValidationException.class, () -> provider.setActiveDatabase(null)); } @Test