diff --git a/docs/failover.md b/docs/failover.md index 29e5551306..52c3bee02e 100644 --- a/docs/failover.md +++ b/docs/failover.md @@ -41,8 +41,8 @@ Let's look at one way of configuring Jedis for this scenario. First, start by defining the initial configuration for each Redis database available and prioritize them using weights. ```java - JedisClientConfig config = DefaultJedisClientConfig.builder().user("cache").password("secret") - .socketTimeoutMillis(5000).connectionTimeoutMillis(5000).build(); +JedisClientConfig config = DefaultJedisClientConfig.builder().user("cache").password("secret") +.socketTimeoutMillis(5000).connectionTimeoutMillis(5000).build(); // Custom pool config per database can be provided ConnectionPoolConfig poolConfig = new ConnectionPoolConfig(); @@ -58,8 +58,8 @@ HostAndPort east = new HostAndPort("redis-east.example.com", 14000); HostAndPort west = new HostAndPort("redis-west.example.com", 14000); MultiDbConfig.Builder multiConfig = MultiDbConfig.builder() - .endpoint(DatabaseConfig.builder(east, config).connectionPoolConfig(poolConfig).weight(1.0f).build()) - .endpoint(DatabaseConfig.builder(west, config).connectionPoolConfig(poolConfig).weight(0.5f).build()); + .database(DatabaseConfig.builder(east, config).connectionPoolConfig(poolConfig).weight(1.0f).build()) + .database(DatabaseConfig.builder(west, config).connectionPoolConfig(poolConfig).weight(0.5f).build()); ``` The configuration above represents your two Redis deployments: `redis-east` and `redis-west`. @@ -68,28 +68,33 @@ Continue using the `MultiDbConfig.Builder` builder to set your preferred retry a Then build a `MultiDbClient`. ```java -multiDbBuilder.circuitBreakerSlidingWindowSize(2) // Sliding window size in number of calls - .circuitBreakerFailureRateThreshold(10.0f) // percentage of failures to trigger circuit breaker - .circuitBreakerMinNumOfFailures(1000) // Minimum number of failures before circuit breaker is tripped - - .failbackSupported(true) // Enable failback - .failbackCheckInterval(1000) // Check every second the unhealthy database to see if it has recovered - .gracePeriod(10000) // Keep database disabled for 10 seconds after it becomes unhealthy - -// Optional: configure retry settings - .retryMaxAttempts(3) // Maximum number of retry attempts (including the initial call) - .retryWaitDuration(500) // Number of milliseconds to wait between retry attempts - .retryWaitDurationExponentialBackoffMultiplier(2) // Exponential backoff factor multiplied against wait duration between retries - -// Optional: configure fast failover - .fastFailover(true) // Force closing connections to unhealthy database on failover - .retryOnFailover(false); // Do not retry failed commands during failover - -MultiDbClient multiDbClient = multiDbBuilder.build(); +// Configure circuit breaker for failure detection +multiConfig + .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder() + .slidingWindowSize(1000) // Sliding window size in number of calls + .failureRateThreshold(50.0f) // percentage of failures to trigger circuit breaker + .minNumOfFailures(500) // Minimum number of failures before circuit breaker is tripped + .build()) + .failbackSupported(true) // Enable failback + .failbackCheckInterval(1000) // Check every second the unhealthy database to see if it has recovered + .gracePeriod(10000) // Keep database disabled for 10 seconds after it becomes unhealthy + // Optional: configure retry settings + .commandRetry(MultiDbConfig.RetryConfig.builder() + .maxAttempts(3) // Maximum number of retry attempts 
(including the initial call) + .waitDuration(500) // Number of milliseconds to wait between retry attempts + .exponentialBackoffMultiplier(2) // Exponential backoff factor multiplied against wait duration between retries + .build()) + // Optional: configure fast failover + .fastFailover(true) // Force closing connections to unhealthy database on failover + .retryOnFailover(false); // Do not retry failed commands during failover + +MultiDbClient multiDbClient = MultiDbClient.builder() + .multiDbConfig(multiConfig.build()) + .build(); ``` -In the configuration here, we've set a sliding window size of 10 and a failure rate threshold of 50%. -This means that a failover will be triggered if 5 out of any 10 calls to Redis fail. +In the configuration here, we've set a sliding window size of 1000 and a failure rate threshold of 50%. +This means that a failover will be triggered only when at least 500 of the last 1000 calls to Redis have failed, which satisfies both the minimum number of failures and the failure rate threshold. You can now use this `MultiDbClient` instance, and the connection management and failover will be handled transparently. @@ -98,22 +103,22 @@ You can now use this `MultiDbClient` instance, and the connection management and Under the hood, Jedis' failover support relies on [resilience4j](https://resilience4j.readme.io/docs/getting-started), a fault-tolerance library that implements [retry](https://resilience4j.readme.io/docs/retry) and [circuit breakers](https://resilience4j.readme.io/docs/circuitbreaker). -Once you configure Jedis for failover using the `MultiClusterPooledConnectionProvider`, each call to Redis is decorated with a resilience4j retry and circuit breaker. +Once you configure Jedis for failover using the `MultiDbConnectionProvider`, each call to Redis is decorated with a resilience4j retry and circuit breaker. By default, any call that throws a `JedisConnectionException` will be retried up to 3 times. If the call fails, then the circuit breaker will record a failure. The circuit breaker maintains a record of failures in a sliding window data structure. -If the failure rate reaches a configured threshold (e.g., when 50% of the last 10 calls have failed), +If the failure rate reaches a configured threshold (e.g., when 50% of the last 1000 calls have failed), then the circuit breaker's state transitions from `CLOSED` to `OPEN`. When this occurs, Jedis will attempt to connect to the next Redis database with the highest weight in its client configuration list. The supported retry and circuit breaker settings, and their default values, are described below. -You can configure any of these settings using the `MultiClusterClientConfig.Builder` builder. +You can configure any of these settings using the `MultiDbConfig.Builder` builder. Refer to the basic usage above for an example of this. ### Retry configuration - +Configuration for command retry behavior is encapsulated in `MultiDbConfig.RetryConfig`. Jedis uses the following retry settings: | Setting | Default value | Description | @@ -124,10 +129,11 @@ Jedis uses the following retry settings: | Retry included exception list | [JedisConnectionException] | A list of Throwable classes that count as failures and should be retried. | | Retry ignored exception list | null | A list of Throwable classes to explicitly ignore for the purposes of retry. | -To disable retry, set `maxAttempts` to 1. 
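+
+For illustration, the retry settings in the table above map onto `MultiDbConfig.RetryConfig` roughly as follows. This is only a sketch that reuses the `multiConfig` builder from the configuration example earlier in this page:
+
+```java
+MultiDbConfig.RetryConfig retryConfig = MultiDbConfig.RetryConfig.builder()
+    .maxAttempts(3)                  // 1 initial call plus up to 2 retries
+    .waitDuration(500)               // base wait of 500 ms between attempts
+    .exponentialBackoffMultiplier(2) // subsequent waits grow to 1000 ms, 2000 ms, ...
+    .build();
+
+// Attach it to the builder from the earlier example; maxAttempts(1) would disable retry.
+multiConfig.commandRetry(retryConfig);
+```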
### Circuit breaker configuration - +For failover, Jedis uses a circuit breaker to detect when a Redis database has failed. +Failover configuration is encapsulated in `MultiDbConfig.CircuitBreakerConfig` and can be provided using the `MultiDbConfig.Builder.failureDetector()`. Jedis uses the following circuit breaker settings: | Setting | Default value | Description | @@ -221,23 +227,23 @@ Use the `healthCheckStrategySupplier()` method to provide a custom health check ```java // Custom strategy supplier -MultiClusterClientConfig.StrategySupplier customStrategy = - (hostAndPort, jedisClientConfig) -> { - // Return your custom HealthCheckStrategy implementation - return new MyCustomHealthCheckStrategy(hostAndPort, jedisClientConfig); - }; +MultiDbConfig.StrategySupplier customStrategy = + (hostAndPort, jedisClientConfig) -> { + // Return your custom HealthCheckStrategy implementation + return new MyCustomHealthCheckStrategy(hostAndPort, jedisClientConfig); + }; -MultiClusterClientConfig.ClusterConfig dbConfig = - MultiClusterClientConfig.ClusterConfig.builder(hostAndPort, clientConfig) - .healthCheckStrategySupplier(customStrategy) - .weight(1.0f) - .build(); +MultiDbConfig.DatabaseConfig dbConfig = + MultiDbConfig.DatabaseConfig.builder(hostAndPort, clientConfig) + .healthCheckStrategySupplier(customStrategy) + .weight(1.0f) + .build(); ``` You can implement custom health check strategies by implementing the `HealthCheckStrategy` interface: ```java -MultiClusterClientConfig.StrategySupplier pingStrategy = (hostAndPort, jedisClientConfig) -> { +MultiDbConfig.StrategySupplier pingStrategy = (hostAndPort, jedisClientConfig) -> { return new HealthCheckStrategy() { @Override public int getInterval() { @@ -249,11 +255,21 @@ MultiClusterClientConfig.StrategySupplier pingStrategy = (hostAndPort, jedisClie return 500; // 500ms timeout } + @Override - public int minConsecutiveSuccessCount() { - return 1; // Single success required + public int getNumProbes() { + return 1; } + @Override + public ProbingPolicy getPolicy() { + return ProbingPolicy.BuiltIn.ANY_SUCCESS; + } + + @Override + public int getDelayInBetweenProbes() { + return 100; + } @Override public HealthStatus doHealthCheck(Endpoint endpoint) { try (UnifiedJedis jedis = new UnifiedJedis(hostAndPort, jedisClientConfig)) { @@ -271,10 +287,10 @@ MultiClusterClientConfig.StrategySupplier pingStrategy = (hostAndPort, jedisClie }; }; -MultiClusterClientConfig.ClusterConfig dbConfig = - MultiClusterClientConfig.ClusterConfig.builder(hostAndPort, clientConfig) - .healthCheckStrategySupplier(pingStrategy) - .build(); +MultiDbConfig.DatabaseConfig dbConfig = + MultiDbConfig.DatabaseConfig.builder(hostAndPort, clientConfig) + .healthCheckStrategySupplier(pingStrategy) + .build(); ``` #### Disabling Health Checks @@ -282,9 +298,9 @@ MultiClusterClientConfig.ClusterConfig dbConfig = Use the `healthCheckEnabled(false)` method to completely disable health checks: ```java -DatabaseConfig dbConfig = DatabaseConfig.builder(east, config) - .healthCheckEnabled(false) // Disable health checks entirely - .build(); +MultiDbConfig.DatabaseConfig dbConfig = MultiDbConfig.DatabaseConfig.builder(east, config) + .healthCheckEnabled(false) // Disable health checks entirely + .build(); ``` ### Fallback configuration @@ -306,7 +322,7 @@ To use this feature, you'll need to design a class that implements `java.util.fu This class must implement the `accept` method, as you can see below. 
```java - public class FailoverReporter implements Consumer { +public class FailoverReporter implements Consumer { @Override public void accept(DatabaseSwitchEvent e) { @@ -317,18 +333,18 @@ This class must implement the `accept` method, as you can see below. DatabaseSwitchEvent consumer can be registered as follows: -``` - FailoverReporter reporter = new FailoverReporter(); - MultiDbClient client = MultiDbClient.builder() - .databaseSwitchListener(reporter) - .build(); +```java +FailoverReporter reporter = new FailoverReporter(); +MultiDbClient client = MultiDbClient.builder() + .databaseSwitchListener(reporter) + .build(); ``` The provider will call your `accept` whenever a failover occurs. or directly using lambda expression: -``` - MultiDbClient client = MultiDbClient.builder() - .databaseSwitchListener(event -> System.out.println("Switched to: " + event.getEndpoint())) - .build(); +```java +MultiDbClient client = MultiDbClient.builder() + .databaseSwitchListener(event -> System.out.println("Switched to: " + event.getEndpoint())) + .build(); ``` @@ -414,7 +430,7 @@ HealthCheckStrategy.Config config = HealthCheckStrategy.Config.builder() .build(); // Adjust failback timing -MultiDbConfig multiConfig = new MultiDbConfig.Builder() +MultiDbConfig multiConfig = MultiDbConfig.builder() .gracePeriod(5000) // Shorter grace period .build(); ``` diff --git a/src/main/java/redis/clients/jedis/MultiDbClient.java b/src/main/java/redis/clients/jedis/MultiDbClient.java index 3ffeaf93aa..0c82a128e2 100644 --- a/src/main/java/redis/clients/jedis/MultiDbClient.java +++ b/src/main/java/redis/clients/jedis/MultiDbClient.java @@ -44,18 +44,22 @@ * MultiDbClient client = MultiDbClient.builder() * .multiDbConfig( * MultiDbConfig.builder() - * .endpoint( + * .database( * DatabaseConfig.builder( * primary, * DefaultJedisClientConfig.builder().build()) * .weight(100.0f) * .build()) - * .endpoint(DatabaseConfig.builder( + * .database(DatabaseConfig.builder( * secondary, * DefaultJedisClientConfig.builder().build()) * .weight(50.0f).build()) - * .circuitBreakerFailureRateThreshold(50.0f) - * .retryMaxAttempts(3) + * .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder() + * .failureRateThreshold(50.0f) + * .build()) + * .commandRetry(MultiDbConfig.RetryConfig.builder() + * .maxAttempts(3) + * .build()) * .build() * ) * .databaseSwitchListener(event -> @@ -104,10 +108,10 @@ public class MultiDbClient extends UnifiedJedis { /** * Returns the underlying MultiDbConnectionProvider. *

- * This provides access to multi-cluster specific operations like manual failover, health status - * monitoring, and cluster switch event handling. + * This provides access to multi-database specific operations like manual failover, health status + * monitoring, and database switch event handling. *

- * @return the multi-cluster connection provider + * @return the multi-database connection provider * @throws ClassCastException if the provider is not a MultiDbConnectionProvider */ private MultiDbConnectionProvider getMultiDbConnectionProvider() { @@ -127,7 +131,7 @@ public void setActiveDatabase(Endpoint endpoint) { } /** - * Adds a pre-configured cluster configuration. + * Adds a pre-configured database configuration. *

* This method allows adding a fully configured DatabaseConfig instance, providing maximum * flexibility for advanced configurations including custom health check strategies, connection @@ -135,23 +139,23 @@ public void setActiveDatabase(Endpoint endpoint) { *

* @param databaseConfig the pre-configured database configuration */ - public void addEndpoint(DatabaseConfig databaseConfig) { + public void addDatabase(DatabaseConfig databaseConfig) { getMultiDbConnectionProvider().add(databaseConfig); } /** - * Dynamically adds a new cluster endpoint to the resilient client. + * Dynamically adds a new database endpoint to the multi-database client. *

- * This allows adding new endpoints at runtime without recreating the client. The new endpoint - * will be available for failover operations immediately after being added and passing health - * checks (if configured). + * This allows adding new database endpoints at runtime without recreating the client. The new + * endpoint will be available for failover operations immediately after being added and passing + * health checks (if configured). *

* @param endpoint the Redis server endpoint * @param weight the weight for this endpoint (higher values = higher priority) * @param clientConfig the client configuration for this endpoint * @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint already exists */ - public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) { + public void addDatabase(Endpoint endpoint, float weight, JedisClientConfig clientConfig) { DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight) .build(); @@ -159,18 +163,19 @@ public void addEndpoint(Endpoint endpoint, float weight, JedisClientConfig clien } /** - * Returns the set of all configured endpoints. + * Returns the set of all configured database endpoints. *

- * This method provides a view of all endpoints currently configured in the resilient client. + * This method provides a view of all database endpoints currently configured in the + * multi-database client. These are the endpoints that can be used for failover operations. *

- * @return the set of all configured endpoints + * @return the set of all configured database endpoints */ - public Set getEndpoints() { + public Set getDatabaseEndpoints() { return getMultiDbConnectionProvider().getEndpoints(); } /** - * Returns the health status of the specified endpoint. + * Returns the health status of the specified database. *

* This method provides the current health status of a specific endpoint. *

@@ -182,39 +187,39 @@ public boolean isHealthy(Endpoint endpoint) { } /** - * Dynamically removes a cluster endpoint from the resilient client. + * Dynamically removes a database endpoint from the multi-database client. *

- * This allows removing endpoints at runtime. If the removed endpoint is currently active, the - * client will automatically failover to the next available healthy endpoint based on weight - * priority. + * This allows removing database endpoints at runtime. If the removed endpoint is currently + * active, the client will automatically failover to the next available healthy endpoint based on + * weight priority. *

* @param endpoint the endpoint to remove * @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint doesn't exist * @throws redis.clients.jedis.exceptions.JedisException if removing the endpoint would leave no - * healthy clusters available + * healthy databases available */ - public void removeEndpoint(Endpoint endpoint) { + public void removeDatabase(Endpoint endpoint) { getMultiDbConnectionProvider().remove(endpoint); } /** - * Forces the client to switch to a specific endpoint for a duration. + * Forces the client to switch to a specific database for a duration. *

- * This method forces the client to use the specified endpoint and puts all other endpoints in a - * grace period, preventing automatic failover for the specified duration. This is useful for - * maintenance scenarios or testing specific endpoints. + * This method forces the client to use the specified database endpoint and puts all other + * endpoints in a grace period, preventing automatic failover for the specified duration. This is + * useful for maintenance scenarios or testing specific database endpoints. *

* @param endpoint the endpoint to force as active * @param forcedActiveDurationMs the duration in milliseconds to keep this endpoint forced * @throws redis.clients.jedis.exceptions.JedisValidationException if the endpoint is not healthy * or doesn't exist */ - public void forceActiveEndpoint(Endpoint endpoint, long forcedActiveDurationMs) { + public void forceActiveDatabase(Endpoint endpoint, long forcedActiveDurationMs) { getMultiDbConnectionProvider().forceActiveDatabase(endpoint, forcedActiveDurationMs); } /** - * Creates a new pipeline for batch operations with multi-cluster support. + * Creates a new pipeline for batch operations with multi-database support. *

* The returned pipeline supports the same resilience features as the main client, including * automatic failover during batch execution. @@ -227,7 +232,7 @@ public MultiDbPipeline pipelined() { } /** - * Creates a new transaction with multi-cluster support. + * Creates a new transaction with multi-database support. *

* The returned transaction supports the same resilience features as the main client, including * automatic failover during transaction execution. @@ -253,7 +258,15 @@ public MultiDbTransaction transaction(boolean doMulti) { return new MultiDbTransaction(getMultiDbConnectionProvider(), doMulti, commandObjects); } - public Endpoint getActiveEndpoint() { + /** + * Returns the currently active database endpoint. + *

+ * The active endpoint is the one currently being used for all operations. It can change at any + * time due to health checks, failover, failback, or manual switching. + *

+ * @return the active database endpoint + */ + public Endpoint getActiveDatabaseEndpoint() { return getMultiDbConnectionProvider().getDatabase().getEndpoint(); } diff --git a/src/main/java/redis/clients/jedis/MultiDbConfig.java b/src/main/java/redis/clients/jedis/MultiDbConfig.java index 7bafdd9f5e..867abae7d1 100644 --- a/src/main/java/redis/clients/jedis/MultiDbConfig.java +++ b/src/main/java/redis/clients/jedis/MultiDbConfig.java @@ -15,12 +15,12 @@ import redis.clients.jedis.mcf.HealthCheckStrategy; /** - * Configuration class for multi-cluster Redis deployments with automatic failover and failback + * Configuration class for multi-database Redis deployments with automatic failover and failback * capabilities. *

- * This configuration enables seamless failover between multiple Redis clusters, databases, or - * endpoints by providing comprehensive settings for retry logic, circuit breaker behavior, health - * checks, and failback mechanisms. It is designed to work with + * This configuration enables seamless failover between multiple Redis database endpoints by + * providing comprehensive settings for retry logic, circuit breaker behavior, health checks, and + * failback mechanisms. It is designed to work with * {@link redis.clients.jedis.mcf.MultiDbConnectionProvider} to provide high availability and * disaster recovery capabilities. *

@@ -28,7 +28,7 @@ * Key Features: *

*
    - *
  • Multi-Cluster Support: Configure multiple Redis endpoints with individual + *
  • Multi-Database Support: Configure multiple Redis endpoints with individual * weights and health checks
  • *
  • Circuit Breaker Pattern: Automatic failure detection and circuit opening * based on configurable thresholds
  • @@ -36,9 +36,9 @@ * transient failures *
  • Health Check Integration: Pluggable health check strategies for proactive * monitoring
  • - *
  • Automatic Failback: Intelligent failback to higher-priority clusters when + *
  • Automatic Failback: Intelligent failback to higher-priority databases when * they recover
  • - *
  • Weight-Based Routing: Priority-based cluster selection using configurable + *
  • Weight-Based Routing: Priority-based database selection using configurable * weights
  • *
*

@@ -48,16 +48,17 @@ *

  * {
  *   @code
- *   // Configure individual clusters
+ *   // Configure individual databases
  *   DatabaseConfig primary = DatabaseConfig.builder(primaryEndpoint, clientConfig).weight(1.0f)
  *       .build();
  *
  *   DatabaseConfig secondary = DatabaseConfig.builder(secondaryEndpoint, clientConfig).weight(0.5f)
  *       .healthCheckEnabled(true).build();
  *
- *   // Build multi-cluster configuration
+ *   // Build multi-database configuration
  *   MultiDbConfig config = MultiDbConfig.builder(primary, secondary)
- *       .circuitBreakerFailureRateThreshold(10.0f).retryMaxAttempts(3).failbackSupported(true)
+ *       .failureDetector(CircuitBreakerConfig.builder().failureRateThreshold(10.0f).build())
+ *       .commandRetry(RetryConfig.builder().maxAttempts(3).build()).failbackSupported(true)
  *       .gracePeriod(10000).build();
  *
  *   // Use with connection provider
@@ -112,6 +113,340 @@ public static interface StrategySupplier {
     HealthCheckStrategy get(HostAndPort hostAndPort, JedisClientConfig jedisClientConfig);
   }
 
+  /**
+   * Configuration for command retry behavior.
+   * 

+ * This class encapsulates all retry-related settings including maximum attempts, wait duration, + * exponential backoff, and exception handling. It provides a clean separation of retry concerns + * from other configuration aspects. + *

+ * @since 7.0 + */ + public static final class RetryConfig { + + private final int maxAttempts; + private final Duration waitDuration; + private final int exponentialBackoffMultiplier; + private final List includedExceptionList; + private final List ignoreExceptionList; + + private RetryConfig(Builder builder) { + this.maxAttempts = builder.maxAttempts; + this.waitDuration = Duration.ofMillis(builder.waitDuration); + this.exponentialBackoffMultiplier = builder.exponentialBackoffMultiplier; + this.includedExceptionList = builder.includedExceptionList; + this.ignoreExceptionList = builder.ignoreExceptionList; + } + + /** + * Returns the maximum number of retry attempts including the initial call. + * @return maximum retry attempts + */ + public int getMaxAttempts() { + return maxAttempts; + } + + /** + * Returns the base wait duration between retry attempts. + * @return wait duration between retries + */ + public Duration getWaitDuration() { + return waitDuration; + } + + /** + * Returns the exponential backoff multiplier for retry wait duration. + * @return exponential backoff multiplier + */ + public int getExponentialBackoffMultiplier() { + return exponentialBackoffMultiplier; + } + + /** + * Returns the list of exception classes that trigger retry attempts. + * @return list of exception classes that are retried, never null + */ + public List getIncludedExceptionList() { + return includedExceptionList; + } + + /** + * Returns the list of exception classes that are ignored for retry purposes. + * @return list of exception classes to ignore for retries, may be null + */ + public List getIgnoreExceptionList() { + return ignoreExceptionList; + } + + /** + * Creates a new Builder instance for configuring RetryConfig. + * @return new Builder instance with default values + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link RetryConfig}. + */ + public static final class Builder { + + private int maxAttempts = RETRY_MAX_ATTEMPTS_DEFAULT; + private int waitDuration = RETRY_WAIT_DURATION_DEFAULT; + private int exponentialBackoffMultiplier = RETRY_WAIT_DURATION_EXPONENTIAL_BACKOFF_MULTIPLIER_DEFAULT; + private List includedExceptionList = RETRY_INCLUDED_EXCEPTIONS_DEFAULT; + private List ignoreExceptionList = null; + + /** + * Sets the maximum number of retry attempts including the initial call. + * @param maxAttempts maximum number of attempts (must be >= 1) + * @return this builder instance for method chaining + */ + public Builder maxAttempts(int maxAttempts) { + this.maxAttempts = maxAttempts; + return this; + } + + /** + * Sets the base wait duration between retry attempts in milliseconds. + * @param waitDuration wait duration in milliseconds (must be >= 0) + * @return this builder instance for method chaining + */ + public Builder waitDuration(int waitDuration) { + this.waitDuration = waitDuration; + return this; + } + + /** + * Sets the exponential backoff multiplier for retry wait duration. + * @param exponentialBackoffMultiplier exponential backoff multiplier (must be >= 1) + * @return this builder instance for method chaining + */ + public Builder exponentialBackoffMultiplier(int exponentialBackoffMultiplier) { + this.exponentialBackoffMultiplier = exponentialBackoffMultiplier; + return this; + } + + /** + * Sets the list of exception classes that trigger retry attempts. 
+ * @param includedExceptionList list of exception classes that should be retried + * @return this builder instance for method chaining + */ + public Builder includedExceptionList(List includedExceptionList) { + this.includedExceptionList = includedExceptionList; + return this; + } + + /** + * Sets the list of exception classes that are ignored for retry purposes. + * @param ignoreExceptionList list of exception classes to ignore for retries + * @return this builder instance for method chaining + */ + public Builder ignoreExceptionList(List ignoreExceptionList) { + this.ignoreExceptionList = ignoreExceptionList; + return this; + } + + /** + * Builds and returns a new RetryConfig instance. + * @return new RetryConfig instance with configured settings + */ + public RetryConfig build() { + return new RetryConfig(this); + } + } + } + + /** + * Configuration for circuit breaker failure detection. + *

+ * This class encapsulates all circuit breaker-related settings including failure rate threshold, + * sliding window size, minimum failures, and exception handling. + *

+ * @since 7.0 + */ + public static final class CircuitBreakerConfig { + + private final float failureRateThreshold; + private final int slidingWindowSize; + private final int minNumOfFailures; + private final List includedExceptionList; + private final List ignoreExceptionList; + + private CircuitBreakerConfig(Builder builder) { + this.failureRateThreshold = builder.failureRateThreshold; + this.slidingWindowSize = builder.slidingWindowSize; + this.minNumOfFailures = builder.minNumOfFailures; + this.includedExceptionList = builder.includedExceptionList; + this.ignoreExceptionList = builder.ignoreExceptionList; + } + + /** + * Returns the failure rate threshold percentage for circuit breaker activation. + *

+ * 0.0f means failure rate is ignored, and only minimum number of failures is considered. + *

+ *

+ * When the failure rate exceeds this threshold and the minimum number of failures has also been + * reached, the circuit breaker transitions to the OPEN state and starts short-circuiting calls, + * immediately failing them without attempting to reach the Redis database. This prevents cascading + * failures and allows the system to fail over to the next available database. + *

+ *

+ * Range: 0.0 to 100.0 (percentage) + *

+ * @return failure rate threshold as a percentage (0.0 to 100.0) + * @see #getMinNumOfFailures() + */ + public float getFailureRateThreshold() { + return failureRateThreshold; + } + + /** + * Returns the size of the sliding window used to record call outcomes when the circuit breaker + * is CLOSED. + *

+ * Default: {@value #CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT} + *

+ * @return sliding window size in number of calls + */ + public int getSlidingWindowSize() { + return slidingWindowSize; + } + + /** + * Returns the minimum number of failures before circuit breaker is tripped. + *

+ * 0 means minimum number of failures is ignored, and only failure rate is considered. + *

+ *

+ * When the number of failures reaches this threshold and the failure rate threshold is also + * exceeded, the circuit breaker will trip and prevent further requests from being sent to the + * database until it has recovered. + *

+ * @return minimum number of failures before circuit breaker is tripped + * @see #getFailureRateThreshold() + */ + public int getMinNumOfFailures() { + return minNumOfFailures; + } + + /** + * Returns the list of exception classes that are recorded as circuit breaker failures and + * increase the failure rate. + *

+ * Any exception that matches or inherits from the classes in this list counts as a failure for + * circuit breaker calculations, unless explicitly ignored via + * {@link #getIgnoreExceptionList()}. If you specify this list, all other exceptions count as + * successes unless they are explicitly ignored. + *

+ *

+ * Default: {@link JedisConnectionException} + *

+ * @return list of exception classes that count as failures, never null + * @see #getIgnoreExceptionList() + */ + public List getIncludedExceptionList() { + return includedExceptionList; + } + + /** + * Returns the list of exception classes that are ignored by the circuit breaker and neither + * count as failures nor successes. + *

+ * Any exception that matches or inherits from the classes in this list will not affect circuit + * breaker failure rate calculations, even if the exception is included in + * {@link #getIncludedExceptionList()}. + *

+ *

+ * Default: null (no exceptions ignored) + *

+ * @return list of exception classes to ignore for circuit breaker calculations, may be null + * @see #getIncludedExceptionList() + */ + public List getIgnoreExceptionList() { + return ignoreExceptionList; + } + + /** + * Creates a new Builder instance for configuring CircuitBreakerConfig. + * @return new Builder instance with default values + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link CircuitBreakerConfig}. + */ + public static final class Builder { + + private float failureRateThreshold = CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT; + private int slidingWindowSize = CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT; + private int minNumOfFailures = CIRCUITBREAKER_THRESHOLD_MIN_NUM_OF_FAILURES_DEFAULT; + private List includedExceptionList = CIRCUIT_BREAKER_INCLUDED_EXCEPTIONS_DEFAULT; + private List ignoreExceptionList = null; + + /** + * Sets the failure rate threshold percentage that triggers circuit breaker activation. + * @param failureRateThreshold failure rate threshold as percentage (0.0 to 100.0) + * @return this builder instance for method chaining + */ + public Builder failureRateThreshold(float failureRateThreshold) { + this.failureRateThreshold = failureRateThreshold; + return this; + } + + /** + * Sets the size of the sliding window for circuit breaker calculations. + * @param slidingWindowSize sliding window size + * @return this builder instance for method chaining + */ + public Builder slidingWindowSize(int slidingWindowSize) { + this.slidingWindowSize = slidingWindowSize; + return this; + } + + /** + * Sets the minimum number of failures before circuit breaker is tripped. + * @param minNumOfFailures minimum number of failures + * @return this builder instance for method chaining + */ + public Builder minNumOfFailures(int minNumOfFailures) { + this.minNumOfFailures = minNumOfFailures; + return this; + } + + /** + * Sets the list of exception classes that are recorded as circuit breaker failures. + * @param includedExceptionList list of exception classes that count as failures + * @return this builder instance for method chaining + */ + public Builder includedExceptionList(List includedExceptionList) { + this.includedExceptionList = includedExceptionList; + return this; + } + + /** + * Sets the list of exception classes that are ignored by the circuit breaker. + * @param ignoreExceptionList list of exception classes to ignore + * @return this builder instance for method chaining + */ + public Builder ignoreExceptionList(List ignoreExceptionList) { + this.ignoreExceptionList = ignoreExceptionList; + return this; + } + + /** + * Builds and returns a new CircuitBreakerConfig instance. + * @return new CircuitBreakerConfig instance with configured settings + */ + public CircuitBreakerConfig build() { + return new CircuitBreakerConfig(this); + } + } + } + // ============ Default Configuration Constants ============ /** Default maximum number of retry attempts including the initial call. */ @@ -140,14 +475,16 @@ public static interface StrategySupplier { private static final List CIRCUIT_BREAKER_INCLUDED_EXCEPTIONS_DEFAULT = Arrays .asList(JedisConnectionException.class); - /** Default list of exceptions that trigger fallback to next available cluster. */ + /** Default list of exceptions that trigger fallback to next available database. 
*/ private static final List> FALLBACK_EXCEPTIONS_DEFAULT = Arrays .asList(CallNotPermittedException.class, ConnectionFailoverException.class); - /** Default interval in milliseconds for checking if failed clusters have recovered. */ + /** Default interval in milliseconds for checking if failed databases have recovered. */ private static final long FAILBACK_CHECK_INTERVAL_DEFAULT = 120000; - /** Default grace period in milliseconds to keep clusters disabled after they become unhealthy. */ + /** + * Default grace period in milliseconds to keep databases disabled after they become unhealthy. + */ private static final long GRACE_PERIOD_DEFAULT = 60000; /** Default maximum number of failover attempts. */ @@ -156,168 +493,38 @@ public static interface StrategySupplier { /** Default delay in milliseconds between failover attempts. */ private static final int DELAY_IN_BETWEEN_FAILOVER_ATTEMPTS_DEFAULT = 12000; - /** Array of cluster configurations defining the available Redis endpoints and their settings. */ + /** Array of database configurations defining the available Redis endpoints and their settings. */ private final DatabaseConfig[] databaseConfigs; // ============ Retry Configuration ============ // Based on Resilience4j Retry: https://resilience4j.readme.io/docs/retry /** - * Maximum number of retry attempts including the initial call as the first attempt. - *

- * For example, if set to 3, the system will make 1 initial attempt plus 2 retry attempts for a - * total of 3 attempts before giving up. - *

- *

- * Default: {@value #RETRY_MAX_ATTEMPTS_DEFAULT} - *

- * @see #getRetryMaxAttempts() - */ - private int retryMaxAttempts; - - /** - * Fixed wait duration between retry attempts. - *

- * This duration is used as the base wait time and may be modified by the exponential backoff - * multiplier to create increasing delays between retry attempts. - *

- *

- * Default: {@value #RETRY_WAIT_DURATION_DEFAULT} milliseconds - *

- * @see #getRetryWaitDuration() - * @see #retryWaitDurationExponentialBackoffMultiplier - */ - private Duration retryWaitDuration; - - /** - * Exponential backoff multiplier applied to the wait duration between retry attempts. + * Encapsulated retry configuration for command execution. *

- * The wait duration increases exponentially between attempts using this multiplier. For example, - * with an initial wait time of 1 second and a multiplier of 2, the retries would occur after - * delays of: 1s, 2s, 4s, 8s, 16s, etc. + * This provides a cleaner API for configuring retry behavior by grouping all retry-related + * settings into a single configuration object. *

- *

- * Formula: {@code actualWaitTime = baseWaitTime * (multiplier ^ attemptNumber)} - *

- *

- * Default: {@value #RETRY_WAIT_DURATION_EXPONENTIAL_BACKOFF_MULTIPLIER_DEFAULT} - *

- * @see #getRetryWaitDurationExponentialBackoffMultiplier() - * @see #retryWaitDuration + * @see RetryConfig */ - private int retryWaitDurationExponentialBackoffMultiplier; - - /** - * List of exception classes that are recorded as failures and trigger retry attempts. - *

- * This parameter supports inheritance - any exception that is an instance of or extends from the - * specified classes will trigger a retry. If this list is specified, all other exceptions are - * considered successful unless explicitly ignored. - *

- *

- * Default: {@link JedisConnectionException} - *

- * @see #getRetryIncludedExceptionList() - * @see #retryIgnoreExceptionList - */ - private List retryIncludedExceptionList; - - /** - * List of exception classes that are ignored and do not trigger retry attempts. - *

- * This parameter supports inheritance - any exception that is an instance of or extends from the - * specified classes will be ignored for retry purposes, even if they are included in the - * {@link #retryIncludedExceptionList}. - *

- *

- * Default: null (no exceptions ignored) - *

- * @see #getRetryIgnoreExceptionList() - * @see #retryIncludedExceptionList - */ - private List retryIgnoreExceptionList; + private RetryConfig commandRetry; // ============ Circuit Breaker Configuration ============ /** - * Minimum number of failures before circuit breaker is tripped. - *

- * When the number of failures exceeds both this threshold and the failure rate threshold, the - * circuit breaker will trip and prevent further requests from being sent to the cluster until it - * has recovered. - *

- *

- * Default: {@value #CIRCUITBREAKER_THRESHOLD_MIN_NUM_OF_FAILURES_DEFAULT} - *

- * @see #getCircuitBreakerMinNumOfFailures() - * @see #circuitBreakerFailureRateThreshold - */ - private int circuitBreakerMinNumOfFailures; - - /** - * Failure rate threshold percentage that triggers circuit breaker transition to OPEN state. - *

- * When the failure rate exceeds both this threshold and the minimum number of failures, the - * circuit breaker transitions to the OPEN state and starts short-circuiting calls, immediately - * failing them without attempting to reach the Redis cluster. This prevents cascading failures - * and allows the system to fail over to the next available cluster. - *

- *

- * Range: 0.0 to 100.0 (percentage) - *

- *

- * Default: {@value #CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT}% - *

- * @see #getCircuitBreakerFailureRateThreshold() - * @see #circuitBreakerMinNumOfFailures - */ - private float circuitBreakerFailureRateThreshold; - - /** - * Size of the sliding window used to record call outcomes when the circuit breaker is CLOSED. - * Default: {@value #CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT} - *

- * @see #getCircuitBreakerSlidingWindowSize() - */ - private int circuitBreakerSlidingWindowSize; - - /** - * List of exception classes that are recorded as circuit breaker failures and increase the - * failure rate. - *

- * Any exception that matches or inherits from the classes in this list counts as a failure for - * circuit breaker calculations, unless explicitly ignored via - * {@link #circuitBreakerIgnoreExceptionList}. If you specify this list, all other exceptions - * count as successes unless they are explicitly ignored. - *

+ * Encapsulated circuit breaker configuration for failure detection. *

- * Default: {@link JedisConnectionException} + * This provides a cleaner API for configuring circuit breaker behavior by grouping all circuit + * breaker-related settings into a single configuration object. *

- * @see #getCircuitBreakerIncludedExceptionList() - * @see #circuitBreakerIgnoreExceptionList + * @see CircuitBreakerConfig */ - private List circuitBreakerIncludedExceptionList; + private CircuitBreakerConfig failureDetector; /** - * List of exception classes that are ignored by the circuit breaker and neither count as failures - * nor successes. + * List of exception classes that trigger fallback to the next available database. *

- * Any exception that matches or inherits from the classes in this list will not affect circuit - * breaker failure rate calculations, even if the exception is included in - * {@link #circuitBreakerIncludedExceptionList}. - *

- *

- * Default: null (no exceptions ignored) - *

- * @see #getCircuitBreakerIgnoreExceptionList() - * @see #circuitBreakerIncludedExceptionList - */ - private List circuitBreakerIgnoreExceptionList; - - /** - * List of exception classes that trigger fallback to the next available cluster. - *

- * When these exceptions occur, the system will attempt to failover to the next available cluster + * When these exceptions occur, the system will attempt to failover to the next available database * based on weight priority. This enables immediate failover for specific error conditions without * waiting for circuit breaker thresholds. *

@@ -342,15 +549,15 @@ public static interface StrategySupplier { * Default: false *

* @see #isRetryOnFailover() - * @see #retryMaxAttempts + * @see #commandRetry */ private boolean retryOnFailover; /** - * Whether automatic failback to higher-priority clusters is supported. + * Whether automatic failback to higher-priority databases is supported. *

- * When enabled, the system will automatically monitor failed clusters using health checks and - * failback to higher-priority (higher weight) clusters when they recover. When disabled, manual + * When enabled, the system will automatically monitor failed databases using health checks and + * failback to higher-priority (higher weight) databases when they recover. When disabled, manual * intervention is required to failback. *

*

@@ -363,9 +570,9 @@ public static interface StrategySupplier { private boolean isFailbackSupported; /** - * Interval in milliseconds between checks for failback opportunities to recovered clusters. + * Interval in milliseconds between checks for failback opportunities to recovered databases. *

- * This setting controls how frequently the system checks if a higher-priority cluster has + * This setting controls how frequently the system checks if a higher-priority database has * recovered and is available for failback. Lower values provide faster failback but increase * monitoring overhead. *

@@ -379,11 +586,11 @@ public static interface StrategySupplier { private long failbackCheckInterval; /** - * Grace period in milliseconds to keep clusters disabled after they become unhealthy. + * Grace period in milliseconds to keep databases disabled after they become unhealthy. *

- * After a cluster is marked as unhealthy, it remains disabled for this grace period before being + * After a database is marked as unhealthy, it remains disabled for this grace period before being * eligible for failback, even if health checks indicate recovery. This prevents rapid oscillation - * between clusters during intermittent failures. + * between databases during intermittent failures. *

*

* Default: {@value #GRACE_PERIOD_DEFAULT} milliseconds (10 seconds) @@ -395,9 +602,9 @@ public static interface StrategySupplier { private long gracePeriod; /** - * Whether to forcefully terminate connections during failover for faster cluster switching. + * Whether to forcefully terminate connections during failover for faster database switching. *

- * When enabled, existing connections to the failed cluster are immediately closed during + * When enabled, existing connections to the failed database are immediately closed during * failover, potentially reducing failover time but may cause some in-flight operations to fail. * When disabled, connections are closed gracefully. *

@@ -411,9 +618,9 @@ public static interface StrategySupplier { /** * Maximum number of failover attempts. *

- * This setting controls how many times the system will attempt to failover to a different cluster - * before giving up. For example, if set to 3, the system will make 1 initial attempt plus 2 - * failover attempts for a total of 3 attempts. + * This setting controls how many times the system will attempt to failover to a different + * database before giving up. For example, if set to 3, the system will make 1 initial attempt + * plus 2 failover attempts for a total of 3 attempts. *

*

* Default: {@value #MAX_NUM_FAILOVER_ATTEMPTS_DEFAULT} @@ -426,8 +633,8 @@ public static interface StrategySupplier { * Delay in milliseconds between failover attempts. *

* This setting controls how long the system will wait before attempting to failover to a - * different cluster. For example, if set to 1000, the system will wait 1 second before attempting - * to failover to a different cluster. + * different database. For example, if set to 1000, the system will wait 1 second before + * attempting to failover to a different database. *

*

* Default: {@value #DELAY_IN_BETWEEN_FAILOVER_ATTEMPTS_DEFAULT} milliseconds @@ -437,15 +644,15 @@ public static interface StrategySupplier { private int delayInBetweenFailoverAttempts; /** - * Constructs a new MultiDbConfig with the specified cluster configurations. + * Constructs a new MultiDbConfig with the specified database configurations. *

- * This constructor validates that at least one cluster configuration is provided and that all + * This constructor validates that at least one database configuration is provided and that all * configurations are non-null. Use the {@link Builder} class for more convenient configuration * with default values. *

- * @param databaseConfigs array of cluster configurations defining the available Redis endpoints + * @param databaseConfigs array of database configurations defining the available Redis endpoints * @throws JedisValidationException if databaseConfigs is null or empty - * @throws IllegalArgumentException if any cluster configuration is null + * @throws IllegalArgumentException if any database configuration is null * @see Builder#Builder(DatabaseConfig[]) */ public MultiDbConfig(DatabaseConfig[] databaseConfigs) { @@ -461,109 +668,40 @@ public MultiDbConfig(DatabaseConfig[] databaseConfigs) { } /** - * Returns the array of cluster configurations defining available Redis endpoints. - * @return array of cluster configurations, never null or empty + * Returns the array of database configurations defining available Redis endpoints. + * @return array of database configurations, never null or empty */ public DatabaseConfig[] getDatabaseConfigs() { return databaseConfigs; } /** - * Returns the maximum number of retry attempts including the initial call. - * @return maximum retry attempts - * @see #retryMaxAttempts - */ - public int getRetryMaxAttempts() { - return retryMaxAttempts; - } - - /** - * Returns the base wait duration between retry attempts. - * @return wait duration between retries - * @see #retryWaitDuration - */ - public Duration getRetryWaitDuration() { - return retryWaitDuration; - } - - /** - * Returns the exponential backoff multiplier for retry wait duration. - * @return exponential backoff multiplier - * @see #retryWaitDurationExponentialBackoffMultiplier - */ - public int getRetryWaitDurationExponentialBackoffMultiplier() { - return retryWaitDurationExponentialBackoffMultiplier; - } - - /** - * Returns the failure rate threshold percentage for circuit breaker activation. 0.0f means - * failure rate is ignored, and only minimum number of failures is considered. - * @return failure rate threshold as a percentage (0.0 to 100.0) - * @see #circuitBreakerFailureRateThreshold - * @see #getCircuitBreakerMinNumOfFailures - */ - public float getCircuitBreakerFailureRateThreshold() { - return circuitBreakerFailureRateThreshold; - } - - /** - * Returns the size of the sliding window used for circuit breaker calculations. - * @return sliding window size (calls or seconds depending on window type) - * @see #circuitBreakerSlidingWindowSize - */ - public int getCircuitBreakerSlidingWindowSize() { - return circuitBreakerSlidingWindowSize; - } - - /** - * Returns the minimum number of failures before circuit breaker is tripped. 0 means minimum - * number of failures is ignored, and only failure rate is considered. - * @return minimum number of failures before circuit breaker is tripped - * @see #circuitBreakerMinNumOfFailures - * @see #getCircuitBreakerFailureRateThreshold - */ - public int getCircuitBreakerMinNumOfFailures() { - return circuitBreakerMinNumOfFailures; - } - - /** - * Returns the list of exception classes that trigger retry attempts. - * @return list of exception classes that are retried, never null - * @see #retryIncludedExceptionList - */ - public List getRetryIncludedExceptionList() { - return retryIncludedExceptionList; - } - - /** - * Returns the list of exception classes that are ignored for retry purposes. 
- * @return list of exception classes to ignore for retries, may be null - * @see #retryIgnoreExceptionList - */ - public List getRetryIgnoreExceptionList() { - return retryIgnoreExceptionList; - } - - /** - * Returns the list of exception classes that are recorded as circuit breaker failures. - * @return list of exception classes that count as failures, never null - * @see #circuitBreakerIncludedExceptionList + * Returns the encapsulated retry configuration for command execution. + *

+ * This provides access to all retry-related settings through a single configuration object. + *

+ * @return retry configuration, never null + * @see RetryConfig */ - public List getCircuitBreakerIncludedExceptionList() { - return circuitBreakerIncludedExceptionList; + public RetryConfig getCommandRetry() { + return commandRetry; } /** - * Returns the list of exception classes that are ignored by the circuit breaker. - * @return list of exception classes to ignore for circuit breaker calculations, may be null - * @see #circuitBreakerIgnoreExceptionList + * Returns the encapsulated circuit breaker configuration for failure detection. + *

+ * This provides access to all circuit breaker-related settings through a single configuration + * object. + *

+ * @return circuit breaker configuration, never null + * @see CircuitBreakerConfig */ - public List getCircuitBreakerIgnoreExceptionList() { - return circuitBreakerIgnoreExceptionList; + public CircuitBreakerConfig getFailureDetector() { + return failureDetector; } /** - * Returns the list of exception classes that trigger immediate fallback to next cluster. + * Returns the list of exception classes that trigger immediate fallback to next database. * @return list of exception classes that trigger fallback, never null * @see #fallbackExceptionList */ @@ -581,7 +719,7 @@ public boolean isRetryOnFailover() { } /** - * Returns whether automatic failback to higher-priority clusters is supported. + * Returns whether automatic failback to higher-priority databases is supported. * @return true if automatic failback is enabled, false if manual failback is required * @see #isFailbackSupported */ @@ -599,7 +737,7 @@ public long getFailbackCheckInterval() { } /** - * Returns the grace period to keep clusters disabled after they become unhealthy. + * Returns the grace period to keep databases disabled after they become unhealthy. * @return grace period in milliseconds * @see #gracePeriod */ @@ -638,8 +776,8 @@ public boolean isFastFailover() { /** * Creates a new Builder instance for configuring MultiDbConfig. *

- * At least one cluster configuration must be added to the builder before calling build(). Use the - * endpoint() methods to add cluster configurations. + * At least one database configuration must be added to the builder before calling build(). Use + * the database() methods to add database configurations. *

* @return new Builder instance * @throws JedisValidationException if databaseConfigs is null or empty @@ -651,7 +789,7 @@ public static Builder builder() { /** * Creates a new Builder instance for configuring MultiDbConfig. - * @param databaseConfigs array of cluster configurations defining available Redis endpoints + * @param databaseConfigs array of database configurations defining available Redis endpoints * @return new Builder instance * @throws JedisValidationException if databaseConfigs is null or empty * @see Builder#Builder(DatabaseConfig[]) @@ -662,7 +800,7 @@ public static Builder builder(DatabaseConfig[] databaseConfigs) { /** * Creates a new Builder instance for configuring MultiDbConfig. - * @param databaseConfigs list of cluster configurations defining available Redis endpoints + * @param databaseConfigs list of database configurations defining available Redis endpoints * @return new Builder instance * @throws JedisValidationException if databaseConfigs is null or empty * @see Builder#Builder(List) @@ -672,10 +810,10 @@ public static Builder builder(List databaseConfigs) { } /** - * Configuration class for individual Redis cluster endpoints within a multi-cluster setup. + * Configuration class for individual Redis database endpoints within a multi-database setup. *

* Each DatabaseConfig represents a single Redis endpoint that can participate in the - * multi-cluster failover system. It encapsulates the connection details, weight for + * multi-database failover system. It encapsulates the connection details, weight for * priority-based selection, and health check configuration for that endpoint. *

* @see Builder @@ -978,7 +1116,7 @@ public DatabaseConfig build() { /** * Builder class for creating MultiDbConfig instances with comprehensive configuration options. *

- * The Builder provides a fluent API for configuring all aspects of multi-cluster failover + * The Builder provides a fluent API for configuring all aspects of multi-database failover * behavior, including retry logic, circuit breaker settings, and failback mechanisms. It uses * sensible defaults based on production best practices while allowing fine-tuning for specific * requirements. @@ -992,51 +1130,27 @@ public static class Builder { private final List databaseConfigs = new ArrayList<>(); // ============ Retry Configuration Fields ============ - /** Maximum number of retry attempts including the initial call. */ - private int retryMaxAttempts = RETRY_MAX_ATTEMPTS_DEFAULT; - - /** Wait duration between retry attempts in milliseconds. */ - private int retryWaitDuration = RETRY_WAIT_DURATION_DEFAULT; - - /** Exponential backoff multiplier for retry wait duration. */ - private int retryWaitDurationExponentialBackoffMultiplier = RETRY_WAIT_DURATION_EXPONENTIAL_BACKOFF_MULTIPLIER_DEFAULT; - - /** List of exception classes that trigger retry attempts. */ - private List retryIncludedExceptionList = RETRY_INCLUDED_EXCEPTIONS_DEFAULT; - - /** List of exception classes that are ignored for retry purposes. */ - private List retryIgnoreExceptionList = null; + /** Encapsulated retry configuration for command execution. */ + private RetryConfig commandRetry = RetryConfig.builder().build(); // ============ Circuit Breaker Configuration Fields ============ - /** Failure rate threshold percentage for circuit breaker activation. */ - private float circuitBreakerFailureRateThreshold = CIRCUIT_BREAKER_FAILURE_RATE_THRESHOLD_DEFAULT; - - /** Size of the sliding window for circuit breaker calculations. */ - private int circuitBreakerSlidingWindowSize = CIRCUIT_BREAKER_SLIDING_WINDOW_SIZE_DEFAULT; + /** Encapsulated circuit breaker configuration for failure detection. */ + private CircuitBreakerConfig failureDetector = CircuitBreakerConfig.builder().build(); - /** List of exception classes that are recorded as circuit breaker failures. */ - private List circuitBreakerIncludedExceptionList = CIRCUIT_BREAKER_INCLUDED_EXCEPTIONS_DEFAULT; - - /** List of exception classes that are ignored by the circuit breaker. */ - private List circuitBreakerIgnoreExceptionList = null; - - /** List of exception classes that trigger immediate fallback to next cluster. */ + /** List of exception classes that trigger immediate fallback to next database. */ private List> fallbackExceptionList = FALLBACK_EXCEPTIONS_DEFAULT; // ============ Failover Configuration Fields ============ /** Whether to retry failed commands during failover. */ private boolean retryOnFailover = false; - /** Minimum number of failures before circuit breaker is tripped. */ - private int circuitBreakerMinNumOfFailures = CIRCUITBREAKER_THRESHOLD_MIN_NUM_OF_FAILURES_DEFAULT; - - /** Whether automatic failback to higher-priority clusters is supported. */ + /** Whether automatic failback to higher-priority databases is supported. */ private boolean isFailbackSupported = true; /** Interval between checks for failback opportunities in milliseconds. */ private long failbackCheckInterval = FAILBACK_CHECK_INTERVAL_DEFAULT; - /** Grace period to keep clusters disabled after they become unhealthy in milliseconds. */ + /** Grace period to keep databases disabled after they become unhealthy in milliseconds. */ private long gracePeriod = GRACE_PERIOD_DEFAULT; /** Whether to forcefully terminate connections during failover. 
*/ @@ -1049,14 +1163,14 @@ public static class Builder { private int delayInBetweenFailoverAttempts = DELAY_IN_BETWEEN_FAILOVER_ATTEMPTS_DEFAULT; /** - * Constructs a new Builder with the specified cluster configurations. + * Constructs a new Builder with the specified database configurations. */ public Builder() { } /** - * Constructs a new Builder with the specified cluster configurations. - * @param databaseConfigs array of cluster configurations defining available Redis endpoints + * Constructs a new Builder with the specified database configurations. + * @param databaseConfigs array of database configurations defining available Redis endpoints * @throws JedisValidationException if databaseConfigs is null or empty */ public Builder(DatabaseConfig[] databaseConfigs) { @@ -1074,7 +1188,7 @@ public Builder(List databaseConfigs) { } /** - * Adds a pre-configured endpoint configuration. + * Adds a pre-configured database configuration. *

* This method allows adding a fully configured DatabaseConfig instance, providing maximum * flexibility for advanced configurations including custom health check strategies, connection @@ -1083,24 +1197,24 @@ public Builder(List databaseConfigs) { * @param databaseConfig the pre-configured database configuration * @return this builder */ - public Builder endpoint(DatabaseConfig databaseConfig) { + public Builder database(DatabaseConfig databaseConfig) { this.databaseConfigs.add(databaseConfig); return this; } /** - * Adds a Redis endpoint with custom client configuration. + * Adds a database endpoint with custom client configuration. *

- * This method allows specifying endpoint-specific configuration such as authentication, SSL + * This method allows specifying database-specific configuration such as authentication, SSL * settings, timeouts, etc. This configuration will override the default client configuration - * for this specific endpoint. + * for this specific database endpoint. *

* @param endpoint the Redis server endpoint * @param weight the weight for this endpoint (higher values = higher priority) * @param clientConfig the client configuration for this endpoint * @return this builder */ - public Builder endpoint(Endpoint endpoint, float weight, JedisClientConfig clientConfig) { + public Builder database(Endpoint endpoint, float weight, JedisClientConfig clientConfig) { DatabaseConfig databaseConfig = DatabaseConfig.builder(endpoint, clientConfig).weight(weight) .build(); @@ -1112,221 +1226,43 @@ public Builder endpoint(Endpoint endpoint, float weight, JedisClientConfig clien // ============ Retry Configuration Methods ============ /** - * Sets the maximum number of retry attempts including the initial call. + * Sets the encapsulated retry configuration for command execution. *

- * This controls how many times a failed operation will be retried before giving up. For - * example, if set to 3, the system will make 1 initial attempt plus 2 retry attempts for a - * total of 3 attempts. + * This provides a cleaner API for configuring retry behavior by using a dedicated + * {@link RetryConfig} object. *

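For orientation, here is a minimal sketch of the encapsulated retry configuration this hunk introduces. The builder methods are taken from this patch; the values and the `multiConfig` builder variable are illustrative only:

```java
// Sketch: command retry via the new nested RetryConfig (values are examples).
MultiDbConfig.RetryConfig retry = MultiDbConfig.RetryConfig.builder()
    .maxAttempts(3)                  // 1 initial call + up to 2 retries
    .waitDuration(500)               // base wait between attempts, in milliseconds
    .exponentialBackoffMultiplier(2) // waits grow 500ms -> 1000ms -> 2000ms
    .build();

multiConfig.commandRetry(retry);     // multiConfig is an assumed MultiDbConfig.Builder
```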
- *

- * Recommendations:
- *   • 1: No retries (fail fast)
- *   • 3: Standard retry behavior (default)
- *   • 5+: Aggressive retry for critical operations, but be careful of retry storms
- * @param retryMaxAttempts maximum number of attempts (must be >= 1) - * @return this builder instance for method chaining - */ - public Builder retryMaxAttempts(int retryMaxAttempts) { - this.retryMaxAttempts = retryMaxAttempts; - return this; - } - - /** - * Sets the base wait duration between retry attempts in milliseconds. - *

- * This duration is used as the base wait time and may be modified by the exponential backoff - * multiplier to create increasing delays between attempts. - *

- *

- * Typical Values:
- *   • 100-500ms: Fast retry for low-latency scenarios
- *   • 500-1000ms: Standard retry timing (default: 500ms)
- *   • 1000-5000ms: Conservative retry for high-latency networks
- * @param retryWaitDuration wait duration in milliseconds (must be >= 0) - * @return this builder instance for method chaining - */ - public Builder retryWaitDuration(int retryWaitDuration) { - this.retryWaitDuration = retryWaitDuration; - return this; - } - - /** - * Sets the exponential backoff multiplier for retry wait duration. - *

- * The wait duration increases exponentially between attempts using this multiplier. Formula: - * {@code actualWaitTime = baseWaitTime * (multiplier ^ attemptNumber)} - *

- *

- * Example with 500ms base wait and multiplier 2:
- *   • Attempt 1: 500ms wait
- *   • Attempt 2: 1000ms wait
- *   • Attempt 3: 2000ms wait
- * @param retryWaitDurationExponentialBackoffMultiplier exponential backoff multiplier (must be - * >= 1) - * @return this builder instance for method chaining - */ - public Builder retryWaitDurationExponentialBackoffMultiplier( - int retryWaitDurationExponentialBackoffMultiplier) { - this.retryWaitDurationExponentialBackoffMultiplier = retryWaitDurationExponentialBackoffMultiplier; - return this; - } - - /** - * Sets the list of exception classes that trigger retry attempts. - *

- * Only exceptions that match or inherit from the classes in this list will trigger retry - * attempts. This parameter supports inheritance - subclasses of the specified exceptions will - * also trigger retries. - *

- *

- * Default: {@link JedisConnectionException} - *

- * @param retryIncludedExceptionList list of exception classes that should be retried - * @return this builder instance for method chaining - */ - public Builder retryIncludedExceptionList(List retryIncludedExceptionList) { - this.retryIncludedExceptionList = retryIncludedExceptionList; - return this; - } - - /** - * Sets the list of exception classes that are ignored for retry purposes. - *

- * Exceptions that match or inherit from the classes in this list will not trigger retry - * attempts, even if they are included in the retry included exception list. This allows for - * fine-grained control over retry behavior. - *

- * @param retryIgnoreExceptionList list of exception classes to ignore for retries + * @param commandRetry the retry configuration * @return this builder instance for method chaining + * @see RetryConfig */ - public Builder retryIgnoreExceptionList(List retryIgnoreExceptionList) { - this.retryIgnoreExceptionList = retryIgnoreExceptionList; + public Builder commandRetry(RetryConfig commandRetry) { + this.commandRetry = commandRetry; return this; } // ============ Circuit Breaker Configuration Methods ============ /** - * Sets the failure rate threshold percentage that triggers circuit breaker activation. + * Sets the encapsulated circuit breaker configuration for failure detection. *

- * When both the failure rate and minimum number of failures exceeds this threshold, the circuit - * breaker transitions to the OPEN state and starts short-circuiting calls, enabling immediate - * failover to the next available cluster. + * This provides a cleaner API for configuring circuit breaker behavior by using a dedicated + * {@link CircuitBreakerConfig} object. *

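A matching sketch for the failure detector; the method names come from this patch, while the thresholds shown are illustrative:

```java
// Sketch: circuit breaker thresholds via the new nested CircuitBreakerConfig.
MultiDbConfig.CircuitBreakerConfig detector = MultiDbConfig.CircuitBreakerConfig.builder()
    .slidingWindowSize(1000)     // evaluate the last 1000 calls
    .failureRateThreshold(50.0f) // open the circuit at a 50% failure rate...
    .minNumOfFailures(500)       // ...once at least 500 failures have been recorded
    .build();

multiConfig.failureDetector(detector); // multiConfig is an assumed MultiDbConfig.Builder
```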
- *

- * Typical Values:
- *   • 30-40%: Aggressive failover for high-availability scenarios
- *   • 50%: Balanced approach (default)
- *   • 70-80%: Conservative failover to avoid false positives
- * @param circuitBreakerFailureRateThreshold failure rate threshold as percentage (0.0 to 100.0) - * @return this builder instance for method chaining - * @see #circuitBreakerMinNumOfFailures(int) - */ - public Builder circuitBreakerFailureRateThreshold(float circuitBreakerFailureRateThreshold) { - checkThresholds(circuitBreakerMinNumOfFailures, circuitBreakerFailureRateThreshold); - this.circuitBreakerFailureRateThreshold = circuitBreakerFailureRateThreshold; - return this; - } - - /** - * Sets the size of the sliding window for circuit breaker calculations. - * @param circuitBreakerSlidingWindowSize sliding window size + * @param failureDetector the circuit breaker configuration * @return this builder instance for method chaining + * @see CircuitBreakerConfig */ - public Builder circuitBreakerSlidingWindowSize(int circuitBreakerSlidingWindowSize) { - this.circuitBreakerSlidingWindowSize = circuitBreakerSlidingWindowSize; + public Builder failureDetector(CircuitBreakerConfig failureDetector) { + this.failureDetector = failureDetector; return this; } - /** - * Sets the minimum number of failures before circuit breaker is tripped. - *

- * When both the number of failures and failure rate exceeds this threshold, the circuit breaker - * will trip and prevent further requests from being sent to the cluster until it has recovered. - *

- *

- * Default: 1000 - *

- * @param circuitBreakerMinNumOfFailures minimum number of failures before circuit breaker is - * tripped - * @return this builder instance for method chaining - * @see #circuitBreakerFailureRateThreshold(float) - */ - public Builder circuitBreakerMinNumOfFailures(int circuitBreakerMinNumOfFailures) { - checkThresholds(circuitBreakerMinNumOfFailures, circuitBreakerFailureRateThreshold); - this.circuitBreakerMinNumOfFailures = circuitBreakerMinNumOfFailures; - return this; - } - - private void checkThresholds(int failures, float rate) { - if (failures == 0 && rate == 0) { - throw new JedisValidationException( - "Both circuitBreakerMinNumOfFailures and circuitBreakerFailureRateThreshold can not be 0 at the same time!"); - } - } - - /** - * Sets the list of exception classes that are recorded as circuit breaker failures. - *

- * Only exceptions matching or inheriting from these classes will count as failures for circuit - * breaker calculations. This allows fine-grained control over which errors should trigger - * failover. - *

- * @param circuitBreakerIncludedExceptionList list of exception classes that count as failures - * @return this builder instance for method chaining - */ - public Builder circuitBreakerIncludedExceptionList( - List circuitBreakerIncludedExceptionList) { - this.circuitBreakerIncludedExceptionList = circuitBreakerIncludedExceptionList; - return this; - } - - /** - * Sets the list of exception classes that are ignored by the circuit breaker. - *

- * Exceptions matching or inheriting from these classes will not affect circuit breaker failure - * rate calculations, even if they are included in the included exception list. - *

- * @param circuitBreakerIgnoreExceptionList list of exception classes to ignore - * @return this builder instance for method chaining - */ - public Builder circuitBreakerIgnoreExceptionList( - List circuitBreakerIgnoreExceptionList) { - this.circuitBreakerIgnoreExceptionList = circuitBreakerIgnoreExceptionList; - return this; - } - - /** - * Sets the list of exception classes that trigger immediate fallback to next cluster. - * @param circuitBreakerFallbackExceptionList list of exception classes that trigger fallback - * @return this builder instance for method chaining - * @deprecated Use {@link #fallbackExceptionList(java.util.List)} instead. - */ - @Deprecated - public Builder circuitBreakerFallbackExceptionList( - List> circuitBreakerFallbackExceptionList) { - return fallbackExceptionList(circuitBreakerFallbackExceptionList); - } - /** * Sets the list of exception classes that trigger immediate fallback to the next available - * cluster. + * database. *

* When these exceptions occur, the system will immediately attempt to failover to the next - * available cluster without waiting for circuit breaker thresholds. This enables fast failover + * available database without waiting for circuit breaker thresholds. This enables fast failover * for specific error conditions. *

*

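A hedged sketch of supplying the fallback list; the element type and the chosen exception classes are assumptions for illustration, not the library defaults:

```java
// Sketch: exceptions that should trigger an immediate failover attempt.
List<Class<? extends Throwable>> fallbacks = Arrays.asList(
    CallNotPermittedException.class,   // Resilience4j: circuit breaker already open
    JedisConnectionException.class);   // connection-level failures

multiConfig.fallbackExceptionList(fallbacks);
```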
@@ -1351,7 +1287,7 @@ public Builder fallbackExceptionList(List> fallbackEx * Sets whether failed commands should be retried during the failover process. *

* When enabled, commands that fail during failover will be retried according to the configured - * retry settings on the new cluster. When disabled, failed commands during failover will + * retry settings on the new database. When disabled, failed commands during failover will * immediately return the failure to the caller. *

*

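For illustration, the corresponding builder call (the value chosen here is an example, not a recommendation):

```java
// Sketch: fail fast during a failover instead of replaying the failed command
// on the newly selected database.
multiConfig.retryOnFailover(false);
```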
@@ -1370,19 +1306,19 @@ public Builder retryOnFailover(boolean retryOnFailover) { } /** - * Sets whether automatic failback to higher-priority clusters is supported. + * Sets whether automatic failback to higher-priority databases is supported. *

- * When enabled, the system will automatically monitor failed clusters using health checks and - * failback to higher-priority (higher weight) clusters when they recover. When disabled, + * When enabled, the system will automatically monitor failed databases using health checks and + * failback to higher-priority (higher weight) databases when they recover. When disabled, * failback must be triggered manually. *

*

 * Requirements for automatic failback:
- *   • Health checks must be enabled on cluster configurations
- *   • Grace period must elapse after cluster becomes unhealthy
- *   • Higher-priority cluster must pass health checks
+ *   • Health checks must be enabled on database configurations
+ *   • Grace period must elapse after database becomes unhealthy
+ *   • Higher-priority database must pass health checks
 *
* @param supported true to enable automatic failback, false for manual failback only * @return this builder instance for method chaining @@ -1393,10 +1329,10 @@ public Builder failbackSupported(boolean supported) { } /** - * Sets the interval between checks for failback opportunities to recovered clusters. + * Sets the interval between checks for failback opportunities to recovered databases. *

- * This controls how frequently the system checks if a higher-priority cluster has recovered and - * is available for failback. Lower values provide faster failback response but increase + * This controls how frequently the system checks if a higher-priority database has recovered + * and is available for failback. Lower values provide faster failback response but increase * monitoring overhead. *

*

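A short sketch combining the failback settings discussed above (values are illustrative):

```java
// Sketch: enable automatic failback and probe recovered databases once per second.
multiConfig
    .failbackSupported(true)
    .failbackCheckInterval(1000)  // milliseconds between failback checks
    .gracePeriod(10000);          // keep an unhealthy database disabled for 10 seconds
```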
@@ -1416,11 +1352,11 @@ public Builder failbackCheckInterval(long failbackCheckInterval) { } /** - * Sets the grace period to keep clusters disabled after they become unhealthy. + * Sets the grace period to keep databases disabled after they become unhealthy. *

- * After a cluster is marked as unhealthy, it remains disabled for this grace period before + * After a database is marked as unhealthy, it remains disabled for this grace period before * being eligible for failback, even if health checks indicate recovery. This prevents rapid - * oscillation between clusters during intermittent failures. + * oscillation between databases during intermittent failures. *

*

* Considerations: @@ -1439,10 +1375,10 @@ public Builder gracePeriod(long gracePeriod) { } /** - * Sets whether to forcefully terminate connections during failover for faster cluster + * Sets whether to forcefully terminate connections during failover for faster database * switching. *

- * When enabled, existing connections to the failed cluster are immediately closed during + * When enabled, existing connections to the failed database are immediately closed during * failover, potentially reducing failover time but may cause some in-flight operations to fail. * When disabled, connections are closed gracefully. *

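Illustrative use of the flag, accepting the trade-off described above:

```java
// Sketch: prefer faster switchover at the cost of failing in-flight operations.
multiConfig.fastFailover(true); // force-close connections to the failed database
```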
@@ -1465,7 +1401,7 @@ public Builder fastFailover(boolean fastFailover) { * Sets the maximum number of failover attempts. *

* This setting controls how many times the system will attempt to failover to a different - * cluster before giving up. For example, if set to 3, the system will make 1 initial attempt + * database before giving up. For example, if set to 3, the system will make 1 initial attempt * plus 2 failover attempts for a total of 3 attempts. *

*

@@ -1483,8 +1419,8 @@ public Builder maxNumFailoverAttempts(int maxNumFailoverAttempts) { * Sets the delay in milliseconds between failover attempts. *

* This setting controls how long the system will wait before attempting to failover to a - * different cluster. For example, if set to 1000, the system will wait 1 second before - * attempting to failover to a different cluster. + * different database. For example, if set to 1000, the system will wait 1 second before + * attempting to failover to a different database. *

*

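A sketch of bounding the failover loop; `maxNumFailoverAttempts` appears in this patch, while the name of the delay setter is inferred from the field it sets and should be treated as an assumption:

```java
// Sketch: give up after a few failover attempts, spaced 12 seconds apart.
multiConfig
    .maxNumFailoverAttempts(3)              // overall attempts before giving up
    .delayInBetweenFailoverAttempts(12000); // 12 s between attempts (assumed setter name)
```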
* Default: {@value #DELAY_IN_BETWEEN_FAILOVER_ATTEMPTS_DEFAULT} milliseconds @@ -1511,18 +1447,10 @@ public MultiDbConfig build() { MultiDbConfig config = new MultiDbConfig(this.databaseConfigs.toArray(new DatabaseConfig[0])); // Copy retry configuration - config.retryMaxAttempts = this.retryMaxAttempts; - config.retryWaitDuration = Duration.ofMillis(this.retryWaitDuration); - config.retryWaitDurationExponentialBackoffMultiplier = this.retryWaitDurationExponentialBackoffMultiplier; - config.retryIncludedExceptionList = this.retryIncludedExceptionList; - config.retryIgnoreExceptionList = this.retryIgnoreExceptionList; + config.commandRetry = this.commandRetry; // Copy circuit breaker configuration - config.circuitBreakerMinNumOfFailures = this.circuitBreakerMinNumOfFailures; - config.circuitBreakerFailureRateThreshold = this.circuitBreakerFailureRateThreshold; - config.circuitBreakerSlidingWindowSize = this.circuitBreakerSlidingWindowSize; - config.circuitBreakerIncludedExceptionList = this.circuitBreakerIncludedExceptionList; - config.circuitBreakerIgnoreExceptionList = this.circuitBreakerIgnoreExceptionList; + config.failureDetector = this.failureDetector; // Copy fallback and failover configuration config.fallbackExceptionList = this.fallbackExceptionList; diff --git a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java index 002de51666..a42a4fa00a 100644 --- a/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java +++ b/src/main/java/redis/clients/jedis/builders/MultiDbClientBuilder.java @@ -26,9 +26,9 @@ *

 *   • Circuit Breaker Integration: Built-in circuit breaker with configurable thresholds
 *   • Health Monitoring: Automatic health checks with configurable strategies
- *   • Event Handling: Listen to cluster switch events for monitoring and alerting
+ *   • Event Handling: Listen to database switch events for monitoring and alerting
- *   • Flexible Configuration: Support for both simple and advanced multi-cluster configurations
+ *   • Flexible Configuration: Support for both simple and advanced multi-database configurations
 *

    @@ -39,18 +39,22 @@ * MultiDbClient client = MultiDbClient.builder() * .multiDbConfig( * MultiDbConfig.builder() - * .endpoint( + * .database( * DatabaseConfig.builder( * east, * DefaultJedisClientConfig.builder().credentials(credentialsEast).build()) * .weight(100.0f) * .build()) - * .endpoint(DatabaseConfig.builder( + * .database(DatabaseConfig.builder( * west, * DefaultJedisClientConfig.builder().credentials(credentialsWest).build()) * .weight(50.0f).build()) - * .circuitBreakerFailureRateThreshold(50.0f) - * .retryMaxAttempts(3) + * .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder() + * .failureRateThreshold(50.0f) + * .build()) + * .commandRetry(MultiDbConfig.RetryConfig.builder() + * .maxAttempts(3) + * .build()) * .build() * ) * .databaseSwitchListener(event -> @@ -112,7 +116,7 @@ protected ConnectionProvider createDefaultConnectionProvider() { throw new IllegalArgumentException("At least one endpoint must be specified"); } - // Create the multi-cluster connection provider + // Create the multi-database connection provider MultiDbConnectionProvider provider = new MultiDbConnectionProvider(multiDbConfig); // Set database switch listener if provided diff --git a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java index 3dfc65a1f5..0dd939aca0 100644 --- a/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java +++ b/src/main/java/redis/clients/jedis/mcf/CircuitBreakerThresholdsAdapter.java @@ -79,6 +79,6 @@ int getSlidingWindowSize() { // and rely on our custom evaluateThresholds() logic. minimumNumberOfCalls = Integer.MAX_VALUE; - slidingWindowSize = multiDbConfig.getCircuitBreakerSlidingWindowSize(); + slidingWindowSize = multiDbConfig.getFailureDetector().getSlidingWindowSize(); } } diff --git a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java index c431764d42..4fc054817b 100644 --- a/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java +++ b/src/main/java/redis/clients/jedis/mcf/JedisFailoverException.java @@ -3,7 +3,7 @@ import redis.clients.jedis.exceptions.JedisConnectionException; /** - * Exception thrown when a failover attempt fails due to lack of available/healthy clusters. + * Exception thrown when a failover attempt fails due to lack of available/healthy databases. *

    * This exception itself is not thrown, see the child exceptions for more details. *

@@ -11,9 +11,9 @@ * @see JedisFailoverException.JedisTemporarilyNotAvailableException */ public class JedisFailoverException extends JedisConnectionException { - private static final String MESSAGE = "Cluster/database endpoint could not failover since the MultiDbConfig was not " - + "provided with an additional cluster/database endpoint according to its prioritized sequence. " - + "If applicable, consider falling back OR restarting with an available cluster/database endpoint"; + private static final String MESSAGE = "Database endpoint could not failover since the MultiDbConfig was not " + + "provided with an additional database endpoint according to its prioritized sequence. " + + "If applicable, consider falling back OR restarting with an available database endpoint"; public JedisFailoverException(String s) { super(s); @@ -24,8 +24,8 @@ public JedisFailoverException() { } /** - * Exception thrown when a failover attempt fails due to lack of available/healthy clusters, and - * the max number of failover attempts has been exceeded. And there is still no healthy cluster. + * Exception thrown when a failover attempt fails due to lack of available/healthy databases, and + * the max number of failover attempts has been exceeded. And there is still no healthy database. *

    * See the configuration properties * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and @@ -42,10 +42,10 @@ public JedisPermanentlyNotAvailableException() { } /** - * Exception thrown when a failover attempt fails due to lack of available/healthy clusters, but + * Exception thrown when a failover attempt fails due to lack of available/healthy databases, but * the max number of failover attempts has not been exceeded yet. Though there is no healthy - * cluster including the selected/current one, given configuration suggests that it should be a - * temporary condition and it is possible that there will be a healthy cluster available. + * database including the selected/current one, given configuration suggests that it should be a + * temporary condition and it is possible that there will be a healthy database available. *

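To make the temporary/permanent distinction concrete, here is a hedged sketch of one way a caller might react; `client` and `scheduleRetry()` are hypothetical, and only the exception types come from this file:

```java
// Sketch: back off on a temporary outage, surface a permanent one.
try {
    client.set("greeting", "hello");
} catch (JedisFailoverException.JedisTemporarilyNotAvailableException e) {
    // every database is unhealthy right now, but failover attempts remain: retry later
    scheduleRetry();
} catch (JedisFailoverException.JedisPermanentlyNotAvailableException e) {
    // the configured failover attempts are exhausted: alert and propagate
    throw e;
}
```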
    * See the configuration properties * {@link redis.clients.jedis.MultiDbConfig#maxNumFailoverAttempts} and diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java index d3b7c48e2e..1b5d48a19c 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbCommandExecutor.java @@ -14,10 +14,10 @@ /** * @author Allen Terleto (aterleto) *

    - * CommandExecutor with built-in retry, circuit-breaker, and failover to another - * cluster/database endpoint. With this executor users can seamlessly failover to Disaster - * Recovery (DR), Backup, and Active-Active cluster(s) by using simple configuration which - * is passed through from Resilience4j - https://resilience4j.readme.io/docs + * CommandExecutor with built-in retry, circuit-breaker, and failover to another database + * endpoint. With this executor users can seamlessly failover to Disaster Recovery (DR), + * Backup, and Active-Active cluster(s) by using simple configuration which is passed + * through from Resilience4j - https://resilience4j.readme.io/docs *

    */ @Experimental @@ -37,12 +37,12 @@ public T executeCommand(CommandObject commandObject) { supplier.withCircuitBreaker(database.getCircuitBreaker()); supplier.withRetry(database.getRetry()); supplier.withFallback(provider.getFallbackExceptionList(), - e -> this.handleClusterFailover(commandObject, database)); + e -> this.handleDatabaseFailover(commandObject, database)); try { return supplier.decorate().get(); } catch (Exception e) { if (database.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(database)) { - clusterFailover(database); + databaseFailover(database); } throw e; } @@ -51,10 +51,10 @@ public T executeCommand(CommandObject commandObject) { /** * Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios */ - private T handleExecuteCommand(CommandObject commandObject, Database cluster) { + private T handleExecuteCommand(CommandObject commandObject, Database database) { Connection connection; try { - connection = cluster.getConnection(); + connection = database.getConnection(); } catch (JedisConnectionException e) { provider.assertOperability(); throw e; @@ -62,10 +62,10 @@ private T handleExecuteCommand(CommandObject commandObject, Database clus try { return connection.executeCommand(commandObject); } catch (Exception e) { - if (cluster.retryOnFailover() && !isActiveDatabase(cluster) - && isCircuitBreakerTrackedException(e, cluster)) { + if (database.retryOnFailover() && !isActiveDatabase(database) + && isCircuitBreakerTrackedException(e, database)) { throw new ConnectionFailoverException( - "Command failed during failover: " + cluster.getCircuitBreaker().getName(), e); + "Command failed during failover: " + database.getCircuitBreaker().getName(), e); } throw e; } finally { @@ -77,11 +77,11 @@ && isCircuitBreakerTrackedException(e, cluster)) { * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker * failure scenarios */ - private T handleClusterFailover(CommandObject commandObject, Database cluster) { + private T handleDatabaseFailover(CommandObject commandObject, Database database) { - clusterFailover(cluster); + databaseFailover(database); - // Recursive call to the initiating method so the operation can be retried on the next cluster + // Recursive call to the initiating method so the operation can be retried on the next database // connection return executeCommand(commandObject); } diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java index 97f7fa658f..e6d6ddabfd 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionProvider.java @@ -82,9 +82,9 @@ public class MultiDbConnectionProvider implements ConnectionProvider { */ private Consumer databaseSwitchListener; - private List> fallbackExceptionList; + private final List> fallbackExceptionList; - private HealthStatusManager healthStatusManager = new HealthStatusManager(); + private final HealthStatusManager healthStatusManager = new HealthStatusManager(); // Flag to control when handleHealthStatusChange should process events (only after initialization) private volatile boolean initializationComplete = false; @@ -99,12 +99,12 @@ public class MultiDbConnectionProvider implements ConnectionProvider { }); // Store retry and circuit breaker configs for dynamic database addition/removal - private RetryConfig retryConfig; - private CircuitBreakerConfig 
circuitBreakerConfig; - private MultiDbConfig multiDbConfig; + private final RetryConfig retryConfig; + private final CircuitBreakerConfig circuitBreakerConfig; + private final MultiDbConfig multiDbConfig; - private AtomicLong failoverFreezeUntil = new AtomicLong(0); - private AtomicInteger failoverAttemptCount = new AtomicInteger(0); + private final AtomicLong failoverFreezeUntil = new AtomicLong(0); + private final AtomicInteger failoverAttemptCount = new AtomicInteger(0); public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) { @@ -114,51 +114,14 @@ public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) { this.multiDbConfig = multiDbConfig; ////////////// Configure Retry //////////////////// - - RetryConfig.Builder retryConfigBuilder = RetryConfig.custom(); - retryConfigBuilder.maxAttempts(multiDbConfig.getRetryMaxAttempts()); - retryConfigBuilder.intervalFunction( - IntervalFunction.ofExponentialBackoff(multiDbConfig.getRetryWaitDuration(), - multiDbConfig.getRetryWaitDurationExponentialBackoffMultiplier())); - retryConfigBuilder.failAfterMaxAttempts(false); // JedisConnectionException will be thrown - retryConfigBuilder.retryExceptions( - multiDbConfig.getRetryIncludedExceptionList().stream().toArray(Class[]::new)); - - List retryIgnoreExceptionList = multiDbConfig.getRetryIgnoreExceptionList(); - if (retryIgnoreExceptionList != null) - retryConfigBuilder.ignoreExceptions(retryIgnoreExceptionList.stream().toArray(Class[]::new)); - - this.retryConfig = retryConfigBuilder.build(); + MultiDbConfig.RetryConfig commandRetry = multiDbConfig.getCommandRetry(); + this.retryConfig = buildRetryConfig(commandRetry); ////////////// Configure Circuit Breaker //////////////////// - - CircuitBreakerConfig.Builder circuitBreakerConfigBuilder = CircuitBreakerConfig.custom(); - - CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(multiDbConfig); - circuitBreakerConfigBuilder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls()); - circuitBreakerConfigBuilder.failureRateThreshold(adapter.getFailureRateThreshold()); - circuitBreakerConfigBuilder.slidingWindowSize(adapter.getSlidingWindowSize()); - circuitBreakerConfigBuilder.slidingWindowType(adapter.getSlidingWindowType()); - - circuitBreakerConfigBuilder.recordExceptions( - multiDbConfig.getCircuitBreakerIncludedExceptionList().stream().toArray(Class[]::new)); - circuitBreakerConfigBuilder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State - // transitions - // are - // forced. No - // half open - // states - // are used - - List circuitBreakerIgnoreExceptionList = multiDbConfig - .getCircuitBreakerIgnoreExceptionList(); - if (circuitBreakerIgnoreExceptionList != null) circuitBreakerConfigBuilder - .ignoreExceptions(circuitBreakerIgnoreExceptionList.stream().toArray(Class[]::new)); - - this.circuitBreakerConfig = circuitBreakerConfigBuilder.build(); + MultiDbConfig.CircuitBreakerConfig failureDetector = multiDbConfig.getFailureDetector(); + this.circuitBreakerConfig = buildCircuitBreakerConfig(failureDetector, multiDbConfig); ////////////// Configure Database Map //////////////////// - DatabaseConfig[] databaseConfigs = multiDbConfig.getDatabaseConfigs(); // Now add databases - health checks will start but events will be queued @@ -194,6 +157,57 @@ public MultiDbConnectionProvider(MultiDbConfig multiDbConfig) { } } + /** + * Builds a Resilience4j RetryConfig from Jedis MultiDbConfig.RetryConfig. 
+ * @param commandRetry the Jedis retry configuration + * @return configured Resilience4j RetryConfig + */ + private RetryConfig buildRetryConfig(redis.clients.jedis.MultiDbConfig.RetryConfig commandRetry) { + RetryConfig.Builder builder = RetryConfig.custom(); + + builder.maxAttempts(commandRetry.getMaxAttempts()); + builder.intervalFunction(IntervalFunction.ofExponentialBackoff(commandRetry.getWaitDuration(), + commandRetry.getExponentialBackoffMultiplier())); + builder.failAfterMaxAttempts(false); // JedisConnectionException will be thrown + builder.retryExceptions(commandRetry.getIncludedExceptionList().stream().toArray(Class[]::new)); + + List ignoreExceptions = commandRetry.getIgnoreExceptionList(); + if (ignoreExceptions != null) { + builder.ignoreExceptions(ignoreExceptions.stream().toArray(Class[]::new)); + } + + return builder.build(); + } + + /** + * Builds Resilience4j CircuitBreakerConfig from Jedis CircuitBreakerConfig. + * @param failureDetector the Jedis circuit breaker configuration + * @param multiDbConfig the multi-database configuration (for adapter) + * @return configured Resilience4j CircuitBreakerConfig + */ + private CircuitBreakerConfig buildCircuitBreakerConfig( + MultiDbConfig.CircuitBreakerConfig failureDetector, MultiDbConfig multiDbConfig) { + CircuitBreakerConfig.Builder builder = CircuitBreakerConfig.custom(); + + CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(multiDbConfig); + builder.minimumNumberOfCalls(adapter.getMinimumNumberOfCalls()); + builder.failureRateThreshold(adapter.getFailureRateThreshold()); + builder.slidingWindowSize(adapter.getSlidingWindowSize()); + builder.slidingWindowType(adapter.getSlidingWindowType()); + + builder.recordExceptions( + failureDetector.getIncludedExceptionList().stream().toArray(Class[]::new)); + builder.automaticTransitionFromOpenToHalfOpenEnabled(false); // State transitions are forced. + // No half open states are used + + List ignoreExceptions = failureDetector.getIgnoreExceptionList(); + if (ignoreExceptions != null) { + builder.ignoreExceptions(ignoreExceptions.stream().toArray(Class[]::new)); + } + + return builder.build(); + } + /** * Adds a new database endpoint to the provider. * @param databaseConfig the configuration for the new database @@ -247,11 +261,11 @@ public void remove(Endpoint endpoint) { if (isActiveDatabase) { log.info("Active database is being removed. 
Finding a new active database..."); - Map.Entry candidate = findWeightedHealthyClusterToIterate( + Map.Entry candidate = findWeightedHealthyDatabaseToIterate( databaseToRemove); if (candidate != null) { - Database selectedCluster = candidate.getValue(); - if (setActiveDatabase(selectedCluster, true)) { + Database selectedDatabase = candidate.getValue(); + if (setActiveDatabase(selectedDatabase, true)) { log.info("New active database set to {}", candidate.getKey()); notificationData = candidate; } @@ -444,12 +458,12 @@ void periodicFailbackCheck() { // Perform failback if we found a better candidate if (bestCandidate != null) { - Database selectedCluster = bestCandidate.getValue(); + Database selectedDatabase = bestCandidate.getValue(); log.info("Performing failback from {} to {} (higher weight database available)", activeDatabase.getCircuitBreaker().getName(), - selectedCluster.getCircuitBreaker().getName()); - if (setActiveDatabase(selectedCluster, true)) { - onDatabaseSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedCluster); + selectedDatabase.getCircuitBreaker().getName()); + if (setActiveDatabase(selectedDatabase, true)) { + onDatabaseSwitch(SwitchReason.FAILBACK, bestCandidate.getKey(), selectedDatabase); } } } catch (Exception e) { @@ -458,11 +472,11 @@ void periodicFailbackCheck() { } Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) { - Map.Entry databaseToIterate = findWeightedHealthyClusterToIterate( + Map.Entry databaseToIterate = findWeightedHealthyDatabaseToIterate( iterateFrom); if (databaseToIterate == null) { // throws exception anyway since not able to iterate - handleNoHealthyCluster(); + handleNoHealthyDatabase(); } Database database = databaseToIterate.getValue(); @@ -473,7 +487,7 @@ Endpoint switchToHealthyDatabase(SwitchReason reason, Database iterateFrom) { return databaseToIterate.getKey(); } - private void handleNoHealthyCluster() { + private void handleNoHealthyDatabase() { int max = multiDbConfig.getMaxNumFailoverAttempts(); log.error("No healthy database available to switch to"); if (failoverAttemptCount.get() > max) { @@ -514,7 +528,7 @@ private boolean markAsFreeze() { public void assertOperability() { Database current = activeDatabase; if (!current.isHealthy() && !this.canIterateFrom(current)) { - handleNoHealthyCluster(); + handleNoHealthyDatabase(); } } @@ -524,7 +538,7 @@ public void assertOperability() { private static Predicate> filterByHealth = c -> c.getValue() .isHealthy(); - private Map.Entry findWeightedHealthyClusterToIterate(Database iterateFrom) { + private Map.Entry findWeightedHealthyDatabaseToIterate(Database iterateFrom) { return databaseMap.entrySet().stream().filter(filterByHealth) .filter(entry -> entry.getValue() != iterateFrom).max(maxByWeight).orElse(null); } @@ -546,10 +560,8 @@ private void validateTargetConnection(Database database) { State originalState = circuitBreaker.getState(); try { // Transitions the state machine to a CLOSED state, allowing state transition, metrics and - // event publishing - // Safe since the activeMultiClusterIndex has not yet been changed and therefore no traffic - // will be routed - // yet + // event publishing. Safe since the activeDatabase has not yet been changed and therefore no + // traffic will be routed yet circuitBreaker.transitionToClosedState(); try (Connection targetConnection = database.getConnection()) { @@ -569,7 +581,7 @@ private void validateTargetConnection(Database database) { /** * Returns the set of all configured endpoints. 
- * @return + * @return the set of all configured endpoints */ public Set getEndpoints() { return new HashSet<>(databaseMap.keySet()); @@ -612,11 +624,11 @@ public void forceActiveDatabase(Endpoint endpoint, long forcedActiveDuration) { } private boolean setActiveDatabase(Database database, boolean validateConnection) { - // Database database = clusterEntry.getValue(); + // Database database = databaseEntry.getValue(); // Field-level synchronization is used to avoid the edge case in which // setActiveDatabase() is called at the same time activeDatabaseChangeLock.lock(); - Database oldCluster; + Database oldDatabase; try { // Allows an attempt to reset the current database from a FORCED_OPEN to CLOSED state in the @@ -625,25 +637,25 @@ private boolean setActiveDatabase(Database database, boolean validateConnection) if (validateConnection) validateTargetConnection(database); - String originalClusterName = getDatabaseCircuitBreaker().getName(); + String originalDatabaseName = getDatabaseCircuitBreaker().getName(); if (activeDatabase == database) log.warn("Database/database endpoint '{}' successfully closed its circuit breaker", - originalClusterName); + originalDatabaseName); else log.warn("Database/database endpoint successfully updated from '{}' to '{}'", - originalClusterName, database.circuitBreaker.getName()); - oldCluster = activeDatabase; + originalDatabaseName, database.circuitBreaker.getName()); + oldDatabase = activeDatabase; activeDatabase = database; } finally { activeDatabaseChangeLock.unlock(); } - boolean switched = oldCluster != database; + boolean switched = oldDatabase != database; if (switched && this.multiDbConfig.isFastFailover()) { log.info("Forcing disconnect of all active connections in old database: {}", - oldCluster.circuitBreaker.getName()); - oldCluster.forceDisconnect(); + oldDatabase.circuitBreaker.getName()); + oldDatabase.forceDisconnect(); log.info("Disconnected all active connections in old database: {}", - oldCluster.circuitBreaker.getName()); + oldDatabase.circuitBreaker.getName()); } return switched; @@ -737,7 +749,7 @@ public CircuitBreaker getDatabaseCircuitBreaker() { * possible. Users can manually failback to an available database */ public boolean canIterateFrom(Database iterateFrom) { - Map.Entry e = findWeightedHealthyClusterToIterate(iterateFrom); + Map.Entry e = findWeightedHealthyDatabaseToIterate(iterateFrom); return e != null; } @@ -849,11 +861,11 @@ public boolean retryOnFailover() { } public int getCircuitBreakerMinNumOfFailures() { - return multiDbConfig.getCircuitBreakerMinNumOfFailures(); + return multiDbConfig.getFailureDetector().getMinNumOfFailures(); } public float getCircuitBreakerFailureRateThreshold() { - return multiDbConfig.getCircuitBreakerFailureRateThreshold(); + return multiDbConfig.getFailureDetector().getFailureRateThreshold(); } public boolean isDisabled() { diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java index 9bd1f35440..13ba85d734 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbConnectionSupplier.java @@ -9,7 +9,7 @@ import redis.clients.jedis.mcf.MultiDbConnectionProvider.Database; /** - * ConnectionProvider with built-in retry, circuit-breaker, and failover to another cluster/database + * ConnectionProvider with built-in retry, circuit-breaker, and failover to another /database * endpoint. 
With this executor users can seamlessly failover to Disaster Recovery (DR), Backup, and * Active-Active cluster(s) by using simple configuration */ @@ -21,21 +21,21 @@ public MultiDbConnectionSupplier(MultiDbConnectionProvider provider) { } public Connection getConnection() { - Database cluster = provider.getDatabase(); // Pass this by reference for thread safety + Database database = provider.getDatabase(); // Pass this by reference for thread safety DecorateSupplier supplier = Decorators - .ofSupplier(() -> this.handleGetConnection(cluster)); + .ofSupplier(() -> this.handleGetConnection(database)); - supplier.withRetry(cluster.getRetry()); - supplier.withCircuitBreaker(cluster.getCircuitBreaker()); + supplier.withRetry(database.getRetry()); + supplier.withCircuitBreaker(database.getCircuitBreaker()); supplier.withFallback(provider.getFallbackExceptionList(), - e -> this.handleClusterFailover(cluster)); + e -> this.handleDatabaseFailover(database)); try { return supplier.decorate().get(); } catch (Exception e) { - if (cluster.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(cluster)) { - clusterFailover(cluster); + if (database.getCircuitBreaker().getState() == State.OPEN && isActiveDatabase(database)) { + databaseFailover(database); } throw e; } @@ -44,8 +44,8 @@ public Connection getConnection() { /** * Functional interface wrapped in retry and circuit breaker logic to handle happy path scenarios */ - private Connection handleGetConnection(Database cluster) { - Connection connection = cluster.getConnection(); + private Connection handleGetConnection(Database database) { + Connection connection = database.getConnection(); connection.ping(); return connection; } @@ -54,11 +54,11 @@ private Connection handleGetConnection(Database cluster) { * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker * failure scenarios */ - private Connection handleClusterFailover(Database cluster) { + private Connection handleDatabaseFailover(Database database) { - clusterFailover(cluster); + databaseFailover(database); - // Recursive call to the initiating method so the operation can be retried on the next cluster + // Recursive call to the initiating method so the operation can be retried on the next database // connection return getConnection(); } diff --git a/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java index e9fc874a2d..c404202d4b 100644 --- a/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java +++ b/src/main/java/redis/clients/jedis/mcf/MultiDbFailoverBase.java @@ -13,10 +13,8 @@ * @author Allen Terleto (aterleto) *

    * Base class for CommandExecutor with built-in retry, circuit-breaker, and failover to - * another cluster/database endpoint. With this executor users can seamlessly failover to - * Disaster Recovery (DR), Backup, and Active-Active cluster(s) by using simple - * configuration which is passed through from Resilience4j - - * https://resilience4j.readme.io/docs + * another database endpoint. With this executor users can seamlessly failover to Disaster + * Recovery (DR), Backup, and Active-Active cluster(s) by using simple configuration *

    */ @Experimental @@ -38,7 +36,7 @@ public void close() { * Functional interface wrapped in retry and circuit breaker logic to handle open circuit breaker * failure scenarios */ - protected void clusterFailover(Database database) { + protected void databaseFailover(Database database) { lock.lock(); CircuitBreaker circuitBreaker = database.getCircuitBreaker(); @@ -70,7 +68,8 @@ protected void clusterFailover(Database database) { // only the first one will trigger a failover, and make the CB FORCED_OPEN. // when the rest reaches here, the active database is already the next one, and should be // different than - // active CB. If its the same one and there are no more clusters to failover to, then throw an + // active CB. If its the same one and there are no more databases to failover to, then throw + // an // exception else if (database == provider.getDatabase()) { provider.switchToHealthyDatabase(SwitchReason.CIRCUIT_BREAKER, database); diff --git a/src/test/java/redis/clients/jedis/MultiDbClientTest.java b/src/test/java/redis/clients/jedis/MultiDbClientTest.java index 43673da1ed..6b48b36cdf 100644 --- a/src/test/java/redis/clients/jedis/MultiDbClientTest.java +++ b/src/test/java/redis/clients/jedis/MultiDbClientTest.java @@ -57,8 +57,8 @@ public static void setupAdminClients() throws IOException { void setUp() { // Create a simple resilient client with mock endpoints for testing MultiDbConfig clientConfig = MultiDbConfig.builder() - .endpoint(endpoint1.getHostAndPort(), 100.0f, endpoint1.getClientConfigBuilder().build()) - .endpoint(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build()) + .database(endpoint1.getHostAndPort(), 100.0f, endpoint1.getClientConfigBuilder().build()) + .database(endpoint2.getHostAndPort(), 50.0f, endpoint2.getClientConfigBuilder().build()) .build(); client = MultiDbClient.builder().multiDbConfig(clientConfig).build(); @@ -72,111 +72,111 @@ void tearDown() { } @Test - void testAddRemoveEndpointWithEndpointInterface() { + void testAddRemoveDatabaseWithEndpointInterface() { Endpoint newEndpoint = new HostAndPort("unavailable", 6381); assertDoesNotThrow( - () -> client.addEndpoint(newEndpoint, 25.0f, DefaultJedisClientConfig.builder().build())); + () -> client.addDatabase(newEndpoint, 25.0f, DefaultJedisClientConfig.builder().build())); - assertThat(client.getEndpoints(), hasItems(newEndpoint)); + assertThat(client.getDatabaseEndpoints(), hasItems(newEndpoint)); - assertDoesNotThrow(() -> client.removeEndpoint(newEndpoint)); + assertDoesNotThrow(() -> client.removeDatabase(newEndpoint)); - assertThat(client.getEndpoints(), not(hasItems(newEndpoint))); + assertThat(client.getDatabaseEndpoints(), not(hasItems(newEndpoint))); } @Test - void testAddRemoveEndpointWithDatabaseConfig() { + void testAddRemoveDatabaseWithDatabaseConfig() { // todo : (@ggivo) Replace HostAndPort with Endpoint HostAndPort newEndpoint = new HostAndPort("unavailable", 6381); DatabaseConfig newConfig = DatabaseConfig .builder(newEndpoint, DefaultJedisClientConfig.builder().build()).weight(25.0f).build(); - assertDoesNotThrow(() -> client.addEndpoint(newConfig)); + assertDoesNotThrow(() -> client.addDatabase(newConfig)); - assertThat(client.getEndpoints(), hasItems(newEndpoint)); + assertThat(client.getDatabaseEndpoints(), hasItems(newEndpoint)); - assertDoesNotThrow(() -> client.removeEndpoint(newEndpoint)); + assertDoesNotThrow(() -> client.removeDatabase(newEndpoint)); - assertThat(client.getEndpoints(), not(hasItems(newEndpoint))); + 
assertThat(client.getDatabaseEndpoints(), not(hasItems(newEndpoint))); } @Test void testSetActiveDatabase() { - Endpoint endpoint = client.getActiveEndpoint(); + Endpoint endpoint = client.getActiveDatabaseEndpoint(); awaitIsHealthy(endpoint1.getHostAndPort()); awaitIsHealthy(endpoint2.getHostAndPort()); // Ensure we have a healthy endpoint to switch to - Endpoint newEndpoint = client.getEndpoints().stream() + Endpoint newEndpoint = client.getDatabaseEndpoints().stream() .filter(e -> e.equals(endpoint) && client.isHealthy(e)).findFirst().orElse(null); assertNotNull(newEndpoint); // Switch to the new endpoint client.setActiveDatabase(newEndpoint); - assertEquals(newEndpoint, client.getActiveEndpoint()); + assertEquals(newEndpoint, client.getActiveDatabaseEndpoint()); } @Test void testBuilderWithMultipleEndpointTypes() { MultiDbConfig clientConfig = MultiDbConfig.builder() - .endpoint(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build()) - .endpoint(DatabaseConfig + .database(endpoint1.getHostAndPort(), 100.0f, DefaultJedisClientConfig.builder().build()) + .database(DatabaseConfig .builder(endpoint2.getHostAndPort(), DefaultJedisClientConfig.builder().build()) .weight(50.0f).build()) .build(); try (MultiDbClient testClient = MultiDbClient.builder().multiDbConfig(clientConfig).build()) { - assertThat(testClient.getEndpoints().size(), equalTo(2)); - assertThat(testClient.getEndpoints(), + assertThat(testClient.getDatabaseEndpoints().size(), equalTo(2)); + assertThat(testClient.getDatabaseEndpoints(), hasItems(endpoint1.getHostAndPort(), endpoint2.getHostAndPort())); } } @Test - public void testForceActiveEndpoint() { - Endpoint endpoint = client.getActiveEndpoint(); + public void testForceActiveDatabase() { + Endpoint endpoint = client.getActiveDatabaseEndpoint(); // Ensure we have a healthy endpoint to switch to awaitIsHealthy(endpoint1.getHostAndPort()); awaitIsHealthy(endpoint2.getHostAndPort()); - Endpoint newEndpoint = client.getEndpoints().stream() + Endpoint newEndpoint = client.getDatabaseEndpoints().stream() .filter(e -> e.equals(endpoint) && client.isHealthy(e)).findFirst().orElse(null); assertNotNull(newEndpoint); // Force switch to the new endpoint for 10 seconds - client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis()); + client.forceActiveDatabase(newEndpoint, Duration.ofMillis(100).toMillis()); // Verify the active endpoint has changed - assertEquals(newEndpoint, client.getActiveEndpoint()); + assertEquals(newEndpoint, client.getActiveDatabaseEndpoint()); } @Test - public void testForceActiveEndpointWithNonHealthyEndpoint() { + public void testForceActiveDatabaseWithNonHealthyEndpoint() { Endpoint newEndpoint = new HostAndPort("unavailable", 6381); - client.addEndpoint(newEndpoint, 25.0f, DefaultJedisClientConfig.builder().build()); + client.addDatabase(newEndpoint, 25.0f, DefaultJedisClientConfig.builder().build()); assertThrows(JedisValidationException.class, - () -> client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis())); + () -> client.forceActiveDatabase(newEndpoint, Duration.ofMillis(100).toMillis())); } @Test - public void testForceActiveEndpointWithNonExistingEndpoint() { + public void testForceActiveDatabaseWithNonExistingEndpoint() { Endpoint newEndpoint = new HostAndPort("unavailable", 6381); assertThrows(JedisValidationException.class, - () -> client.forceActiveEndpoint(newEndpoint, Duration.ofMillis(100).toMillis())); + () -> client.forceActiveDatabase(newEndpoint, Duration.ofMillis(100).toMillis())); 
} @Test public void testWithDatabaseSwitchListener() { MultiDbConfig endpointsConfig = MultiDbConfig.builder() - .endpoint(DatabaseConfig + .database(DatabaseConfig .builder(endpoint1.getHostAndPort(), endpoint1.getClientConfigBuilder().build()) .weight(100.0f).build()) - .endpoint(DatabaseConfig + .database(DatabaseConfig .builder(endpoint2.getHostAndPort(), endpoint2.getClientConfigBuilder().build()) .weight(50.0f).build()) .build(); diff --git a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java index b3c19fdda5..f0349a851e 100644 --- a/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/failover/FailoverIntegrationTest.java @@ -169,7 +169,7 @@ public void testAutomaticFailoverWhenServerBecomesUnavailable() throws Exception } @Test - public void testManualFailoverNewCommandsAreSentToActiveCluster() throws InterruptedException { + public void testManualFailoverNewCommandsAreSentToActiveDatabase() throws InterruptedException { assertThat(getNodeId(failoverClient.info("server")), equalTo(JEDIS1_ID)); await().atMost(1, TimeUnit.SECONDS).pollInterval(50, TimeUnit.MILLISECONDS) @@ -264,9 +264,11 @@ public void testCircuitBreakerCountsEachConnectionErrorSeparately() throws IOExc MultiDbConfig failoverConfig = new MultiDbConfig.Builder(getDatabaseConfigs( DefaultJedisClientConfig.builder().socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(), - endpoint1, endpoint2)).retryMaxAttempts(2).retryWaitDuration(1) - .circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(2) - .circuitBreakerFailureRateThreshold(50f) // %50 failure rate + endpoint1, endpoint2)) + .commandRetry(MultiDbConfig.RetryConfig.builder().maxAttempts(2).waitDuration(1).build()) + .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder().slidingWindowSize(3) + .minNumOfFailures(2).failureRateThreshold(50f) // %50 failure rate + .build()) .build(); MultiDbConnectionProvider provider = new MultiDbConnectionProvider(failoverConfig); @@ -423,9 +425,12 @@ private MultiDbConnectionProvider createProvider() { .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); MultiDbConfig failoverConfig = new MultiDbConfig.Builder( - getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) - .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) - .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f).build(); + getDatabaseConfigs(clientConfig, endpoint1, endpoint2)) + .commandRetry( + MultiDbConfig.RetryConfig.builder().maxAttempts(1).waitDuration(1).build()) + .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder().slidingWindowSize(3) + .minNumOfFailures(1).failureRateThreshold(50f).build()) + .build(); return new MultiDbConnectionProvider(failoverConfig); } @@ -441,9 +446,11 @@ private MultiDbConnectionProvider createProvider( .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); MultiDbConfig.Builder builder = new MultiDbConfig.Builder( - getDatabaseConfigs(clientConfig, endpoint1, endpoint2)).retryMaxAttempts(1) - .retryWaitDuration(1).circuitBreakerSlidingWindowSize(3) - .circuitBreakerMinNumOfFailures(1).circuitBreakerFailureRateThreshold(50f); + getDatabaseConfigs(clientConfig, endpoint1, endpoint2)) + .commandRetry( + MultiDbConfig.RetryConfig.builder().maxAttempts(1).waitDuration(1).build()) + 
.failureDetector(MultiDbConfig.CircuitBreakerConfig.builder().slidingWindowSize(3) + .minNumOfFailures(1).failureRateThreshold(50f).build()); if (configCustomizer != null) { builder = configCustomizer.apply(builder); diff --git a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java index a5aae5e9bf..fc52cdd74b 100644 --- a/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java +++ b/src/test/java/redis/clients/jedis/mcf/ActiveActiveLocalFailoverTest.java @@ -16,7 +16,9 @@ import eu.rekawek.toxiproxy.ToxiproxyClient; import eu.rekawek.toxiproxy.model.Toxic; import redis.clients.jedis.*; +import redis.clients.jedis.MultiDbConfig.CircuitBreakerConfig; import redis.clients.jedis.MultiDbConfig.DatabaseConfig; +import redis.clients.jedis.MultiDbConfig.RetryConfig; import redis.clients.jedis.scenario.ActiveActiveFailoverTest; import redis.clients.jedis.scenario.MultiThreadedFakeApp; import redis.clients.jedis.scenario.RecommendedSettings; @@ -94,37 +96,27 @@ public void testFailover(boolean fastFailover, long minFailoverCompletionDuratio "TESTING WITH PARAMETERS: fastFailover: {} numberOfThreads: {} minFailoverCompletionDuration: {} maxFailoverCompletionDuration: {] ", fastFailover, numberOfThreads, minFailoverCompletionDuration, maxFailoverCompletionDuration); - MultiDbConfig.DatabaseConfig[] clusterConfig = new MultiDbConfig.DatabaseConfig[2]; - JedisClientConfig config = endpoint1.getClientConfigBuilder() .socketTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS) .connectionTimeoutMillis(RecommendedSettings.DEFAULT_TIMEOUT_MS).build(); - clusterConfig[0] = DatabaseConfig.builder(endpoint1.getHostAndPort(), config) + DatabaseConfig db1 = DatabaseConfig.builder(endpoint1.getHostAndPort(), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(1.0f).build(); - clusterConfig[1] = DatabaseConfig.builder(endpoint2.getHostAndPort(), config) + DatabaseConfig db2 = DatabaseConfig.builder(endpoint2.getHostAndPort(), config) .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build(); - MultiDbConfig.Builder builder = new MultiDbConfig.Builder(clusterConfig); - - builder.circuitBreakerSlidingWindowSize(1); // SLIDING WINDOW SIZE IN SECONDS - builder.circuitBreakerFailureRateThreshold(10.0f); // percentage of failures to trigger circuit - // breaker - - builder.failbackSupported(false); - // builder.failbackCheckInterval(1000); - builder.gracePeriod(10000); - - builder.retryWaitDuration(10); - builder.retryMaxAttempts(1); - builder.retryWaitDurationExponentialBackoffMultiplier(1); + MultiDbConfig.Builder builder = new MultiDbConfig.Builder().database(db1).database(db2) + .failureDetector( + CircuitBreakerConfig.builder().slidingWindowSize(1).failureRateThreshold(10.0f).build()) + .failbackSupported(false).gracePeriod(10000).commandRetry(RetryConfig.builder() + .waitDuration(10).maxAttempts(1).exponentialBackoffMultiplier(1).build()); // Use the parameterized fastFailover setting builder.fastFailover(fastFailover); class FailoverReporter implements Consumer { - String currentClusterName = "not set"; + String currentDatabaseName = "not set"; boolean failoverHappened = false; @@ -134,14 +126,14 @@ class FailoverReporter implements Consumer { Instant failbackAt = null; - public String getCurrentClusterName() { - return currentClusterName; + public String getCurrentDatabaseName() { + return currentDatabaseName; } @Override public void accept(DatabaseSwitchEvent e) { - 
this.currentClusterName = e.getDatabaseName(); - log.info("\n\n===={}=== \nJedis switching to cluster: {}\n====End of log===\n", + this.currentDatabaseName = e.getDatabaseName(); + log.info("\n\n===={}=== \nJedis switching to database: {}\n====End of log===\n", e.getReason(), e.getDatabaseName()); if ((e.getReason() == SwitchReason.CIRCUIT_BREAKER || e.getReason() == SwitchReason.HEALTH_CHECK)) { @@ -177,7 +169,7 @@ public void accept(DatabaseSwitchEvent e) { AtomicBoolean unexpectedErrors = new AtomicBoolean(false); AtomicReference lastException = new AtomicReference(); AtomicLong stopRunningAt = new AtomicLong(); - String cluster2Id = provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker() + String database2Id = provider.getDatabase(endpoint2.getHostAndPort()).getCircuitBreaker() .getName(); // Start thread that imitates an application that uses the client @@ -191,15 +183,15 @@ public void accept(DatabaseSwitchEvent e) { int attempt = 0; int maxTries = 500; int retryingDelay = 5; - String currentClusterId = null; + String currentDatabaseId = null; while (true) { try { if (System.currentTimeMillis() > stopRunningAt.get()) break; - currentClusterId = provider.getDatabase().getCircuitBreaker().getName(); + currentDatabaseId = provider.getDatabase().getCircuitBreaker().getName(); Map executionInfo = new HashMap() { { put("threadId", String.valueOf(threadId)); - put("cluster", reporter.getCurrentClusterName()); + put("database", reporter.getCurrentDatabaseName()); } }; @@ -212,7 +204,7 @@ public void accept(DatabaseSwitchEvent e) { break; } catch (JedisConnectionException e) { - if (cluster2Id.equals(currentClusterId)) { + if (database2Id.equals(currentDatabaseId)) { break; } lastException.set(e); @@ -240,7 +232,7 @@ public void accept(DatabaseSwitchEvent e) { } if (++attempt == maxTries) throw e; } catch (Exception e) { - if (cluster2Id.equals(currentClusterId)) { + if (database2Id.equals(currentDatabaseId)) { break; } lastException.set(e); diff --git a/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java index 2892005cb4..3a5b7b55d8 100644 --- a/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/DatabaseEvaluateThresholdsTest.java @@ -104,8 +104,8 @@ public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { .builder(java.util.Arrays.asList(MultiDbConfig.DatabaseConfig .builder(new HostAndPort("localhost", 6379), DefaultJedisClientConfig.builder().build()) .healthCheckEnabled(false).build())); - cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3) - .circuitBreakerSlidingWindowSize(10); + cfgBuilder.failureDetector(MultiDbConfig.CircuitBreakerConfig.builder() + .failureRateThreshold(0.0f).minNumOfFailures(3).slidingWindowSize(10).build()); MultiDbConfig mcc = cfgBuilder.build(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc); diff --git a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java index e3e5f3f05e..767fd60c90 100644 --- a/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java +++ b/src/test/java/redis/clients/jedis/mcf/DefaultValuesTest.java @@ -19,50 +19,50 @@ public class DefaultValuesTest { @Test void testDefaultValuesInConfig() { - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig 
= MultiDbConfig.DatabaseConfig .builder(fakeEndpoint, config).build(); MultiDbConfig multiConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).build(); // check for grace period assertEquals(60000, multiConfig.getGracePeriod()); - // check for cluster config - assertEquals(clusterConfig, multiConfig.getDatabaseConfigs()[0]); + // check for database config + assertEquals(databaseConfig, multiConfig.getDatabaseConfigs()[0]); // check healthchecks enabled - assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); + assertNotNull(databaseConfig.getHealthCheckStrategySupplier()); // check default healthcheck strategy is echo - assertEquals(EchoStrategy.DEFAULT, clusterConfig.getHealthCheckStrategySupplier()); + assertEquals(EchoStrategy.DEFAULT, databaseConfig.getHealthCheckStrategySupplier()); // check number of probes assertEquals(3, - clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getNumProbes()); + databaseConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getNumProbes()); - assertEquals(500, clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config) + assertEquals(500, databaseConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config) .getDelayInBetweenProbes()); assertEquals(ProbingPolicy.BuiltIn.ALL_SUCCESS, - clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getPolicy()); + databaseConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getPolicy()); // check health check interval assertEquals(5000, - clusterConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getInterval()); + databaseConfig.getHealthCheckStrategySupplier().get(fakeEndpoint, config).getInterval()); // check lag aware tolerance LagAwareStrategy.Config lagAwareConfig = LagAwareStrategy.Config .builder(fakeEndpoint, config.getCredentialsProvider()).build(); assertEquals(Duration.ofMillis(5000), lagAwareConfig.getAvailabilityLagTolerance()); - // TODO: check CB number of failures threshold -- 1000 - // assertEquals(1000, multiConfig.circuitBreakerMinNumOfFailures()); - // check CB failure rate threshold - assertEquals(10, multiConfig.getCircuitBreakerFailureRateThreshold()); + assertEquals(10, multiConfig.getFailureDetector().getFailureRateThreshold()); // check CB sliding window size - assertEquals(2, multiConfig.getCircuitBreakerSlidingWindowSize()); + assertEquals(2, multiConfig.getFailureDetector().getSlidingWindowSize()); + + // check CB number of failures threshold + assertEquals(1000, multiConfig.getFailureDetector().getMinNumOfFailures()); // check failback check interval assertEquals(120000, multiConfig.getFailbackCheckInterval()); diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java index 34e521683e..2e4e9409b8 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismIntegrationTest.java @@ -48,37 +48,37 @@ private MockedConstruction mockPool() { @Test void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - // Create clusters with different weights - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig + // Create databases with different weights + MultiDbConfig.DatabaseConfig database1 = 
MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) // Higher weight .healthCheckEnabled(false).build(); MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled + new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(false) // Disabled .failbackCheckInterval(100) // Short interval for testing .build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Initially, cluster2 should be active (highest weight) + // Initially, database2 should be active (highest weight) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); - // Make cluster2 unhealthy to force failover to cluster1 + // Make database2 unhealthy to force failover to database1 MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - // Should now be on cluster1 (only healthy option) + // Should now be on database1 (only healthy option) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); - // Make cluster2 healthy again (higher weight - would normally trigger failback) + // Make database2 healthy again (higher weight - would normally trigger failback) MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait longer than failback interval - // Should still be on cluster1 since failback is disabled + // Should still be on database1 since failback is disabled await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); } @@ -86,39 +86,39 @@ void testFailbackDisabledDoesNotPerformFailback() throws InterruptedException { } @Test - void testFailbackToHigherWeightCluster() throws InterruptedException { + void testFailbackToHigherWeightDatabase() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - // Create clusters with different weights - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig + // Create databases with different weights + MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f) // Higher weight .healthCheckEnabled(false).build(); - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f) // Lower weight .healthCheckEnabled(false).build(); MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(true) .failbackCheckInterval(100) // Short interval for testing .gracePeriod(100).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Initially, cluster1 should be active (highest weight) + // Initially, database1 should be active (highest weight) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); - // Make cluster1 unhealthy to force failover to cluster2 + // Make database1 unhealthy to force failover 
to database2 MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - // Should now be on cluster2 (lower weight, but only healthy option) + // Should now be on database2 (lower weight, but only healthy option) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); - // Make cluster1 healthy again + // Make database1 healthy again MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check interval + some buffer - // Should have failed back to cluster1 (higher weight) + // Should have failed back to database1 (higher weight) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); } @@ -126,43 +126,43 @@ void testFailbackToHigherWeightCluster() throws InterruptedException { } @Test - void testNoFailbackToLowerWeightCluster() throws InterruptedException { + void testNoFailbackToLowerWeightDatabase() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - // Create three clusters with different weights to properly test no failback to lower weight - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig + // Create three databases with different weights to properly test no failback to lower weight + MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f) // Lowest weight .healthCheckEnabled(false).build(); - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f) // Medium weight .healthCheckEnabled(false).build(); - MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) + new MultiDbConfig.DatabaseConfig[] { database1, database2, database3 }) .failbackSupported(true).failbackCheckInterval(100).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Initially, cluster3 should be active (highest weight) + // Initially, database3 should be active (highest weight) assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); - // Make cluster3 unhealthy to force failover to cluster2 (medium weight) + // Make database3 unhealthy to force failover to database2 (medium weight) MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - // Should now be on cluster2 (highest weight among healthy clusters) + // Should now be on database2 (highest weight among healthy databases) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); - // Make cluster1 (lowest weight) healthy - this should NOT trigger failback - // since we don't failback to lower weight clusters + // Make database1 (lowest weight) healthy - this should NOT trigger failback + // since we don't failback to lower weight databases MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check interval - // Should still be on 
cluster2 (no failback to lower weight cluster1) + // Should still be on database2 (no failback to lower weight database1) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } @@ -170,37 +170,37 @@ void testNoFailbackToLowerWeightCluster() throws InterruptedException { } @Test - void testFailbackToHigherWeightClusterImmediately() throws InterruptedException { + void testFailbackToHigherWeightDatabaseImmediately() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(true) .failbackCheckInterval(100).gracePeriod(50).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Initially, cluster1 should be active (highest weight) + // Initially, database1 should be active (highest weight) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); - // Make cluster1 unhealthy to force failover to cluster2 + // Make database1 unhealthy to force failover to database2 MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - // Should now be on cluster2 (only healthy option) + // Should now be on database2 (only healthy option) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); - // Make cluster1 healthy again + // Make database1 healthy again MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback check - // Should have failed back to cluster1 immediately (higher weight, no stability period + // Should have failed back to database1 immediately (higher weight, no stability period // required) await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) .until(() -> provider.getDatabase(endpoint1) == provider.getDatabase()); @@ -209,44 +209,44 @@ void testFailbackToHigherWeightClusterImmediately() throws InterruptedException } @Test - void testUnhealthyClusterCancelsFailback() throws InterruptedException { + void testUnhealthyDatabaseCancelsFailback() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 
}).failbackSupported(true) + new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(true) .failbackCheckInterval(200).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Initially, cluster1 should be active (highest weight) + // Initially, database1 should be active (highest weight) assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); - // Make cluster1 unhealthy to force failover to cluster2 + // Make database1 unhealthy to force failover to database2 MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - // Should now be on cluster2 (only healthy option) + // Should now be on database2 (only healthy option) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); - // Make cluster1 healthy again (should trigger failback attempt) + // Make database1 healthy again (should trigger failback attempt) MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait a bit Thread.sleep(100); - // Make cluster1 unhealthy again before failback completes + // Make database1 unhealthy again before failback completes MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // Wait past the original failback interval - // Should still be on cluster2 (failback was cancelled due to cluster1 becoming unhealthy) + // Should still be on database2 (failback was cancelled due to database1 becoming unhealthy) await().atMost(Durations.TWO_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } @@ -254,41 +254,41 @@ void testUnhealthyClusterCancelsFailback() throws InterruptedException { } @Test - void testMultipleClusterFailbackPriority() throws InterruptedException { + void testMultipleDatabaseFailbackPriority() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lowest // weight - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Medium // weight - MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database3 = MultiDbConfig.DatabaseConfig .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight .healthCheckEnabled(false).build(); MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }) + new MultiDbConfig.DatabaseConfig[] { database1, database2, database3 }) .failbackSupported(true).failbackCheckInterval(100).gracePeriod(100).build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Initially, cluster3 should be active (highest weight) + // Initially, database3 should be active (highest weight) assertEquals(provider.getDatabase(endpoint3), provider.getDatabase()); - // Make cluster3 unhealthy to force failover to cluster2 (next highest weight) + // Make database3 unhealthy to force failover to database2 (next highest weight) 
MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - // Should now be on cluster2 (highest weight among healthy clusters) + // Should now be on database2 (highest weight among healthy databases) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); - // Make cluster3 healthy again + // Make database3 healthy again MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint3, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // Wait for failback - // Should fail back to cluster3 (highest weight) + // Should fail back to database3 (highest weight) await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) .until(() -> provider.getDatabase(endpoint3) == provider.getDatabase()); } @@ -296,84 +296,84 @@ void testMultipleClusterFailbackPriority() throws InterruptedException { } @Test - void testGracePeriodDisablesClusterOnUnhealthy() throws InterruptedException { + void testGracePeriodDisablesDatabaseOnUnhealthy() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true) + new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(true) .failbackCheckInterval(100).gracePeriod(200) // 200ms grace // period .build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Initially, cluster2 should be active (highest weight) + // Initially, database2 should be active (highest weight) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); - // Now make cluster2 unhealthy - it should be disabled for grace period + // Now make database2 unhealthy - it should be disabled for grace period MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - // Should failover to cluster1 + // Should failover to database1 assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); - // Cluster2 should be in grace period + // Database2 should be in grace period assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); } } } @Test - void testGracePeriodReEnablesClusterAfterPeriod() throws InterruptedException { + void testGracePeriodReEnablesDatabaseAfterPeriod() throws InterruptedException { try (MockedConstruction mockedPool = mockPool()) { - MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build(); // Lower // weight - MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build(); // Higher // weight MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { 
cluster1, cluster2 }).failbackSupported(true) + new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(true) .failbackCheckInterval(50) // Short interval for testing .gracePeriod(100) // Short grace period for testing .build(); try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) { - // Initially, cluster2 should be active (highest weight) + // Initially, database2 should be active (highest weight) assertEquals(provider.getDatabase(endpoint2), provider.getDatabase()); - // Make cluster2 unhealthy to start grace period and force failover + // Make database2 unhealthy to start grace period and force failover MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); - // Should failover to cluster1 + // Should failover to database1 assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); - // Cluster2 should be in grace period + // Database2 should be in grace period assertTrue(provider.getDatabase(endpoint2).isInGracePeriod()); - // Make cluster2 healthy again while it's still in grace period + // Make database2 healthy again while it's still in grace period MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); - // Should still be on cluster1 because cluster2 is in grace period + // Should still be on database1 because database2 is in grace period assertEquals(provider.getDatabase(endpoint1), provider.getDatabase()); // Wait for grace period to expire - // Cluster2 should no longer be in grace period + // Database2 should no longer be in grace period await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) .until(() -> !provider.getDatabase(endpoint2).isInGracePeriod()); // Wait for failback check to run - // Should now failback to cluster2 (higher weight) since grace period has expired + // Should now failback to database2 (higher weight) since grace period has expired await().atMost(Durations.FIVE_HUNDRED_MILLISECONDS).pollInterval(FIFTY_MILLISECONDS) .until(() -> provider.getDatabase(endpoint2) == provider.getDatabase()); } diff --git a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java index ad251975c2..e60712710a 100644 --- a/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/FailbackMechanismUnitTest.java @@ -26,65 +26,65 @@ void setUp() { @Test void testFailbackCheckIntervalConfiguration() { // Test default value - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); MultiDbConfig defaultConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).build(); assertEquals(120000, defaultConfig.getFailbackCheckInterval()); // Test custom value MultiDbConfig customConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(3000).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).failbackCheckInterval(3000).build(); assertEquals(3000, customConfig.getFailbackCheckInterval()); } @Test void testFailbackSupportedConfiguration() { - MultiDbConfig.DatabaseConfig clusterConfig = 
MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test default (should be true) MultiDbConfig defaultConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).build(); assertTrue(defaultConfig.isFailbackSupported()); // Test disabled MultiDbConfig disabledConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(false).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).failbackSupported(false).build(); assertFalse(disabledConfig.isFailbackSupported()); } @Test void testFailbackCheckIntervalValidation() { - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test zero interval (should be allowed) MultiDbConfig zeroConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(0).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).failbackCheckInterval(0).build(); assertEquals(0, zeroConfig.getFailbackCheckInterval()); // Test negative interval (should be allowed - implementation decision) MultiDbConfig negativeConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackCheckInterval(-1000).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).failbackCheckInterval(-1000).build(); assertEquals(-1000, negativeConfig.getFailbackCheckInterval()); } @Test void testBuilderChaining() { - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test that builder methods can be chained MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).failbackSupported(true) .failbackCheckInterval(2000).retryOnFailover(true).build(); assertTrue(config.isFailbackSupported()); @@ -95,47 +95,47 @@ void testBuilderChaining() { @Test void testGracePeriodConfiguration() { // Test default value - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); MultiDbConfig defaultConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).build(); assertEquals(60000, defaultConfig.getGracePeriod()); // Test custom value MultiDbConfig customConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(5000).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).gracePeriod(5000).build(); assertEquals(5000, customConfig.getGracePeriod()); } @Test void testGracePeriodValidation() { - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test zero grace period (should be allowed) 
MultiDbConfig zeroConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(0).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).gracePeriod(0).build(); assertEquals(0, zeroConfig.getGracePeriod()); // Test negative grace period (should be allowed - implementation decision) MultiDbConfig negativeConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).gracePeriod(-1000).build(); + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).gracePeriod(-1000).build(); assertEquals(-1000, negativeConfig.getGracePeriod()); } @Test void testGracePeriodBuilderChaining() { - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(endpoint1, clientConfig).healthCheckEnabled(false).build(); // Test that builder methods can be chained MultiDbConfig config = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).failbackSupported(true) + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).failbackSupported(true) .failbackCheckInterval(2000).gracePeriod(8000).retryOnFailover(true).build(); assertTrue(config.isFailbackSupported()); diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java index ce12cde8a7..f19af41a6e 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckIntegrationTest.java @@ -88,9 +88,11 @@ private MultiDbConnectionProvider getMCCF(MultiDbConfig.StrategySupplier strateg .apply(MultiDbConfig.DatabaseConfig.builder(e.getHostAndPort(), clientConfig)).build()) .collect(Collectors.toList()); - MultiDbConfig mccf = new MultiDbConfig.Builder(databaseConfigs).retryMaxAttempts(1) - .retryWaitDuration(1).circuitBreakerSlidingWindowSize(1) - .circuitBreakerFailureRateThreshold(100).build(); + MultiDbConfig mccf = new MultiDbConfig.Builder(databaseConfigs) + .commandRetry(MultiDbConfig.RetryConfig.builder().maxAttempts(1).waitDuration(1).build()) + .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder().slidingWindowSize(1) + .failureRateThreshold(100).build()) + .build(); return new MultiDbConnectionProvider(mccf); } diff --git a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java index b83ecb8981..10090811c1 100644 --- a/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java +++ b/src/test/java/redis/clients/jedis/mcf/HealthCheckTest.java @@ -349,14 +349,14 @@ void testEchoStrategyDefaultSupplier() { @Test void testNewFieldLocations() { // Test new field locations in DatabaseConfig and MultiDbConfig - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).weight(2.5f).build(); MultiDbConfig multiConfig = new MultiDbConfig.Builder( - new MultiDbConfig.DatabaseConfig[] { clusterConfig }).retryOnFailover(true) + new MultiDbConfig.DatabaseConfig[] { databaseConfig }).retryOnFailover(true) .failbackSupported(false).build(); - assertEquals(2.5f, clusterConfig.getWeight()); + assertEquals(2.5f, databaseConfig.getWeight()); assertTrue(multiConfig.isRetryOnFailover()); assertFalse(multiConfig.isFailbackSupported()); } @@ -364,19 +364,19 @@ void testNewFieldLocations() { @Test void 
testDefaultValues() {
// Test default values in DatabaseConfig
- MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).build();
- assertEquals(1.0f, clusterConfig.getWeight()); // Default weight
- assertEquals(EchoStrategy.DEFAULT, clusterConfig.getHealthCheckStrategySupplier()); // Default
- // is null
- // (no
- // health
- // check)
+ assertEquals(1.0f, databaseConfig.getWeight()); // Default weight
+ assertEquals(EchoStrategy.DEFAULT, databaseConfig.getHealthCheckStrategySupplier()); // Default
+ // is EchoStrategy.DEFAULT (health check enabled)
// Test default values in MultiDbConfig
MultiDbConfig multiConfig = new MultiDbConfig.Builder(
- new MultiDbConfig.DatabaseConfig[] { clusterConfig }).build();
+ new MultiDbConfig.DatabaseConfig[] { databaseConfig }).build();
assertFalse(multiConfig.isRetryOnFailover()); // Default is false
assertTrue(multiConfig.isFailbackSupported()); // Default is true
@@ -388,11 +388,11 @@ void testDatabaseConfigWithHealthCheckStrategy() {
MultiDbConfig.StrategySupplier supplier = (hostAndPort, jedisClientConfig) -> customStrategy;
- MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(supplier).build();
- assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
- HealthCheckStrategy result = clusterConfig.getHealthCheckStrategySupplier().get(testEndpoint,
+ assertNotNull(databaseConfig.getHealthCheckStrategySupplier());
+ HealthCheckStrategy result = databaseConfig.getHealthCheckStrategySupplier().get(testEndpoint,
testConfig);
assertEquals(customStrategy, result);
}
@@ -403,10 +403,10 @@ void testDatabaseConfigWithStrategySupplier() {
return mock(HealthCheckStrategy.class);
};
- MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(customSupplier).build();
- assertEquals(customSupplier, clusterConfig.getHealthCheckStrategySupplier());
+ assertEquals(customSupplier, databaseConfig.getHealthCheckStrategySupplier());
}
@Test
@@ -415,38 +415,38 @@ void testDatabaseConfigWithEchoStrategy() {
return new EchoStrategy(hostAndPort, jedisClientConfig);
};
- MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).healthCheckStrategySupplier(echoSupplier).build();
- MultiDbConfig.StrategySupplier supplier = clusterConfig.getHealthCheckStrategySupplier();
+ MultiDbConfig.StrategySupplier supplier = databaseConfig.getHealthCheckStrategySupplier();
assertNotNull(supplier);
assertInstanceOf(EchoStrategy.class, supplier.get(testEndpoint, testConfig));
}
@Test
void testDatabaseConfigWithDefaultHealthCheck() {
- MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig
+ MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig
.builder(testEndpoint, testConfig).build();
// Should use default EchoStrategy
- assertNotNull(clusterConfig.getHealthCheckStrategySupplier());
- assertEquals(EchoStrategy.DEFAULT,
databaseConfig.getHealthCheckStrategySupplier()); } @Test void testDatabaseConfigWithDisabledHealthCheck() { - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckEnabled(false).build(); - assertNull(clusterConfig.getHealthCheckStrategySupplier()); + assertNull(databaseConfig.getHealthCheckStrategySupplier()); } @Test void testDatabaseConfigHealthCheckEnabledExplicitly() { - MultiDbConfig.DatabaseConfig clusterConfig = MultiDbConfig.DatabaseConfig + MultiDbConfig.DatabaseConfig databaseConfig = MultiDbConfig.DatabaseConfig .builder(testEndpoint, testConfig).healthCheckEnabled(true).build(); - assertNotNull(clusterConfig.getHealthCheckStrategySupplier()); - assertEquals(EchoStrategy.DEFAULT, clusterConfig.getHealthCheckStrategySupplier()); + assertNotNull(databaseConfig.getHealthCheckStrategySupplier()); + assertEquals(EchoStrategy.DEFAULT, databaseConfig.getHealthCheckStrategySupplier()); } // ========== Integration Tests ========== diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java index 7a0f4319c6..e838729586 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbCircuitBreakerThresholdsTest.java @@ -26,18 +26,18 @@ /** * Tests for circuit breaker thresholds: both failure-rate threshold and minimum number of failures * must be exceeded to trigger failover. Uses a real CircuitBreaker and real Retry, but mocks the - * provider and cluster wiring to avoid network I/O. + * provider and database wiring to avoid network I/O. 
*/ public class MultiDbCircuitBreakerThresholdsTest { private MultiDbConnectionProvider realProvider; private MultiDbConnectionProvider spyProvider; - private Database cluster; + private Database database; private MultiDbCommandExecutor executor; private CommandObject dummyCommand; private TrackingConnectionPool poolMock; - private HostAndPort fakeEndpoint = new HostAndPort("fake", 6379); - private HostAndPort fakeEndpoint2 = new HostAndPort("fake2", 6379); + private final HostAndPort fakeEndpoint = new HostAndPort("fake", 6379); + private final HostAndPort fakeEndpoint2 = new HostAndPort("fake2", 6379); private DatabaseConfig[] fakeDatabaseConfigs; @BeforeEach @@ -51,24 +51,26 @@ public void setup() throws Exception { fakeDatabaseConfigs = databaseConfigs; MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(databaseConfigs) - .circuitBreakerFailureRateThreshold(50.0f).circuitBreakerMinNumOfFailures(3) - .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); + .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder().failureRateThreshold(50.0f) + .minNumOfFailures(3).slidingWindowSize(10).build()) + .commandRetry(MultiDbConfig.RetryConfig.builder().maxAttempts(1).build()) + .retryOnFailover(false); MultiDbConfig mcc = cfgBuilder.build(); realProvider = new MultiDbConnectionProvider(mcc); spyProvider = spy(realProvider); - cluster = spyProvider.getDatabase(); + database = spyProvider.getDatabase(); executor = new MultiDbCommandExecutor(spyProvider); dummyCommand = new CommandObject<>(new CommandArguments(Protocol.Command.PING), BuilderFactory.STRING); - // Replace the cluster's pool with a mock to avoid real network I/O + // Replace the database's pool with a mock to avoid real network I/O poolMock = mock(TrackingConnectionPool.class); - ReflectionTestUtil.setField(cluster, "connectionPool", poolMock); + ReflectionTestUtil.setField(database, "connectionPool", poolMock); } /** @@ -124,8 +126,10 @@ public void minFailuresAndRateExceeded_triggersFailover() { public void rateBelowThreshold_doesNotFailover() throws Exception { // Use local provider with higher threshold (80%) and no retries MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs) - .circuitBreakerFailureRateThreshold(80.0f).circuitBreakerMinNumOfFailures(3) - .circuitBreakerSlidingWindowSize(10).retryMaxAttempts(1).retryOnFailover(false); + .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder().failureRateThreshold(80.0f) + .minNumOfFailures(3).slidingWindowSize(10).build()) + .commandRetry(MultiDbConfig.RetryConfig.builder().maxAttempts(1).build()) + .retryOnFailover(false); MultiDbConnectionProvider rp = new MultiDbConnectionProvider(cfgBuilder.build()); MultiDbConnectionProvider sp = spy(rp); Database c = sp.getDatabase(); @@ -163,8 +167,8 @@ public void rateBelowThreshold_doesNotFailover() throws Exception { @Test public void providerBuilder_zeroRate_mapsToHundredAndHugeMinCalls() { MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs); - cfgBuilder.circuitBreakerFailureRateThreshold(0.0f).circuitBreakerMinNumOfFailures(3) - .circuitBreakerSlidingWindowSize(10); + cfgBuilder.failureDetector(MultiDbConfig.CircuitBreakerConfig.builder() + .failureRateThreshold(0.0f).minNumOfFailures(3).slidingWindowSize(10).build()); MultiDbConfig mcc = cfgBuilder.build(); CircuitBreakerThresholdsAdapter adapter = new CircuitBreakerThresholdsAdapter(mcc); @@ -190,8 +194,10 @@ public void thresholdMatrix(int minFailures, float ratePercent, int successes, i 
boolean expectFailoverOnNext) throws Exception { MultiDbConfig.Builder cfgBuilder = MultiDbConfig.builder(fakeDatabaseConfigs) - .circuitBreakerFailureRateThreshold(ratePercent).circuitBreakerMinNumOfFailures(minFailures) - .circuitBreakerSlidingWindowSize(Math.max(10, successes + failures + 2)).retryMaxAttempts(1) + .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder() + .failureRateThreshold(ratePercent).minNumOfFailures(minFailures) + .slidingWindowSize(Math.max(10, successes + failures + 2)).build()) + .commandRetry(MultiDbConfig.RetryConfig.builder().maxAttempts(1).build()) .retryOnFailover(false); MultiDbConnectionProvider real = new MultiDbConnectionProvider(cfgBuilder.build()); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java index 663f33529e..de38c199e6 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderDynamicEndpointUnitTest.java @@ -109,7 +109,7 @@ private MockedConstruction mockPool(Connection mockConne } @Test - void testRemoveNonExistentCluster() { + void testRemoveNonExistentDatabase() { HostAndPort nonExistentEndpoint = new HostAndPort("localhost", 9999); // Should throw validation exception for non-existent endpoint @@ -168,7 +168,7 @@ void testActiveDatabaseHandlingOnAdd() { } @Test - void testActiveClusterHandlingOnRemove() { + void testActiveDatabaseHandlingOnRemove() { Connection mockConnection = mock(Connection.class); when(mockConnection.ping()).thenReturn(true); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java index 0b062e4298..e5595d085b 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderFailoverAttemptsConfigTest.java @@ -47,7 +47,7 @@ void setUp() throws Exception { provider = new MultiDbConnectionProvider(builder.build()); - // Disable both databases to force handleNoHealthyCluster path + // Disable both databases to force handleNoHealthyDatabase path provider.getDatabase(endpoint0).setDisabled(true); provider.getDatabase(endpoint1).setDisabled(true); } @@ -69,8 +69,8 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex // First call: should throw temporary and start the freeze window, incrementing attempt count to // 1 - assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper - .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase())); + assertThrows(JedisTemporarilyNotAvailableException.class, + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); int afterFirst = getProviderAttemptCount(); assertEquals(1, afterFirst); @@ -78,8 +78,7 @@ void delayBetweenFailoverAttempts_gatesCounterIncrementsWithinWindow() throws Ex // and should NOT increment the attempt count beyond 1 for (int i = 0; i < 50; i++) { assertThrows(JedisTemporarilyNotAvailableException.class, - () -> MultiDbConnectionProviderHelper.switchToHealthyCluster(provider, - SwitchReason.HEALTH_CHECK, provider.getDatabase())); + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, 
provider.getDatabase())); assertEquals(1, getProviderAttemptCount()); } } @@ -96,8 +95,8 @@ void delayBetweenFailoverAttempts_permanentExceptionAfterAttemptsExhausted() thr // First call: should throw temporary and start the freeze window, incrementing attempt count to // 1 - assertThrows(JedisTemporarilyNotAvailableException.class, () -> MultiDbConnectionProviderHelper - .switchToHealthyCluster(provider, SwitchReason.HEALTH_CHECK, provider.getDatabase())); + assertThrows(JedisTemporarilyNotAvailableException.class, + () -> provider.switchToHealthyDatabase(SwitchReason.HEALTH_CHECK, provider.getDatabase())); int afterFirst = getProviderAttemptCount(); assertEquals(1, afterFirst); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java index 4ae061c9f5..62d64fede0 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderHelper.java @@ -13,7 +13,7 @@ public static void periodicFailbackCheck(MultiDbConnectionProvider provider) { provider.periodicFailbackCheck(); } - public static Endpoint switchToHealthyCluster(MultiDbConnectionProvider provider, + public static Endpoint switchToHealthyDatabase(MultiDbConnectionProvider provider, SwitchReason reason, MultiDbConnectionProvider.Database iterateFrom) { return provider.switchToHealthyDatabase(reason, iterateFrom); } diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java index 1935647d46..f134490112 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderInitializationTest.java @@ -96,7 +96,7 @@ void testInitializationWithAllHealthChecksDisabled() { } @Test - void testInitializationWithSingleCluster() { + void testInitializationWithSingleDatabase() { try (MockedConstruction mockedPool = mockPool()) { DatabaseConfig db = DatabaseConfig.builder(endpoint1, clientConfig).weight(1.0f) .healthCheckEnabled(false).build(); @@ -118,7 +118,7 @@ void testErrorHandlingWithNullConfiguration() { } @Test - void testErrorHandlingWithEmptyClusterArray() { + void testErrorHandlingWithEmptyDatabaseArray() { assertThrows(JedisValidationException.class, () -> { new MultiDbConfig.Builder(new DatabaseConfig[0]).build(); }); diff --git a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java index 841896aa34..d22c26f39c 100644 --- a/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java +++ b/src/test/java/redis/clients/jedis/mcf/MultiDbConnectionProviderTest.java @@ -111,8 +111,8 @@ public void testDatabaseSwitchListener() { MultiDbConfig.Builder builder = new MultiDbConfig.Builder(databaseConfigs); // Configures a single failed command to trigger an open circuit on the next subsequent failure - builder.circuitBreakerSlidingWindowSize(3).circuitBreakerMinNumOfFailures(1) - .circuitBreakerFailureRateThreshold(0); + builder.failureDetector(MultiDbConfig.CircuitBreakerConfig.builder().slidingWindowSize(3) + .minNumOfFailures(1).failureRateThreshold(0).build()); AtomicBoolean isValidTest = new AtomicBoolean(false); @@ -168,8 +168,8 @@ public void testConnectionPoolConfigApplied() { 
        endpointStandalone0.getClientConfigBuilder().build(), poolConfig);
     try (MultiDbConnectionProvider customProvider = new MultiDbConnectionProvider(
         new MultiDbConfig.Builder(databaseConfigs).build())) {
-      MultiDbConnectionProvider.Database activeCluster = customProvider.getDatabase();
-      ConnectionPool connectionPool = activeCluster.getConnectionPool();
+      MultiDbConnectionProvider.Database activeDatabase = customProvider.getDatabase();
+      ConnectionPool connectionPool = activeDatabase.getConnectionPool();
       assertEquals(8, connectionPool.getMaxTotal());
       assertEquals(4, connectionPool.getMaxIdle());
       assertEquals(1, connectionPool.getMinIdle());
@@ -233,7 +233,8 @@ public void userCommand_firstTemporary_thenPermanent_inOrder() {

     MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider(
         new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
-            .maxNumFailoverAttempts(2).retryMaxAttempts(1).build());
+            .maxNumFailoverAttempts(2)
+            .commandRetry(MultiDbConfig.RetryConfig.builder().maxAttempts(1).build()).build());

     try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) {
       jedis.get("foo");
@@ -270,8 +271,11 @@ public void userCommand_connectionExceptions_thenMultipleTemporary_thenPermanent
     // and open to impact from other defaulted values withing the components in use.
     MultiDbConnectionProvider testProvider = new MultiDbConnectionProvider(
         new MultiDbConfig.Builder(databaseConfigs).delayInBetweenFailoverAttempts(100)
-            .maxNumFailoverAttempts(2).retryMaxAttempts(1).circuitBreakerSlidingWindowSize(5)
-            .circuitBreakerFailureRateThreshold(60).build()) {
+            .maxNumFailoverAttempts(2)
+            .commandRetry(MultiDbConfig.RetryConfig.builder().maxAttempts(1).build())
+            .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder().slidingWindowSize(5)
+                .failureRateThreshold(60).build())
+            .build()) {
     };

     try (UnifiedJedis jedis = new UnifiedJedis(testProvider)) {
diff --git a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
index f58df34e0c..8c98d3adae 100644
--- a/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
+++ b/src/test/java/redis/clients/jedis/mcf/PeriodicFailbackTest.java
@@ -21,13 +21,13 @@ class PeriodicFailbackTest {

   private HostAndPort endpoint1;
   private HostAndPort endpoint2;
-  private JedisClientConfig clientConfig;
+  private JedisClientConfig databaseConfig;

   @BeforeEach
   void setUp() {
     endpoint1 = new HostAndPort("localhost", 6379);
     endpoint2 = new HostAndPort("localhost", 6380);
-    clientConfig = DefaultJedisClientConfig.builder().build();
+    databaseConfig = DefaultJedisClientConfig.builder().build();
   }

   private MockedConstruction mockPool() {
@@ -40,70 +40,70 @@ private MockedConstruction mockPool() {
   }

   @Test
-  void testPeriodicFailbackCheckWithDisabledCluster() throws InterruptedException {
+  void testPeriodicFailbackCheckWithDisabledDatabase() throws InterruptedException {
     try (MockedConstruction mockedPool = mockPool()) {
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint1, databaseConfig).weight(1.0f).healthCheckEnabled(false).build();

-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint2, databaseConfig).weight(2.0f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-          new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+          new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(true)
               .failbackCheckInterval(100).build();

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
-        // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
+        // Initially, database2 should be active (highest weight: 2.0f vs 1.0f)
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());

-        // Start grace period for cluster2 manually
+        // Start grace period for database2 manually
         provider.getDatabase(endpoint2).setGracePeriod();
         provider.getDatabase(endpoint2).setDisabled(true);

-        // Force failover to cluster1 since cluster2 is disabled
+        // Force failover to database1 since database2 is disabled
        provider.switchToHealthyDatabase(SwitchReason.FORCED, provider.getDatabase(endpoint2));

        // Manually trigger periodic check
        MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);

-        // Should still be on cluster1 (cluster2 is in grace period)
+        // Should still be on database1 (database2 is in grace period)
        assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
      }
    }
  }

   @Test
-  void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
+  void testPeriodicFailbackCheckWithHealthyDatabase() throws InterruptedException {
     try (MockedConstruction mockedPool = mockPool()) {
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint1, databaseConfig).weight(1.0f).healthCheckEnabled(false).build();

-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint2, databaseConfig).weight(2.0f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-          new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(true)
+          new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(true)
               .failbackCheckInterval(50).gracePeriod(100).build(); // Add
                                                                    // grace
                                                                    // period

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
-        // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
+        // Initially, database2 should be active (highest weight: 2.0f vs 1.0f)
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());

-        // Make cluster2 unhealthy to force failover to cluster1
+        // Make database2 unhealthy to force failover to database1
         onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);

-        // Should now be on cluster1 (cluster2 is in grace period)
+        // Should now be on database1 (database2 is in grace period)
         assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());

-        // Verify cluster2 is in grace period
+        // Verify database2 is in grace period
         assertTrue(provider.getDatabase(endpoint2).isInGracePeriod());

-        // Make cluster2 healthy again (but it's still in grace period)
+        // Make database2 healthy again (but it's still in grace period)
         onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);

-        // Trigger periodic check immediately - should still be on cluster1
+        // Trigger periodic check immediately - should still be on database1
         MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);
         assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());

@@ -113,7 +113,7 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
         // Trigger periodic check after grace period expires
         MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);

-        // Should have failed back to cluster2 (higher weight, grace period expired)
+        // Should have failed back to database2 (higher weight, grace period expired)
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());
       }
     }
@@ -122,27 +122,27 @@ void testPeriodicFailbackCheckWithHealthyCluster() throws InterruptedException {
   @Test
   void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException {
     try (MockedConstruction mockedPool = mockPool()) {
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint1, databaseConfig).weight(1.0f).healthCheckEnabled(false).build();

-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint2, databaseConfig).weight(2.0f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-          new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).failbackSupported(false) // Disabled
+          new MultiDbConfig.DatabaseConfig[] { database1, database2 }).failbackSupported(false) // Disabled
               .failbackCheckInterval(50).build();

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
-        // Initially, cluster2 should be active (highest weight: 2.0f vs 1.0f)
+        // Initially, database2 should be active (highest weight: 2.0f vs 1.0f)
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());

-        // Make cluster2 unhealthy to force failover to cluster1
+        // Make database2 unhealthy to force failover to database1
         onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);

-        // Should now be on cluster1
+        // Should now be on database1
         assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());

-        // Make cluster2 healthy again
+        // Make database2 healthy again
         onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);

         // Wait for stability period
@@ -151,50 +151,50 @@ void testPeriodicFailbackCheckWithFailbackDisabled() throws InterruptedException
         // Trigger periodic check
         MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);

-        // Should still be on cluster1 (failback disabled)
+        // Should still be on database1 (failback disabled)
         assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());
       }
     }
   }

   @Test
-  void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedException {
+  void testPeriodicFailbackCheckSelectsHighestWeightDatabase() throws InterruptedException {
     try (MockedConstruction mockedPool = mockPool()) {
       HostAndPort endpoint3 = new HostAndPort("localhost", 6381);

-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint1, databaseConfig).weight(1.0f).healthCheckEnabled(false).build();

-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint2, databaseConfig).weight(2.0f).healthCheckEnabled(false).build();

-      MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
-          .builder(endpoint3, clientConfig).weight(3.0f) // Highest weight
+      MultiDbConfig.DatabaseConfig database3 = MultiDbConfig.DatabaseConfig
+          .builder(endpoint3, databaseConfig).weight(3.0f) // Highest weight
           .healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-          new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 })
+          new MultiDbConfig.DatabaseConfig[] { database1, database2, database3 })
               .failbackSupported(true).failbackCheckInterval(50).gracePeriod(100).build(); // Add
                                                                                            // grace
                                                                                            // period

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
-        // Initially, cluster3 should be active (highest weight: 3.0f vs 2.0f vs 1.0f)
+        // Initially, database3 should be active (highest weight: 3.0f vs 2.0f vs 1.0f)
         assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());

-        // Make cluster3 unhealthy to force failover to cluster2 (next highest weight)
+        // Make database3 unhealthy to force failover to database2 (next highest weight)
         onHealthStatusChange(provider, endpoint3, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);

-        // Should now be on cluster2 (weight 2.0f, higher than cluster1's 1.0f)
+        // Should now be on database2 (weight 2.0f, higher than database1's 1.0f)
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase());

-        // Make cluster2 unhealthy to force failover to cluster1
+        // Make database2 unhealthy to force failover to database1
         onHealthStatusChange(provider, endpoint2, HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);

-        // Should now be on cluster1 (only healthy cluster left)
+        // Should now be on database1 (only healthy databases left)
         assertEquals(provider.getDatabase(endpoint1), provider.getDatabase());

-        // Make cluster2 and cluster3 healthy again
+        // Make database2 and database3 healthy again
         onHealthStatusChange(provider, endpoint2, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
         onHealthStatusChange(provider, endpoint3, HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);

@@ -204,7 +204,7 @@ void testPeriodicFailbackCheckSelectsHighestWeightCluster() throws InterruptedEx
         // Trigger periodic check
         MultiDbConnectionProviderHelper.periodicFailbackCheck(provider);

-        // Should have failed back to cluster3 (highest weight, grace period expired)
+        // Should have failed back to database3 (highest weight, grace period expired)
         assertEquals(provider.getDatabase(endpoint3), provider.getDatabase());
       }
     }
diff --git a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
index ac74738226..306cb7a998 100644
--- a/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/misc/AutomaticFailoverTest.java
@@ -77,7 +77,7 @@ public void pipelineWithSwitch() {
       AbstractPipeline pipe = client.pipelined();
       pipe.set("pstr", "foobar");
       pipe.hset("phash", "foo", "bar");
-      MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
+      MultiDbConnectionProviderHelper.switchToHealthyDatabase(provider,
        SwitchReason.HEALTH_CHECK, provider.getDatabase());
       pipe.sync();
     }
@@ -97,7 +97,7 @@ public void transactionWithSwitch() {
       AbstractTransaction tx = client.multi();
       tx.set("tstr", "foobar");
       tx.hset("thash", "foo", "bar");
-      MultiDbConnectionProviderHelper.switchToHealthyCluster(provider,
+      MultiDbConnectionProviderHelper.switchToHealthyDatabase(provider,
        SwitchReason.HEALTH_CHECK, provider.getDatabase());
       assertEquals(Arrays.asList("OK", 1L), tx.exec());
     }
@@ -114,9 +114,11 @@ public void commandFailoverUnresolvableHost() {
     HostAndPort unresolvableHostAndPort = new HostAndPort("unresolvable", 6379);
     MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
       getDatabaseConfigs(clientConfig, unresolvableHostAndPort, workingEndpoint.getHostAndPort()))
-        .retryWaitDuration(1).retryMaxAttempts(1)
-        .circuitBreakerSlidingWindowSize(slidingWindowSize)
-        .circuitBreakerMinNumOfFailures(slidingWindowMinFails);
+        .commandRetry(MultiDbConfig.RetryConfig.builder().waitDuration(1).maxAttempts(1).build())
+        .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder()
+          .slidingWindowSize(slidingWindowSize)
+          .minNumOfFailures(slidingWindowMinFails)
+          .build());

     RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
     MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider(
@@ -154,12 +156,12 @@ public void commandFailover() {

     MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
       getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
-        .retryMaxAttempts(retryMaxAttempts) // Default
-                                            // is
-                                            // 3
-        .circuitBreakerFailureRateThreshold(50)
-        .circuitBreakerMinNumOfFailures(slidingWindowMinFails)
-        .circuitBreakerSlidingWindowSize(slidingWindowSize);
+        .commandRetry(MultiDbConfig.RetryConfig.builder().maxAttempts(retryMaxAttempts).build())
+        .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder()
+          .failureRateThreshold(50)
+          .minNumOfFailures(slidingWindowMinFails)
+          .slidingWindowSize(slidingWindowSize)
+          .build());

     RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
     MultiDbConnectionProvider connectionProvider = new MultiDbConnectionProvider(
@@ -196,7 +198,9 @@ public void pipelineFailover() {

     MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
       getDatabaseConfigs(clientConfig, hostPortWithFailure, workingEndpoint.getHostAndPort()))
-        .circuitBreakerSlidingWindowSize(slidingWindowSize)
+        .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder()
+          .slidingWindowSize(slidingWindowSize)
+          .build())
         .fallbackExceptionList(Collections.singletonList(JedisConnectionException.class));

     RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
@@ -228,7 +232,10 @@ public void failoverFromAuthError() {

     MultiDbConfig.Builder builder = new MultiDbConfig.Builder(
       getDatabaseConfigs(clientConfig, endpointForAuthFailure.getHostAndPort(),
-        workingEndpoint.getHostAndPort())).circuitBreakerSlidingWindowSize(slidingWindowSize)
+        workingEndpoint.getHostAndPort()))
+          .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder()
+            .slidingWindowSize(slidingWindowSize)
+            .build())
         .fallbackExceptionList(Collections.singletonList(JedisAccessControlException.class));

     RedisFailoverReporter failoverReporter = new RedisFailoverReporter();
diff --git a/src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java b/src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java
index ed874c816c..edaa9e59fb 100644
--- a/src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java
+++ b/src/test/java/redis/clients/jedis/providers/MultiDbProviderHealthStatusChangeTest.java
@@ -51,14 +51,14 @@ private MockedConstruction mockConnectionPool() {

   @Test
   void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
     try (MockedConstruction mockedPool = mockConnectionPool()) {
-      // Create clusters without health checks
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
+      // Create databases without health checks
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
         .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
         .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-        new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+        new MultiDbConfig.DatabaseConfig[] { database1, database2 }).build();

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {

@@ -71,11 +71,11 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
             HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);
         }, "Post-initialization events should be processed immediately");

-        // Verify the cluster has changed according to the UNHEALTHY status
+        // Verify the database has changed according to the UNHEALTHY status
         assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
-          "UNHEALTHY status on active cluster should cause a grace period");
+          "UNHEALTHY status on active database should cause a grace period");
         assertNotEquals(provider.getDatabase(), provider.getDatabase(endpoint1),
-          "UNHEALTHY status on active cluster should cause a failover");
+          "UNHEALTHY status on active database should cause a failover");
       }
     }
   }
@@ -83,13 +83,13 @@ void postInit_unhealthy_active_sets_grace_and_fails_over() throws Exception {
   @Test
   void postInit_nonActive_changes_do_not_switch_active() throws Exception {
     try (MockedConstruction mockedPool = mockConnectionPool()) {
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
         .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
         .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-        new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+        new MultiDbConfig.DatabaseConfig[] { database1, database2 }).build();

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
         // Verify initial state
@@ -100,27 +100,27 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception {
         MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
           HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);

-        // After first UNHEALTHY on active cluster: it enters grace period and provider fails over
+        // After first UNHEALTHY on active database: it enters grace period and provider fails over
         assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
-          "Active cluster should enter grace period");
+          "Active database should enter grace period");
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
           "Should fail over to endpoint2");

         MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
           HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);

-        // Healthy event for non-active cluster should not immediately revert active cluster
+        // Healthy event for non-active database should not immediately revert active database
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
-          "Active cluster should remain endpoint2");
+          "Active database should remain endpoint2");
         assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
           "Grace period should still be in effect");

         MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
           HealthStatus.HEALTHY, HealthStatus.UNHEALTHY);

-        // Further UNHEALTHY for non-active cluster is a no-op
+        // Further UNHEALTHY for non-active database is a no-op
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
-          "Active cluster unchanged");
+          "Active database unchanged");
         assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(), "Still in grace period");
       }
     }
@@ -129,19 +129,19 @@ void postInit_nonActive_changes_do_not_switch_active() throws Exception {
   @Test
   void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception {
     try (MockedConstruction mockedPool = mockConnectionPool()) {
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
         .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
         .builder(endpoint2, clientConfig).weight(2.0f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-        new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+        new MultiDbConfig.DatabaseConfig[] { database1, database2 }).build();

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
         // This test verifies that multiple endpoints are properly initialized

-        // Verify both clusters are initialized properly
+        // Verify both databases are initialized properly
         assertNotNull(provider.getDatabase(endpoint1), "Database 1 should be available");
         assertNotNull(provider.getDatabase(endpoint2), "Database 2 should be available");

@@ -153,13 +153,13 @@ void init_selects_highest_weight_healthy_when_checks_disabled() throws Exception
   }

   @Test
-  void init_single_cluster_initializes_and_is_healthy() throws Exception {
+  void init_single_database_initializes_and_is_healthy() throws Exception {
     try (MockedConstruction mockedPool = mockConnectionPool()) {
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
         .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-        new MultiDbConfig.DatabaseConfig[] { cluster1 }).build();
+        new MultiDbConfig.DatabaseConfig[] { database1 }).build();

       // This test verifies that the provider initializes correctly and doesn't lose events
       // In practice, with health checks disabled, no events should be generated during init
@@ -167,7 +167,7 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception {
         // Verify successful initialization
         assertNotNull(provider.getDatabase(), "Provider should have initialized successfully");
         assertEquals(provider.getDatabase(endpoint1), provider.getDatabase(),
-          "Should have selected the configured cluster");
+          "Should have selected the configured database");
         assertTrue(provider.getDatabase().isHealthy(),
           "Database should be healthy (assumed healthy with no health checks)");
       }
@@ -179,17 +179,17 @@ void init_single_cluster_initializes_and_is_healthy() throws Exception {
   @Test
   void postInit_two_hop_failover_chain_respected() throws Exception {
     try (MockedConstruction mockedPool = mockConnectionPool()) {
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
         .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
         .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();
-      MultiDbConfig.DatabaseConfig cluster3 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database3 = MultiDbConfig.DatabaseConfig
         .builder(endpoint3, clientConfig).weight(0.2f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-        new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2, cluster3 }).build();
+        new MultiDbConfig.DatabaseConfig[] { database1, database2, database3 }).build();

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
         // First event: endpoint1 (active) becomes UNHEALTHY -> failover to endpoint2, endpoint1
@@ -214,7 +214,7 @@ void postInit_two_hop_failover_chain_respected() throws Exception {
         MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
           HealthStatus.UNHEALTHY, HealthStatus.HEALTHY);
         assertEquals(provider.getDatabase(endpoint3), provider.getDatabase(),
-          "Active cluster should remain endpoint3");
+          "Active database should remain endpoint3");
       }
     }
   }
@@ -222,14 +222,14 @@ void postInit_two_hop_failover_chain_respected() throws Exception {
   @Test
   void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Exception {
     try (MockedConstruction mockedPool = mockConnectionPool()) {
-      MultiDbConfig.DatabaseConfig cluster1 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database1 = MultiDbConfig.DatabaseConfig
         .builder(endpoint1, clientConfig).weight(1.0f).healthCheckEnabled(false).build();
-      MultiDbConfig.DatabaseConfig cluster2 = MultiDbConfig.DatabaseConfig
+      MultiDbConfig.DatabaseConfig database2 = MultiDbConfig.DatabaseConfig
         .builder(endpoint2, clientConfig).weight(0.5f).healthCheckEnabled(false).build();

       MultiDbConfig config = new MultiDbConfig.Builder(
-        new MultiDbConfig.DatabaseConfig[] { cluster1, cluster2 }).build();
+        new MultiDbConfig.DatabaseConfig[] { database1, database2 }).build();

       try (MultiDbConnectionProvider provider = new MultiDbConnectionProvider(config)) {
         // Verify initial state
@@ -240,7 +240,7 @@ void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Excepti
         MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
           HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // triggers failover and grace
         MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
-          HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // non-active cluster becomes healthy
+          HealthStatus.UNHEALTHY, HealthStatus.HEALTHY); // non-active database becomes healthy
         MultiDbConnectionProviderHelper.onHealthStatusChange(provider, endpoint1,
           HealthStatus.HEALTHY, HealthStatus.UNHEALTHY); // still non-active and in grace; no change

@@ -248,7 +248,7 @@ void postInit_rapid_events_respect_grace_and_keep_active_stable() throws Excepti
         assertTrue(provider.getDatabase(endpoint1).isInGracePeriod(),
           "Endpoint1 should be in grace period");
         assertEquals(provider.getDatabase(endpoint2), provider.getDatabase(),
-          "Active cluster should remain endpoint2");
+          "Active database should remain endpoint2");
       }
     }
   }
diff --git a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
index e6ebc42b8d..1327501b48 100644
--- a/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
+++ b/src/test/java/redis/clients/jedis/scenario/ActiveActiveFailoverTest.java
@@ -69,16 +69,20 @@ public void testFailover() {
         .connectionPoolConfig(RecommendedSettings.poolConfig).weight(0.5f).build();

     MultiDbConfig multiConfig = MultiDbConfig.builder()
-        .endpoint(primary)
-        .endpoint(secondary)
-        .circuitBreakerSlidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS
-        .circuitBreakerFailureRateThreshold(10.0f) // percentage of failures to trigger circuit breaker
+        .database(primary)
+        .database(secondary)
+        .failureDetector(MultiDbConfig.CircuitBreakerConfig.builder()
+          .slidingWindowSize(1) // SLIDING WINDOW SIZE IN SECONDS
+          .failureRateThreshold(10.0f) // percentage of failures to trigger circuit breaker
+          .build())
         .failbackSupported(true)
         .failbackCheckInterval(1000)
         .gracePeriod(2000)
-        .retryWaitDuration(10)
-        .retryMaxAttempts(1)
-        .retryWaitDurationExponentialBackoffMultiplier(1)
+        .commandRetry(MultiDbConfig.RetryConfig.builder()
+          .waitDuration(10)
+          .maxAttempts(1)
+          .exponentialBackoffMultiplier(1)
+          .build())
         .fastFailover(true)
         .retryOnFailover(false)
         .build();