diff --git a/README.md b/README.md
index 22fd3ee0..cf52d78e 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,8 @@ In addition to local in-memory buckets, the Bucket4j supports clustered usage sc
 | ```Inifinispan``` | Yes | Yes | No |
 | ```Oracle Coherence``` | Yes | Yes | No |
 
+## [Documentation](https://bucket4j.com)
+
 ## Get Bucket4j library
 #### You can add Bucket4j to your project as maven dependency
 The Bucket4j is distributed through [Maven Central](http://search.maven.org/):
@@ -51,7 +53,7 @@ mvn clean install
 Feel free to ask via:
 * [Bucket4j discussions](https://github.com/vladimir-bukhtoyarov/bucket4j/discussions) for questions, feature proposals, sharing of experience.
 * [Bucket4j issue tracker](https://github.com/vladimir-bukhtoyarov/bucket4j/issues/new) to report a bug.
-* [Vladimir Bukhtoyarov - Upwork Profile](https://www.upwork.com/freelancers/~013d8e02a32ffdd5f5) if you want to get one time paid support.
+* [Vladimir Bukhtoyarov - Upwork Profile](https://www.upwork.com/freelancers/~013d8e02a32ffdd5f5) if you want to get one-time paid support.
 
 ## License
 Copyright 2015-2021 Vladimir Bukhtoyarov
diff --git a/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc b/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc
index ef1121b0..eb2ff1fd 100644
--- a/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc
+++ b/asciidoc/src/main/docs/asciidoc/advanced/configuration-replacement.adoc
@@ -12,12 +12,12 @@ It is not possible to add, remove or change the limits for already created confi
 2. There is another problem when you are choosing <>, <> or <> or <> and bucket has more then one bandwidth. For example how does replaceConfiguration implementation should bind bandwidths to each other in the following example?
 [source, java]
 ----
-Bucket bucket = Bucket4j.builder()
+Bucket bucket = Bucket.builder()
     .addLimit(Bandwidth.simple(10, Duration.ofSeconds(1)))
     .addLimit(Bandwidth.simple(10000, Duration.ofHours(1)))
     .build();
 ...
-BucketConfiguration newConfiguration = Bucket4j.configurationBuilder()
+BucketConfiguration newConfiguration = BucketConfiguration.builder()
     .addLimit(Bandwidth.simple(5000, Duration.ofHours(1)))
     .addLimit(Bandwidth.simple(100, Duration.ofSeconds(10)))
     .build();
@@ -30,12 +30,12 @@ Instead of inventing the backward maggic Bucket4j provides to you ability to dea
 so in case of multiple bandwidth configuratoin replacement code can copy available tokens by bandwidth ID. So it is better to rewrite code above as following:
 [source, java]
 ----
-Bucket bucket = Bucket4j.builder()
+Bucket bucket = Bucket.builder()
     .addLimit(Bandwidth.simple(10, Duration.ofSeconds(1)).withId("technical-limit"))
     .addLimit(Bandwidth.simple(10000, Duration.ofHours(1)).withId("business-limit"))
     .build();
 ...
- BucketConfiguration newConfiguration = Bucket4j.configurationBuilder()
+ BucketConfiguration newConfiguration = BucketConfiguration.builder()
     .addLimit(Bandwidth.simple(5000, Duration.ofHours(1)).withId("business-limit"))
     .addLimit(Bandwidth.simple(100, Duration.ofSeconds(10)).withId("technical-limit"))
     .build();
diff --git a/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc b/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc
index 9417ff92..426a8de9 100644
--- a/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc
+++ b/asciidoc/src/main/docs/asciidoc/advanced/listener.adoc
@@ -40,7 +40,7 @@ The bucket can be decorated by listener via ``toListenable`` method.
 ----
 BucketListener listener = new MyListener();
 
-Bucket bucket = Bucket4j.builder()
+Bucket bucket = Bucket.builder()
     .addLimit(Bandwidth.simple(100, Duration.ofMinutes(1)))
     .build()
     .toListenable(listener);
diff --git a/asciidoc/src/main/docs/asciidoc/basic/api-reference.adoc b/asciidoc/src/main/docs/asciidoc/basic/api-reference.adoc
index 86765b47..5d75af7c 100644
--- a/asciidoc/src/main/docs/asciidoc/basic/api-reference.adoc
+++ b/asciidoc/src/main/docs/asciidoc/basic/api-reference.adoc
@@ -188,12 +188,12 @@ long getAvailableTokens();
      * For example how does replaceConfiguration implementation should bind bandwidths to each other in the following example?
      *
      * 
-     *     Bucket bucket = Bucket4j.builder()
+     *     Bucket bucket = Bucket.builder()
      *                       .addLimit(Bandwidth.simple(10, Duration.ofSeconds(1)))
      *                       .addLimit(Bandwidth.simple(10000, Duration.ofHours(1)))
      *                       .build();
      *     ...
-     *     BucketConfiguration newConfiguration = Bucket4j.configurationBuilder()
+     *     BucketConfiguration newConfiguration = BucketConfiguration.builder()
      *                                               .addLimit(Bandwidth.simple(5000, Duration.ofHours(1)))
      *                                               .addLimit(Bandwidth.simple(100, Duration.ofSeconds(10)))
      *                                               .build();
@@ -205,12 +205,12 @@ long getAvailableTokens();
      * so in case of multiple bandwidth configuratoin replacement code can copy available tokens by bandwidth ID. So it is better to rewrite code above as following:
      * 
      * 
-     * Bucket bucket = Bucket4j.builder()
+     * Bucket bucket = Bucket.builder()
      *                            .addLimit(Bandwidth.simple(10, Duration.ofSeconds(1)).withId("technical-limit"))
      *                            .addLimit(Bandwidth.simple(10000, Duration.ofHours(1)).withId("business-limit"))
      *                            .build();
      * ...
-     * BucketConfiguration newConfiguration = Bucket4j.configurationBuilder()
+     * BucketConfiguration newConfiguration = BucketConfiguration.builder()
      *                            .addLimit(Bandwidth.simple(5000, Duration.ofHours(1)).withId("business-limit"))
      *                            .addLimit(Bandwidth.simple(100, Duration.ofSeconds(10)).withId("technical-limit"))
      *                            .build();
diff --git a/asciidoc/src/main/docs/asciidoc/basic/production-generic-checklist.adoc b/asciidoc/src/main/docs/asciidoc/basic/production-generic-checklist.adoc
index 1fbbafde..286a1b16 100644
--- a/asciidoc/src/main/docs/asciidoc/basic/production-generic-checklist.adoc
+++ b/asciidoc/src/main/docs/asciidoc/basic/production-generic-checklist.adoc
@@ -14,10 +14,10 @@ you need to pay close attention to the throttling time window.
 To protect from this kind attacks, you should specify multiple limits like bellow
 [source, java]
 ----
-Bucket bucket = Bucket4j.jCacheBuilder(RecoveryStrategy.RECONSTRUCT)
+Bucket bucket = Bucket.builder()
     .addLimit(Bandwidth.simple(10000, Duration.ofSeconds(3_600))
     .addLimit(Bandwidth.simple(20, Duration.ofSeconds(1)) // attacker is unable to achieve 1000RPS and crash service in short time
-    .build(cache, bucketId);
+    .build();
 ----
 The number of limits specified per bucket does not impact the performance.
 
diff --git a/asciidoc/src/main/docs/asciidoc/basic/quick-start.adoc b/asciidoc/src/main/docs/asciidoc/basic/quick-start.adoc
index 066b4de9..134d7046 100644
--- a/asciidoc/src/main/docs/asciidoc/basic/quick-start.adoc
+++ b/asciidoc/src/main/docs/asciidoc/basic/quick-start.adoc
@@ -28,7 +28,7 @@ But acquiring stacktraces is very cost operation by itself, and you want to do i
 // define the limit 1 time per 10 minute
 Bandwidth limit = Bandwidth.simple(1, Duration.ofMinutes(10));
 // construct the bucket
-Bucket bucket = Bucket4j.builder().addLimit(limit).build();
+Bucket bucket = Bucket.builder().addLimit(limit).build();
 
 ...
 
@@ -53,7 +53,7 @@ and by contract with provider you should poll not often than 100 times per 1 min
 // define the limit 100 times per 1 minute
 Bandwidth limit = Bandwidth.simple(100, Duration.ofMinutes(1));
 // construct the bucket
-Bucket bucket = Bucket4j.builder().addLimit(limit).build();
+Bucket bucket = Bucket.builder().addLimit(limit).build();
 
 ...
 volatile double exchangeRate;
@@ -89,7 +89,7 @@ public class ThrottlingFilter implements javax.servlet.Filter {
          long overdraft = 50;
          Refill refill = Refill.greedy(10, Duration.ofSeconds(1));
          Bandwidth limit = Bandwidth.classic(overdraft, refill);
-         return Bucket4j.builder().addLimit(limit).build();
+         return Bucket.builder().addLimit(limit).build();
     }
 
     @Override
@@ -147,7 +147,7 @@ To solve problem you can construct following bucket:
 static final long MAX_WAIT_NANOS = TimeUnit.HOURS.toNanos(1);
 // ...
 
-Bucket bucket = Bucket4j.builder()
+Bucket bucket = Bucket.builder()
        // allows 1000 tokens per 1 minute
        .addLimit(Bandwidth.simple(1000, Duration.ofMinutes(1)))
        // but not often then 50 tokens per 1 second
@@ -173,7 +173,7 @@ int initialTokens = 42;
 Bandwidth limit = Bandwidth
     .simple(1000, Duration.ofHours(1))
     .withInitialTokens(initialTokens);
-Bucket bucket = Bucket4j.builder()
+Bucket bucket = Bucket.builder()
     .addLimit(limit)
     .build();
 ----
@@ -230,7 +230,7 @@ By default Bucket4j uses millisecond time resolution, it is preferred time measu
 But rarely(for example benchmarking) you wish the nanosecond precision:
 [source, java]
 ----
-Bucket4j.builder().withNanosecondPrecision()
+Bucket.builder().withNanosecondPrecision()
 ----
 Be very careful to choose this time measurement strategy, because ``System.nanoTime()`` produces inaccurate results,
 use this strategy only if period of bandwidth is too small that millisecond resolution will be undesired.
@@ -252,7 +252,7 @@ public class ClusteredTimeMeter implements TimeMeter {
 }
 
 Bandwidth limit = Bandwidth.simple(100, Duration.ofMinutes(1));
-Bucket bucket = Bucket4j.builder()
+Bucket bucket = Bucket.builder()
     .withCustomTimePrecision(new ClusteredTimeMeter())
     .addLimit(limit)
     .build();
diff --git a/asciidoc/src/main/docs/asciidoc/distributed/asynchronous.adoc b/asciidoc/src/main/docs/asciidoc/distributed/asynchronous.adoc
index 8d05dd84..38b35bdc 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/asynchronous.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/asynchronous.adoc
@@ -1,13 +1,15 @@
 === Asynchronous API
 Since version ``3.0`` Bucket4j provides asynchronous analogs for majority of API methods.
-Async view of bucket is availble through ``asAsync()`` method:
+The asynchronous view of ``ProxyManager`` is available through the ``asAsync()`` method:
 [source, java]
 ----
-Bucket bucket = ...;
-AsyncBucket asyncBucket = bucket.asAsync();
+ProxyManager proxyManager = ...;
+AsyncProxyManager asyncProxyManager = proxyManager.asAsync();
+
+BucketConfiguration configuration = ...;
+AsyncBucketProxy asyncBucket = asyncProxyManager.builder().build(key, configuration);
 ----
-Each method of class [AsyncBucket](https://github.com/vladimir-bukhtoyarov/bucket4j/blob/3.1/bucket4j-core/src/main/java/io/github/bucket4j/AsyncBucket.java)
- has full equivalence with same semantic in synchronous version in the [Bucket](https://github.com/vladimir-bukhtoyarov/bucket4j/blob/3.0/bucket4j-core/src/main/java/io/github/bucket4j/Bucket.java) class.
+Each method of ``AsyncBucketProxy`` has the same semantics as its counterpart in the synchronous ``Bucket`` class.
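+
+For example, token consumption through the asynchronous view returns a ``CompletableFuture`` (a minimal sketch, reusing the ``asyncBucket`` constructed above):
+[source, java]
+----
+asyncBucket.tryConsume(1).thenAccept(consumed -> {
+    if (consumed) {
+        // the token was acquired, proceed with the work
+    } else {
+        // the limit is exceeded, reject or postpone the work
+    }
+});
+----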
 
 ==== Example - limiting the rate of access to asynchronous servlet
 Imagine that you develop SMS service, which allows send SMS via HTTP interface.
@@ -36,11 +38,10 @@ non-blocking architecture means that both SMS sending and limit checking should
 **Mockup of service based on top of Servlet API and bucket4j-infinispan**:
 [source, java]
 ----
-
 public class SmsServlet extends javax.servlet.http.HttpServlet {
 
     private SmsSender smsSender;
-    private ProxyManager buckets;
+    private AsyncProxyManager buckets;
     private Supplier configuration;
        
     @Override
@@ -51,13 +52,13 @@ public class SmsServlet extends javax.servlet.http.HttpServlet {
         smsSender = (SmsSender) ctx.getAttribute("sms-sender");
         
         FunctionalMapImpl bucketMap = (FunctionalMapImpl) ctx.getAttribute("bucket-map");
-        this.buckets = Bucket4j.extension(Infinispan.class).proxyManagerForMap(bucketMap);
+        this.buckets = new InfinispanProxyManager(bucketMap).asAsync();
         
         this.configuration = () -> {
             long overdraft = 20;
             Refill refill = Refill.greedy(10, Duration.ofMinutes(1));
             Bandwidth limit = Bandwidth.classic(overdraft, refill);
-            return Bucket4j.configurationBuilder()
+            return BucketConfiguration.builder()
                 .addLimit(limit)
                 .build();
         };
@@ -71,7 +72,7 @@ public class SmsServlet extends javax.servlet.http.HttpServlet {
         String toNumber = req.getParameter("to");
         String text = req.getParameter("text");
         
-        Bucket bucket = buckets.getProxy(fromNumber, configuration);
+        AsyncBucketProxy bucket = buckets.builder().build(fromNumber, configuration);
-        CompletableFuture limitCheckingFuture = bucket.asAsync().tryConsumeAndReturnRemaining(1);
+        CompletableFuture limitCheckingFuture = bucket.tryConsumeAndReturnRemaining(1);
         final AsyncContext asyncContext = req.startAsync();
         limitCheckingFuture.thenCompose(probe -> {
diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/coherence.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/coherence.adoc
index 31f129d9..00fb0432 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/coherence.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/coherence.adoc
@@ -1,15 +1,5 @@
 [[bucket4j-coherence, Bucket4j-Coherence]]
 === Oracle Coherence integration
-Before use ``bucket4j-coherence`` module please read [bucket4j-jcache documentation](jcache-usage.md),
-because ``bucket4j-coherence`` is just a follow-up of ``bucket4j-jcache``.
-
-**Question:** Bucket4j already supports JCache since version ``1.2``. Why it was needed to introduce direct support for ``Oracle Coherence``?  
-**Answer:** Because https://www.jcp.org/en/jsr/detail?id=107[JCache API (JSR 107)] does not specify asynchronous API,
-developing the dedicated module ``bucket4j-coherence`` was the only way to provide asynchrony for users who use ``Bucket4j`` and ``Oracle Coherence`` together.
-
-**Question:** Should I migrate from ``bucket4j-jcache`` to ``bucketj-coherence`` If I do not need in asynchronous API?  
-**Answer:** No, you should not migrate to ``bucketj-coherence`` in this case.
-
 ==== Dependencies
 To use ``bucket4j-coherence`` extension you need to add following dependency:
 [source, xml, subs=attributes+]
@@ -25,30 +15,25 @@ To use ``bucket4j-coherence`` extension you need to add following dependency:
 [source, java]
 ----
 com.tangosol.net.NamedCache cache = ...;
-...
+private static final CoherenceProxyManager proxyManager = new CoherenceProxyManager(cache);
 
-Bucket bucket = Bucket4j.extension(Coherence.class).builder()
-                   .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
-                   .build(cache, key, RecoveryStrategy.RECONSTRUCT);
-----
-
-==== Example of ProxyManager instantiation
-[source, java]
-----
-com.tangosol.net.NamedCache cache = ...;
+...
+BucketConfiguration configuration = BucketConfiguration.builder()
+    .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
+    .build();
 
-ProxyManager proxyManager = Bucket4j.extension(Coherence.class).proxyManagerForCache(cache);
+Bucket bucket = proxyManager.builder().build(key, configuration);
 ----
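+
+The resulting proxy behaves like any other ``Bucket``; a minimal usage sketch:
+[source, java]
+----
+if (bucket.tryConsume(1)) {
+    // the rate limit is not exceeded
+} else {
+    // the request should be throttled
+}
+----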
 
 ==== Configuring POF serialization for Bucket4j library classes
 If you configure nothing, then by default Java serialization will be used for serialization Bucket4j library classes. Java serialization can be rather slow and should be avoided in general.
 ``Bucket4j`` provides https://docs.oracle.com/cd/E24290_01/coh.371/e22837/api_pof.htm#COHDG1363[custom POF serializers] for all library classes that could be transferred over network.
-To let Coherence know about POF serializers you should register three serializers in the POF configuration config file:
+To let Coherence know about the POF serializer you should register it in the POF configuration config file:
-* ``CoherenceEntryProcessorAdapterPofSerializer`` for class ``CoherenceEntryProcessorAdapter``
-* ``GridBucketStatePofSerializer`` for class ``GridBucketState``
-* ``CommandResultPofSerializer`` for class ``CommandResult``
+====
+``io.github.bucket4j.grid.coherence.pof.CoherenceEntryProcessorPofSerializer`` for class ``io.github.bucket4j.grid.coherence.CoherenceProcessor``
+====
 
-*Example of POF serialization:*
+.Example of POF serialization config:
 [source, xml]
 ----
 
         
         <user-type>
             <type-id>1001</type-id>
-            <class-name>io.github.bucket4j.grid.coherence.CoherenceEntryProcessorAdapter</class-name>
-            <serializer>
-                <class-name>io.github.bucket4j.grid.coherence.pof.CoherenceEntryProcessorAdapterPofSerializer</class-name>
-            </serializer>
-        </user-type>
-        <user-type>
-            <type-id>1002</type-id>
-            <class-name>io.github.bucket4j.grid.GridBucketState</class-name>
-            <serializer>
-                <class-name>io.github.bucket4j.grid.coherence.pof.GridBucketStatePofSerializer</class-name>
-            </serializer>
-        </user-type>
-        <user-type>
-            <type-id>1003</type-id>
-            <class-name>io.github.bucket4j.grid.CommandResult</class-name>
+            <class-name>io.github.bucket4j.grid.coherence.CoherenceProcessor</class-name>
             <serializer>
-                <class-name>io.github.bucket4j.grid.coherence.pof.CommandResultPofSerializer</class-name>
+                <class-name>io.github.bucket4j.grid.coherence.pof.CoherenceEntryProcessorPofSerializer</class-name>
             </serializer>
         </user-type>
     </user-type-list>
diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/hazelcast.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/hazelcast.adoc
index e6e5bae3..f41204b4 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/hazelcast.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/hazelcast.adoc
@@ -1,15 +1,5 @@
 [[bucket4j-hazelcast, Bucket4j-Hazelcast]]
 === Hazelcast integration
-Before use ``bucket4j-hazelcast`` module please read [bucket4j-jcache documentation](jcache-usage.md),
-because ``bucket4j-hazelcast`` is just a follow-up of ``bucket4j-jcache``.
-
-**Question:** Bucket4j already supports JCache since version ``1.2``. Why it was needed to introduce direct support for ``Hazelcast``?  
-**Answer:** Because https://www.jcp.org/en/jsr/detail?id=107[JCache API (JSR 107)] does not specify asynchronous API,
-developing the dedicated module ``bucket4j-hazelcast`` was the only way to provide asynchrony for users who use ``Bucket4j`` and ``Hazelcast`` together.
-
-**Question:** Should I migrate from ``bucket4j-jcache`` to ``bucket4j-hazelcast`` If I do not need in asynchronous API?  
-**Answer:** No, you should not migrate to ``bucket4j-hazelcast`` in this case.
-
 ==== Dependencies
 To use Bucket4j extension for Hazelcast with ``Hazelcast 4.x`` you need to add following dependency:
 [source, xml, subs=attributes+]
@@ -39,20 +29,14 @@ just log issue to https://github.com/vladimir-bukhtoyarov/bucket4j/issues[bug tr
 [source, java]
 ----
 IMap map = ...;
-...
+private static final HazelcastProxyManager proxyManager = new HazelcastProxyManager(map);
 
-Bucket bucket = Bucket4j.extension(Hazelcast.class).builder()
-                   .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
-                   .build(map, key, RecoveryStrategy.RECONSTRUCT);
-----
-
-==== Example of ProxyManager instantiation
-[source, java]
-----
-IMap map = ...;
 ...
+BucketConfiguration configuration = BucketConfiguration.builder()
+    .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
+    .build();
 
-ProxyManager proxyManager = Bucket4j.extension(Hazelcast.class).proxyManagerForMap(map);
+Bucket bucket = proxyManager.builder().build(key, configuration);
 ----
 
 ==== Configuring Custom Serialization for Bucket4j library classes
@@ -74,9 +58,9 @@ import io.github.bucket4j.grid.hazelcast.serialization.HazelcastSerializer;
     // and may use more types in the future, so leave enough empty space after baseTypeIdNumber 
     int baseTypeIdNumber = 10000;
     
-    HazelcastSerializer.addCustomSerializers(serializationConfig, baseTypeIdNumber);
+    HazelcastProxyManager.addCustomSerializers(serializationConfig, baseTypeIdNumber);
 ----
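+
+For context, a minimal sketch of where this registration fits, assuming programmatic Hazelcast configuration and a hypothetical map name ``buckets``:
+[source, java]
+----
+Config config = new Config();
+// register Bucket4j serializers before the Hazelcast instance is started
+HazelcastProxyManager.addCustomSerializers(config.getSerializationConfig(), 10000);
+
+HazelcastInstance hazelcastInstance = Hazelcast.newHazelcastInstance(config);
+IMap map = hazelcastInstance.getMap("buckets");
+HazelcastProxyManager proxyManager = new HazelcastProxyManager(map);
+----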
 
 ==== Known issues related with Docker and(or) SpringBoot
-* [#186 HazelcastEntryProcessorAdapter class not found](https://github.com/vladimir-bukhtoyarov/bucket4j/discussions/186) - check file permissins inside your image.
-* [#182 HazelcastSerializationException with Hazelcast 4.2](https://github.com/vladimir-bukhtoyarov/bucket4j/issues/162) - properly setup classloader for Hazelcast client configuration.
+* https://github.com/vladimir-bukhtoyarov/bucket4j/discussions/186[#186 HazelcastEntryProcessor class not found] - check file permissions inside your image.
+* https://github.com/vladimir-bukhtoyarov/bucket4j/issues/162[#182 HazelcastSerializationException with Hazelcast 4.2] - properly set up the classloader for the Hazelcast client configuration.
diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/ignite.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/ignite.adoc
index ca9dac15..3bf3b108 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/ignite.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/ignite.adoc
@@ -23,45 +23,45 @@ To use ``bucket4j-ignite`` extension you need to add following dependency:
 
 ----
 
-==== Example of Bucket instantiation
+==== Example of Bucket instantiation via IgniteProxyManager
 [source, java]
 ----
 org.apache.ignite.IgniteCache cache = ...;
+private static final IgniteProxyManager proxyManager = new IgniteProxyManager(cache);
 ...
 
-Bucket bucket = Bucket4j.extension(io.github.bucket4j.grid.ignite.Ignite.class).builder()
-                   .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
-                   .build(cache, key, RecoveryStrategy.RECONSTRUCT);
-----
-
-==== Example of ProxyManager instantiation
-[source, java]
-----
-org.apache.ignite.IgniteCache cache = ...;
-...
-
-ProxyManager proxyManager = Bucket4j.extension(io.github.bucket4j.grid.ignite.Ignite.class).proxyManagerForCache(cache);
+BucketConfiguration configuration = BucketConfiguration.builder()
+    .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
+    .build();
+Bucket bucket = proxyManager.builder().build(key, configuration);
 ----
+IMPORTANT: Pay attention that IgniteProxyManager requires all nodes in the cluster to have the Bucket4j jars on their classpath.
 
 ==== Example of Bucket instantiation via Thin Client
 [source, java]
 ----
 org.apache.ignite.client.ClientCache cache = ...;
 org.apache.ignite.client.ClientCompute clientCompute = ...;
-
+private static final IgniteThinClientProxyManager proxyManager = new IgniteThinClientProxyManager(cache, clientCompute);
 ...
 
-Bucket bucket = Bucket4j.extension(io.github.bucket4j.grid.ignite.Ignite.class).builder()
-                   .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
-                   .build(clientCompute, cache, key, RecoveryStrategy.RECONSTRUCT);
+BucketConfiguration configuration = BucketConfiguration.builder()
+    .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
+    .build();
+Bucket bucket = proxyManager.builder().build(key, configuration);
 ----
+IMPORTANT: Pay attention that IgniteThinClientProxyManager requires all nodes in the cluster to have the Bucket4j jars on their classpath.
 
-==== Example of ProxyManager instantiation via Thin Client
+==== Example of Bucket instantiation via Thin Client and IgniteThinClientCasBasedProxyManager
 [source, java]
 ----
-org.apache.ignite.client.ClientCache cache = ...;
-org.apache.ignite.client.ClientCompute clientCompute = ...;
+org.apache.ignite.client.ClientCache cache = ...;
+private static final IgniteThinClientCasBasedProxyManager proxyManager = new IgniteThinClientCasBasedProxyManager(cache);
 ...
 
-ProxyManager proxyManager = Bucket4j.extension(io.github.bucket4j.grid.ignite.Ignite.class).proxyManagerForCache(clientCompute, cache);
-----
\ No newline at end of file
+BucketConfiguration configuration = BucketConfiguration.builder()
+    .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
+    .build();
+Bucket bucket = proxyManager.builder().build(key, configuration);
+----
+IMPORTANT: IgniteThinClientCasBasedProxyManager does not require the Bucket4j jars on the cluster nodes' classpath, but it operates with higher latency, so choose it over IgniteThinClientProxyManager only if you have no control over the cluster classpath.
\ No newline at end of file
diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/infinispan.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/infinispan.adoc
index 56e9cfe0..b64eab98 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/infinispan.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/infinispan.adoc
@@ -1,13 +1,5 @@
 [[bucket4j-infinispan, Bucket4j-Infinispan]]
 === Infinispan integration
-Before use ``bucket4j-infinispan`` module please read [bucket4j-jcache documentation](jcache-usage.md),
-because ``bucket4j-infinispan`` is just a follow-up of ``bucket4j-jcache``.
-
-**Question:** Bucket4j already supports JCache since version ``1.2``. Why it was needed to introduce direct support for ``Infinispan``?  
-**Answer:** When you want to use Bucket4j together with Infinispan, you must always use ``bucket4j-infinispan`` module instead of ``bucket4j-jcache``,   
-because Infinispan does not provide mutual exclusion for entry-processors. Any attempt to use Infinispan via ``bucket4j-jcache`` will be failed with UnsupportedOperationException exception
-at bucket construction time.
-
 ==== Dependencies
 To use ``bucket4j-infinispan`` with ``Infinispan 9.x, 10.x`` extension you need to add following dependency:
 [source, xml, subs=attributes+]
@@ -28,18 +20,16 @@ If you are using legacy version of Infinispan ``8.x`` then you need to add follo
 
 ----
 ==== General compatibility matrix principles::
-* Bucket4j authors do not perform continues monitoring of new Infinispan releases. So, there is can be case when there is no one version of Bucket4j which is compatible with newly released Infinispan,
-just log issue to https://github.com/vladimir-bukhtoyarov/bucket4j/issues[bug tracker] in this case, adding support to new version of Infinispan is usually easy exercise.
-* Integrations with legacy versions of Infinispan are not removed without a clear reason. Hence You are in safety, even you are working in a big enterprise company that does not update its infrastructure frequently because You still get new Bucket4j's features even for legacy Infinispan's releases.
+* Bucket4j authors do not perform continuous monitoring of new Infinispan releases, so there can be a case when no released version of Bucket4j is compatible with a newly released Infinispan. Just log an issue to the https://github.com/vladimir-bukhtoyarov/bucket4j/issues[bug tracker] in this case; adding support for a new version of Infinispan is usually an easy exercise.
+* Integrations with legacy versions of Infinispan are not removed without a clear reason. Hence, you are safe even if you work in a big enterprise company that does not update its infrastructure frequently, because you still get new Bucket4j features even for legacy Infinispan releases.
 
 
 ==== Special notes for Infinispan 10.0+
-As mentioned in the https://infinispan.org/docs/dev/titles/developing/developing.html#marshalling[Infinispan Marshalling documentation], since release ``10.0.0`` Infinispan does not allow deserialization of custom payloads into Java classes.
-If you do not configure serialization(as described bellow), you will get the error like this on any attempt to use Bucket4j with brand new Infinispan release:
+As mentioned in the https://infinispan.org/docs/dev/titles/developing/developing.html#marshalling[Infinispan Marshalling documentation], since release ``10.0.0`` Infinispan does not allow deserialization of custom payloads into Java classes. If you do not configure serialization (as described below), any attempt to use Bucket4j with a brand new Infinispan release will fail with an error like this:
 [source, bash]
 ----
 Jan 02, 2020 4:57:56 PM org.infinispan.marshall.persistence.impl.PersistenceMarshallerImpl objectToBuffer
-WARN: ISPN000559: Cannot marshall 'class io.github.bucket4j.grid.infinispan.SerializableFunctionAdapter'
+WARN: ISPN000559: Cannot marshall 'class io.github.bucket4j.grid.infinispan.InfinispanProcessor'
 java.lang.IllegalArgumentException: No marshaller registered for Java type io.github.bucket4j.grid.infinispan.SerializableFunctionAdapter
 	at org.infinispan.protostream.impl.SerializationContextImpl.getMarshallerDelegate(SerializationContextImpl.java:279)
 	at org.infinispan.protostream.WrappedMessage.writeMessage(WrappedMessage.java:240)
@@ -52,7 +42,7 @@ Do not forget to add ``io.github.bucket4j.*`` regexp to the whitelist if choosin
 * And last way(recommended) just register ``Bucket4j serialization context initializer`` in the serialization configuration. 
 You can do it in both programmatically and declarative ways:
 
-*Programmatic registration of Bucket4jProtobufContextInitializer*
+.Programmatic registration of Bucket4jProtobufContextInitializer
 [source, java]
 ----
 import io.github.bucket4j.grid.infinispan.serialization.Bucket4jProtobufContextInitializer;
@@ -62,31 +52,25 @@ GlobalConfigurationBuilder builder = new GlobalConfigurationBuilder();
 builder.serialization().addContextInitializer(new Bucket4jProtobufContextInitializer());
 ----
 
-*Declarative registration of Bucket4jProtobufContextInitializer*
+.Declarative registration of Bucket4jProtobufContextInitializer
 [source, xml]
 ----
 <serialization>
     <context-initializer class="io.github.bucket4j.grid.infinispan.serialization.Bucket4jProtobufContextInitializer"/>
 </serialization>
 ----
-And that is all. Just registering ``Bucket4jProtobufContextInitializer`` in any way is enough to make Bucket4j compatible with ProtoStream marshaller, you do not have to care about *.proto files, annotations, whitelist etc,
-all neccessary Protobuffers configs generated by ``Bucket4jProtobufContextInitializer`` and registerd on the fly.
+And that is all. Registering ``Bucket4jProtobufContextInitializer`` in either way is enough to make Bucket4j compatible with the ProtoStream marshaller; you do not have to care about ``*.proto`` files, annotations, whitelists, etc. All necessary ProtoStream configuration is generated by ``Bucket4jProtobufContextInitializer`` and registered on the fly.
 
 ==== Example of Bucket instantiation
 [source, java]
 ----
 org.infinispan.functional.FunctionalMap.ReadWriteMap map = ...;
-...
-Bucket bucket = Bucket4j.extension(Infinispan.class).builder()
-                   .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
-                   .build(map, key, RecoveryStrategy.RECONSTRUCT);
-----
+private static final InfinispanProxyManager proxyManager = new InfinispanProxyManager(map);
 
-==== Example of ProxyManager instantiation
-[source, java]
-----
-org.infinispan.functional.FunctionalMap.ReadWriteMap map = ...;
 ...
+BucketConfiguration configuration = BucketConfiguration.builder()
+    .addLimit(Bandwidth.simple(1_000, Duration.ofMinutes(1)))
+    .build();
 
-ProxyManager proxyManager = Bucket4j.extension(Infinispan.class).proxyManagerForMap(map);
+Bucket bucket = proxyManager.builder().build(key, configuration);
 ----
\ No newline at end of file
diff --git a/asciidoc/src/main/docs/asciidoc/distributed/jcache/jcache-usage.adoc b/asciidoc/src/main/docs/asciidoc/distributed/jcache/jcache-usage.adoc
index 15f98866..754e2a5c 100644
--- a/asciidoc/src/main/docs/asciidoc/distributed/jcache/jcache-usage.adoc
+++ b/asciidoc/src/main/docs/asciidoc/distributed/jcache/jcache-usage.adoc
@@ -33,10 +33,10 @@ ServletFilter would be obvious place to check limits:
 ----
 public class IpThrottlingFilter implements javax.servlet.Filter {
     
-    private static final BucketConfiguration configuration = Bucket4j.configurationBuilder()
-                                                              .addLimit(Bandwidth.simple(30, Duration.ofMinutes(1)))
-                                                              .build();
-    
+    private static final BucketConfiguration configuration = BucketConfiguration.builder()
+          .addLimit(Bandwidth.simple(30, Duration.ofMinutes(1)))
+          .build();
+
     // cache for storing token buckets, where IP is key.
     @Inject
     private javax.cache.Cache cache;
@@ -46,7 +46,7 @@ public class IpThrottlingFilter implements javax.servlet.Filter {
     @Override
     public void init(FilterConfig filterConfig) throws ServletException {
          // init bucket registry
-         buckets = Bucket4j.extension(JCache.class).proxyManagerForCache(cache);
+         buckets = new JCacheProxyManager<>(cache);
     }
     
     @Override
@@ -55,7 +55,7 @@ public class IpThrottlingFilter implements javax.servlet.Filter {
         String ip = IpHelper.getIpFromRequest(httpRequest);
         
         // acquire cheap proxy to bucket  
-        Bucket bucket = buckets.getProxy(ip, configuration);
+        Bucket bucket = buckets.builder().build(ip, configuration);
 
         // tryConsume returns false immediately if no tokens available with the bucket
         if (bucket.tryConsume(1)) {
@@ -98,7 +98,7 @@ public class IpThrottlingFilter implements javax.servlet.Filter {
     @Override
     public void init(FilterConfig filterConfig) throws ServletException {
          // init bucket registry
-         buckets = new JCacheProxyManager<>(getCache(), ClientSideConfig.getDefault());
+         buckets = new JCacheProxyManager<>(cache);
     }
     
     @Override
@@ -128,7 +128,7 @@ public class IpThrottlingFilter implements javax.servlet.Filter {
     private Supplier getConfigSupplierForUser(String userId) {
          return () -> {
              long translationsPerDay = limitProvider.readPerDayLimitFromAgreementsDatabase(userId);
-             return Bucket4j.configurationBuilder()
+             return BucketConfiguration.builder()
                          .addLimit(Bandwidth.simple(translationsPerDay, Duration.ofDays(1)))
                          .build();
          };
diff --git a/bucket4j-coherence/src/main/java/io/github/bucket4j/grid/coherence/CoherenceProxyManager.java b/bucket4j-coherence/src/main/java/io/github/bucket4j/grid/coherence/CoherenceProxyManager.java
index 8e52e1ea..12834750 100644
--- a/bucket4j-coherence/src/main/java/io/github/bucket4j/grid/coherence/CoherenceProxyManager.java
+++ b/bucket4j-coherence/src/main/java/io/github/bucket4j/grid/coherence/CoherenceProxyManager.java
@@ -59,6 +59,10 @@ public class CoherenceProxyManager extends AbstractProxyManager {
 
     private final NamedCache cache;
 
+    public CoherenceProxyManager(NamedCache cache) {
+        this(cache, ClientSideConfig.getDefault());
+    }
+
     public CoherenceProxyManager(NamedCache cache, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         this.cache = cache;
diff --git a/bucket4j-core/src/main/java/io/github/bucket4j/Bandwidth.java b/bucket4j-core/src/main/java/io/github/bucket4j/Bandwidth.java
index f598885e..098e0fa1 100644
--- a/bucket4j-core/src/main/java/io/github/bucket4j/Bandwidth.java
+++ b/bucket4j-core/src/main/java/io/github/bucket4j/Bandwidth.java
@@ -59,7 +59,7 @@
  * in other words any token can not be partially consumed.
  * 
  * Example of multiple bandwidth:
  * {@code
  * // Adds bandwidth that restricts to consume not often 1000 tokens per 1 minute and not often than 100 tokens per second
- * Bucket bucket = Bucket4j.builder().
+ * Bucket bucket = Bucket.builder().
  *      .addLimit(Bandwidth.create(1000, Duration.ofMinutes(1)));
  *      .addLimit(Bandwidth.create(100, Duration.ofSeconds(1)));
  *      .build()
diff --git a/bucket4j-core/src/main/java/io/github/bucket4j/Bucket.java b/bucket4j-core/src/main/java/io/github/bucket4j/Bucket.java
index d9a6d6ca..1e236321 100644
--- a/bucket4j-core/src/main/java/io/github/bucket4j/Bucket.java
+++ b/bucket4j-core/src/main/java/io/github/bucket4j/Bucket.java
@@ -199,12 +199,12 @@ static LocalBucketBuilder builder() {
      * For example how does replaceConfiguration implementation should bind bandwidths to each other in the following example?
      * 
      * 
-     *     Bucket bucket = Bucket4j.builder()
+     *     Bucket bucket = Bucket.builder()
      *                       .addLimit(Bandwidth.simple(10, Duration.ofSeconds(1)))
      *                       .addLimit(Bandwidth.simple(10000, Duration.ofHours(1)))
      *                       .build();
      *     ...
-     *     BucketConfiguration newConfiguration = Bucket4j.configurationBuilder()
+     *     BucketConfiguration newConfiguration = BucketConfiguration.builder()
      *                                               .addLimit(Bandwidth.simple(5000, Duration.ofHours(1)))
      *                                               .addLimit(Bandwidth.simple(100, Duration.ofSeconds(10)))
      *                                               .build();
@@ -216,12 +216,12 @@ static LocalBucketBuilder builder() {
      * so in case of multiple bandwidth configuratoin replacement code can copy available tokens by bandwidth ID. So it is better to rewrite code above as following:
      * 
      * 
-     * Bucket bucket = Bucket4j.builder()
+     * Bucket bucket = Bucket.builder()
      *                            .addLimit(Bandwidth.simple(10, Duration.ofSeconds(1)).withId("technical-limit"))
      *                            .addLimit(Bandwidth.simple(10000, Duration.ofHours(1)).withId("business-limit"))
      *                            .build();
      * ...
-     * BucketConfiguration newConfiguration = Bucket4j.configurationBuilder()
+     * BucketConfiguration newConfiguration = BucketConfiguration.builder()
      *                            .addLimit(Bandwidth.simple(5000, Duration.ofHours(1)).withId("business-limit"))
      *                            .addLimit(Bandwidth.simple(100, Duration.ofSeconds(10)).withId("technical-limit"))
      *                            .build();
diff --git a/bucket4j-hazelcast-all/bucket4j-hazelcast-3/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastProxyManager.java b/bucket4j-hazelcast-all/bucket4j-hazelcast-3/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastProxyManager.java
index d9329fc9..caf7324f 100644
--- a/bucket4j-hazelcast-all/bucket4j-hazelcast-3/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastProxyManager.java
+++ b/bucket4j-hazelcast-all/bucket4j-hazelcast-3/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastProxyManager.java
@@ -60,6 +60,10 @@ public class HazelcastProxyManager extends AbstractProxyManager {
 
     private final IMap map;
 
+    public HazelcastProxyManager(IMap map) {
+        this(map, ClientSideConfig.getDefault());
+    }
+
     public HazelcastProxyManager(IMap map, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         this.map = Objects.requireNonNull(map);
diff --git a/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastBucketBuilder.java b/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastBucketBuilder.java
deleted file mode 100644
index c8976ba4..00000000
--- a/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastBucketBuilder.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*-
- * ========================LICENSE_START=================================
- * Bucket4j
- * %%
- * Copyright (C) 2015 - 2021 Vladimir Bukhtoyarov
- * %%
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- *      http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =========================LICENSE_END==================================
- */
diff --git a/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastProxyManager.java b/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastProxyManager.java
index a4c09751..2720a727 100644
--- a/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastProxyManager.java
+++ b/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/HazelcastProxyManager.java
@@ -38,7 +38,6 @@
 
 import com.hazelcast.config.SerializationConfig;
 import com.hazelcast.config.SerializerConfig;
-import com.hazelcast.core.ExecutionCallback;
 import com.hazelcast.map.IMap;
 import io.github.bucket4j.distributed.proxy.AbstractProxyManager;
 import io.github.bucket4j.distributed.proxy.ClientSideConfig;
@@ -62,6 +61,10 @@ public class HazelcastProxyManager extends AbstractProxyManager {
 
     private final IMap map;
 
+    public HazelcastProxyManager(IMap map) {
+        this(map, ClientSideConfig.getDefault());
+    }
+
     public HazelcastProxyManager(IMap map, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         this.map = Objects.requireNonNull(map);
diff --git a/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/serialization/HazelcastSerializationAdapter.java b/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/serialization/HazelcastSerializationAdapter.java
deleted file mode 100644
index c8976ba4..00000000
--- a/bucket4j-hazelcast-all/bucket4j-hazelcast/src/main/java/io/github/bucket4j/grid/hazelcast/serialization/HazelcastSerializationAdapter.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*-
- * ========================LICENSE_START=================================
- * Bucket4j
- * %%
- * Copyright (C) 2015 - 2021 Vladimir Bukhtoyarov
- * %%
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- *      http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =========================LICENSE_END==================================
- */
diff --git a/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thick/IgniteProxyManager.java b/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thick/IgniteProxyManager.java
index 2abe5037..06c7beb3 100644
--- a/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thick/IgniteProxyManager.java
+++ b/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thick/IgniteProxyManager.java
@@ -61,6 +61,10 @@ public class IgniteProxyManager extends AbstractProxyManager {
 
     private final IgniteCache cache;
 
+    public IgniteProxyManager(IgniteCache cache) {
+        this(cache, ClientSideConfig.getDefault());
+    }
+
     public IgniteProxyManager(IgniteCache cache, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         this.cache = Objects.requireNonNull(cache);
diff --git a/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thin/cas/IgniteThinClientCasBasedProxyManager.java b/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thin/cas/IgniteThinClientCasBasedProxyManager.java
index 79fc5bd3..8cd51522 100644
--- a/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thin/cas/IgniteThinClientCasBasedProxyManager.java
+++ b/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thin/cas/IgniteThinClientCasBasedProxyManager.java
@@ -37,6 +37,10 @@ public class IgniteThinClientCasBasedProxyManager extends AbstractCompareAndS
 
     private final ClientCache cache;
 
+    public IgniteThinClientCasBasedProxyManager(ClientCache cache) {
+        this(cache, ClientSideConfig.getDefault());
+    }
+
     public IgniteThinClientCasBasedProxyManager(ClientCache cache, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         this.cache = Objects.requireNonNull(cache);
diff --git a/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thin/compute/IgniteThinClientProxyManager.java b/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thin/compute/IgniteThinClientProxyManager.java
index 034187a1..c44256df 100644
--- a/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thin/compute/IgniteThinClientProxyManager.java
+++ b/bucket4j-ignite/src/main/java/io/github/bucket4j/grid/ignite/thin/compute/IgniteThinClientProxyManager.java
@@ -60,6 +60,10 @@ public class IgniteThinClientProxyManager extends AbstractProxyManager {
     private final ClientCache cache;
     private final ClientCompute clientCompute;
 
+    public IgniteThinClientProxyManager(ClientCache cache, ClientCompute clientCompute) {
+        this(cache, clientCompute, ClientSideConfig.getDefault());
+    }
+
     public IgniteThinClientProxyManager(ClientCache cache, ClientCompute clientCompute, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         this.cache = Objects.requireNonNull(cache);
diff --git a/bucket4j-infinispan-all/bucket4j-infinispan-8/src/main/java/io/github/bucket4j/grid/infinispan/InfinispanProxyManager.java b/bucket4j-infinispan-all/bucket4j-infinispan-8/src/main/java/io/github/bucket4j/grid/infinispan/InfinispanProxyManager.java
index f4c11810..2a3b93d1 100644
--- a/bucket4j-infinispan-all/bucket4j-infinispan-8/src/main/java/io/github/bucket4j/grid/infinispan/InfinispanProxyManager.java
+++ b/bucket4j-infinispan-all/bucket4j-infinispan-8/src/main/java/io/github/bucket4j/grid/infinispan/InfinispanProxyManager.java
@@ -59,6 +59,10 @@ public class InfinispanProxyManager extends AbstractProxyManager {
 
     private final ReadWriteMap readWriteMap;
 
+    public InfinispanProxyManager(ReadWriteMap readWriteMap) {
+        this(readWriteMap, ClientSideConfig.getDefault());
+    }
+
     public InfinispanProxyManager(ReadWriteMap readWriteMap, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         this.readWriteMap = Objects.requireNonNull(readWriteMap);
diff --git a/bucket4j-infinispan-all/bucket4j-infinispan/src/main/java/io/github/bucket4j/grid/infinispan/InfinispanProxyManager.java b/bucket4j-infinispan-all/bucket4j-infinispan/src/main/java/io/github/bucket4j/grid/infinispan/InfinispanProxyManager.java
index f25b0e56..1c61c56d 100644
--- a/bucket4j-infinispan-all/bucket4j-infinispan/src/main/java/io/github/bucket4j/grid/infinispan/InfinispanProxyManager.java
+++ b/bucket4j-infinispan-all/bucket4j-infinispan/src/main/java/io/github/bucket4j/grid/infinispan/InfinispanProxyManager.java
@@ -55,6 +55,10 @@ public class InfinispanProxyManager extends AbstractProxyManager {
     private final InfinispanProcessor REMOVE_BUCKET_ENTRY_PROCESSOR = new InfinispanProcessor<>(new byte[0]);
     private final ReadWriteMap readWriteMap;
 
+    public InfinispanProxyManager(ReadWriteMap readWriteMap) {
+        this(readWriteMap, ClientSideConfig.getDefault());
+    }
+
     public InfinispanProxyManager(ReadWriteMap readWriteMap, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         this.readWriteMap = Objects.requireNonNull(readWriteMap);
diff --git a/bucket4j-jcache/src/main/java/io/github/bucket4j/grid/jcache/JCacheProxyManager.java b/bucket4j-jcache/src/main/java/io/github/bucket4j/grid/jcache/JCacheProxyManager.java
index b49ccd0a..eaf36c6a 100644
--- a/bucket4j-jcache/src/main/java/io/github/bucket4j/grid/jcache/JCacheProxyManager.java
+++ b/bucket4j-jcache/src/main/java/io/github/bucket4j/grid/jcache/JCacheProxyManager.java
@@ -65,6 +65,10 @@ public class JCacheProxyManager extends AbstractProxyManager {
     private final Cache cache;
     private final boolean preferLambdaStyle;
 
+    public JCacheProxyManager(Cache cache) {
+        this(cache, ClientSideConfig.getDefault());
+    }
+
     public JCacheProxyManager(Cache cache, ClientSideConfig clientSideConfig) {
         super(clientSideConfig);
         checkCompatibilityWithProvider(cache);
diff --git a/bucket4j-redis/src/main/java/io/github/bucket4j/redis/redisson/cas/RedissonBasedProxyManager.java b/bucket4j-redis/src/main/java/io/github/bucket4j/redis/redisson/cas/RedissonBasedProxyManager.java
index 5fc0a62c..d99069fd 100644
--- a/bucket4j-redis/src/main/java/io/github/bucket4j/redis/redisson/cas/RedissonBasedProxyManager.java
+++ b/bucket4j-redis/src/main/java/io/github/bucket4j/redis/redisson/cas/RedissonBasedProxyManager.java
@@ -43,6 +43,10 @@ public class RedissonBasedProxyManager extends AbstractCompareAndSwapBasedProxyM
     private final CommandExecutor commandExecutor;
     private final long ttlMillis;
 
+    public RedissonBasedProxyManager(CommandExecutor commandExecutor, Duration ttl) {
+        this(commandExecutor, ClientSideConfig.getDefault(), ttl);
+    }
+
     public RedissonBasedProxyManager(CommandExecutor commandExecutor, ClientSideConfig clientSideConfig, Duration ttl) {
         super(clientSideConfig);
         this.commandExecutor = Objects.requireNonNull(commandExecutor);