diff --git a/client/CHANGES.txt b/client/CHANGES.txt
index c2d1ccb3c..e7505e9af 100644
--- a/client/CHANGES.txt
+++ b/client/CHANGES.txt
@@ -1,5 +1,11 @@
CHANGES
+4.1.4 (Mar 19, 2021)
+- Updated: Internal cache structure refactor.
+- Updated: Streaming revamp with several bugfixes and improved log messages.
+- Added: Cache-Control header for on-demand requests to sdk-server.
+- Updated: Localhost Client revamp & bugfix for missing splits.
+
4.1.3 (Dec 2, 2020)
- Fix Issue when closing SSE Connection
- Updated log-level for some messages
diff --git a/client/pom.xml b/client/pom.xml
index 087467ec5..e5b2d16e1 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -5,7 +5,7 @@
io.split.client
java-client-parent
- 4.1.3
+ 4.1.4
java-client
jar
diff --git a/client/src/main/java/io/split/cache/InMemoryCacheImp.java b/client/src/main/java/io/split/cache/InMemoryCacheImp.java
new file mode 100644
index 000000000..decc31d25
--- /dev/null
+++ b/client/src/main/java/io/split/cache/InMemoryCacheImp.java
@@ -0,0 +1,126 @@
+package io.split.cache;
+
+import com.google.common.collect.ConcurrentHashMultiset;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multiset;
+import com.google.common.collect.Sets;
+import io.split.engine.experiments.ParsedSplit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class InMemoryCacheImp implements SplitCache {
+
+ private static final Logger _log = LoggerFactory.getLogger(InMemoryCacheImp.class);
+
+ private final ConcurrentMap<String, ParsedSplit> _concurrentMap;
+ private final Multiset<String> _concurrentTrafficTypeNameSet;
+
+ private AtomicLong _changeNumber;
+
+ public InMemoryCacheImp() {
+ this(-1);
+ }
+
+ public InMemoryCacheImp(long startingChangeNumber) {
+ _concurrentMap = Maps.newConcurrentMap();
+ _changeNumber = new AtomicLong(startingChangeNumber);
+ _concurrentTrafficTypeNameSet = ConcurrentHashMultiset.create();
+ }
+
+ @Override
+ public void put(ParsedSplit split) {
+ _concurrentMap.put(split.feature(), split);
+
+ if (split.trafficTypeName() != null) {
+ _concurrentTrafficTypeNameSet.add(split.trafficTypeName());
+ }
+ }
+
+ @Override
+ public boolean remove(String name) {
+ ParsedSplit removed = _concurrentMap.remove(name);
+
+ if (removed != null && removed.trafficTypeName() != null) {
+ _concurrentTrafficTypeNameSet.remove(removed.trafficTypeName());
+ }
+
+ return removed != null;
+ }
+
+ @Override
+ public ParsedSplit get(String name) {
+ return _concurrentMap.get(name);
+ }
+
+ @Override
+ public Collection<ParsedSplit> getAll() {
+ return _concurrentMap.values();
+ }
+
+ @Override
+ public Collection<ParsedSplit> getMany(List<String> names) {
+ List<ParsedSplit> splits = new ArrayList<>();
+
+ for (String name : names) {
+ ParsedSplit split = _concurrentMap.get(name);
+
+ if (split != null) {
+ splits.add(split);
+ }
+ }
+
+ return splits;
+ }
+
+ @Override
+ public long getChangeNumber() {
+ return _changeNumber.get();
+ }
+
+ @Override
+ public void setChangeNumber(long changeNumber) {
+ if (changeNumber < _changeNumber.get()) {
+ _log.error("ChangeNumber for splits cache is less than previous");
+ }
+
+ _changeNumber.set(changeNumber);
+ }
+
+ @Override
+ public boolean trafficTypeExists(String trafficTypeName) {
+ // If the multiset holds [{"user",2}, {"account",0}], elementSet() only returns
+ // ["user"] (elements with a zero count, like "account", are ignored)
+ return Sets.newHashSet(_concurrentTrafficTypeNameSet.elementSet()).contains(trafficTypeName);
+ }
+
+ @Override
+ public void kill(String splitName, String defaultTreatment, long changeNumber) {
+ ParsedSplit parsedSplit = _concurrentMap.get(splitName);
+
+ ParsedSplit updatedSplit = new ParsedSplit(parsedSplit.feature(),
+ parsedSplit.seed(),
+ true,
+ defaultTreatment,
+ parsedSplit.parsedConditions(),
+ parsedSplit.trafficTypeName(),
+ changeNumber,
+ parsedSplit.trafficAllocation(),
+ parsedSplit.trafficAllocationSeed(),
+ parsedSplit.algo(),
+ parsedSplit.configurations());
+
+ _concurrentMap.put(splitName, updatedSplit);
+ }
+
+ @Override
+ public void clear() {
+ _concurrentMap.clear();
+ _concurrentTrafficTypeNameSet.clear();
+ }
+}
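
For reference, a minimal sketch of how the new SplitCache implementation is exercised. The ParsedSplit constructor argument order is taken from the kill() call above; the feature, treatment and traffic-type values are illustrative only.

    import io.split.cache.InMemoryCacheImp;
    import io.split.cache.SplitCache;
    import io.split.engine.experiments.ParsedSplit;

    import java.util.ArrayList;
    import java.util.HashMap;

    class SplitCacheUsageSketch {
        void example() {
            SplitCache cache = new InMemoryCacheImp();
            // Argument order mirrors the kill() call above; values are illustrative.
            ParsedSplit split = new ParsedSplit("my_feature", 0, false, "off",
                    new ArrayList<>(), "user", 1L, 100, 0, 2, new HashMap<>());
            cache.put(split);
            cache.setChangeNumber(1L);
            boolean known = cache.trafficTypeExists("user"); // true while the split is present
            cache.kill("my_feature", "off", 2L);             // re-stores the split as killed
            cache.remove("my_feature");                      // also decrements the traffic type count
        }
    }
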
diff --git a/client/src/main/java/io/split/cache/SegmentCache.java b/client/src/main/java/io/split/cache/SegmentCache.java
new file mode 100644
index 000000000..d75fe11d3
--- /dev/null
+++ b/client/src/main/java/io/split/cache/SegmentCache.java
@@ -0,0 +1,45 @@
+package io.split.cache;
+
+import java.util.List;
+
+/**
+ * Cache contract for segments
+ * @author lucasecheverz
+ */
+public interface SegmentCache {
+
+ /**
+ * applies an incremental update to a segment, adding and removing keys
+ * @param segmentName segment to update
+ * @param toAdd keys to add
+ * @param toRemove keys to remove
+ */
+ void updateSegment(String segmentName, List<String> toAdd, List<String> toRemove);
+
+ /**
+ * evaluates if a key belongs to a segment
+ * @param segmentName segment to check
+ * @param key key to look up
+ * @return true if the key belongs to the segment, false otherwise
+ */
+ boolean isInSegment(String segmentName, String key);
+
+ /**
+ * updates the changeNumber of a segment
+ * @param segmentName segment to update
+ * @param changeNumber new change number
+ */
+ void setChangeNumber(String segmentName, long changeNumber);
+
+ /**
+ * returns the changeNumber of a segment
+ * @param segmentName segment to look up
+ * @return the segment's change number, or -1 if the segment is unknown
+ */
+ long getChangeNumber(String segmentName);
+
+ /**
+ * clear all segments
+ */
+ void clear();
+}
diff --git a/client/src/main/java/io/split/cache/SegmentCacheInMemoryImpl.java b/client/src/main/java/io/split/cache/SegmentCacheInMemoryImpl.java
new file mode 100644
index 000000000..0c705c016
--- /dev/null
+++ b/client/src/main/java/io/split/cache/SegmentCacheInMemoryImpl.java
@@ -0,0 +1,62 @@
+package io.split.cache;
+
+import com.google.common.collect.Maps;
+import io.split.engine.segments.SegmentImp;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * In-memory implementation of SegmentCache
+ * @author lucasecheverz
+ */
+public class SegmentCacheInMemoryImpl implements SegmentCache {
+ private static final Logger _log = LoggerFactory.getLogger(SegmentCacheInMemoryImpl.class);
+ private static final long DEFAULT_CHANGE_NUMBER = -1L;
+ private final ConcurrentMap<String, SegmentImp> _segments = Maps.newConcurrentMap();
+
+ @Override
+ public void updateSegment(String segmentName, List<String> toAdd, List<String> toRemove) {
+ if (_segments.get(segmentName) == null) {
+ _segments.put(segmentName, new SegmentImp(DEFAULT_CHANGE_NUMBER, segmentName, toAdd));
+ }
+
+ _segments.get(segmentName).update(toAdd, toRemove);
+ }
+
+ @Override
+ public boolean isInSegment(String segmentName, String key) {
+ SegmentImp segmentImp = _segments.get(segmentName);
+ if (segmentImp == null) {
+ _log.error("Segment " + segmentName + " not found.");
+ return false;
+ }
+ return segmentImp.contains(key);
+ }
+
+ @Override
+ public void setChangeNumber(String segmentName, long changeNumber) {
+ if (_segments.get(segmentName) == null) {
+ _log.error("Segment " + segmentName + " not found.");
+ return;
+ }
+ _segments.get(segmentName).setChangeNumber(changeNumber);
+ }
+
+ @Override
+ public long getChangeNumber(String segmentName) {
+ SegmentImp segmentImp = _segments.get(segmentName);
+ if (segmentImp == null) {
+ _log.error("Segment " + segmentName + " not found.");
+ return DEFAULT_CHANGE_NUMBER;
+ }
+ return segmentImp.getChangeNumber();
+ }
+
+ @Override
+ public void clear() {
+ _segments.clear();
+ }
+}
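
For reference, a minimal sketch of the new SegmentCache in use; the segment and key names are illustrative only.

    import io.split.cache.SegmentCache;
    import io.split.cache.SegmentCacheInMemoryImpl;

    import java.util.Arrays;
    import java.util.Collections;

    class SegmentCacheUsageSketch {
        void example() {
            SegmentCache segments = new SegmentCacheInMemoryImpl();
            segments.updateSegment("beta_users", Arrays.asList("user-1", "user-2"), Collections.emptyList());
            segments.setChangeNumber("beta_users", 100L);
            boolean member = segments.isInSegment("beta_users", "user-1");  // true
            long cn = segments.getChangeNumber("unknown_segment");          // -1, and an error is logged
        }
    }
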
diff --git a/client/src/main/java/io/split/cache/SplitCache.java b/client/src/main/java/io/split/cache/SplitCache.java
new file mode 100644
index 000000000..c9769d176
--- /dev/null
+++ b/client/src/main/java/io/split/cache/SplitCache.java
@@ -0,0 +1,19 @@
+package io.split.cache;
+
+import io.split.engine.experiments.ParsedSplit;
+
+import java.util.Collection;
+import java.util.List;
+
+public interface SplitCache {
+ void put(ParsedSplit split);
+ boolean remove(String name);
+ ParsedSplit get(String name);
+ Collection<ParsedSplit> getAll();
+ Collection<ParsedSplit> getMany(List<String> names);
+ long getChangeNumber();
+ void setChangeNumber(long changeNumber);
+ boolean trafficTypeExists(String trafficTypeName);
+ void kill(String splitName, String defaultTreatment, long changeNumber);
+ void clear();
+}
diff --git a/client/src/main/java/io/split/client/ApiKeyCounter.java b/client/src/main/java/io/split/client/ApiKeyCounter.java
new file mode 100644
index 000000000..8c39394dd
--- /dev/null
+++ b/client/src/main/java/io/split/client/ApiKeyCounter.java
@@ -0,0 +1,66 @@
+package io.split.client;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ConcurrentHashMultiset;
+import com.google.common.collect.Multiset;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ApiKeyCounter {
+
+ private static final Logger _log = LoggerFactory.getLogger(ApiKeyCounter.class);
+ private static final Multiset<String> USED_API_KEYS = ConcurrentHashMultiset.create();
+
+ private ApiKeyCounter() {}
+
+ public static ApiKeyCounter getApiKeyCounterInstance() {
+ return ApiKeyCounterHolder.INSTANCE;
+ }
+
+ // Inner class that holds the singleton instance
+ private static class ApiKeyCounterHolder
+ {
+ private static final ApiKeyCounter INSTANCE = new ApiKeyCounter();
+ }
+
+ public void add(String apiKey) {
+ String message;
+ if (USED_API_KEYS.contains(apiKey)) {
+ message = String.format("factory instantiation: You already have %s with this API Key. " +
+ "We recommend keeping only one instance of the factory at all times (Singleton pattern) and reusing " +
+ "it throughout your application.",
+ USED_API_KEYS.count(apiKey) == 1 ? "1 factory" : String.format("%s factories", USED_API_KEYS.count(apiKey)));
+ _log.warn(message);
+ } else if (!USED_API_KEYS.isEmpty()) {
+ message = "factory instantiation: You already have an instance of the Split factory. " +
+ "Make sure you definitely want this additional instance. We recommend keeping only one instance of " +
+ "the factory at all times (Singleton pattern) and reusing it throughout your application.“";
+ _log.warn(message);
+ }
+ USED_API_KEYS.add(apiKey);
+ }
+
+ public void remove(String apiKey) {
+ USED_API_KEYS.remove(apiKey);
+ }
+
+ /**
+ * Intended for tests only.
+ * @param apiKey api key to look up
+ * @return true if the api key has been registered
+ */
+ @VisibleForTesting
+ boolean isApiKeyPresent(String apiKey) {
+ return USED_API_KEYS.contains(apiKey);
+ }
+
+ /**
+ * Intended for tests only.
+ * @param apiKey api key to look up
+ * @return number of factories registered with this api key
+ */
+ @VisibleForTesting
+ int getCount(String apiKey) {
+ return USED_API_KEYS.count(apiKey);
+ }
+}
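
For reference, a short sketch of the extracted ApiKeyCounter singleton (this logic previously lived inline in SplitFactoryImpl); the key value is illustrative only.

    import io.split.client.ApiKeyCounter;

    class ApiKeyCounterUsageSketch {
        void example() {
            ApiKeyCounter counter = ApiKeyCounter.getApiKeyCounterInstance();
            counter.add("SDK_KEY");    // warns if any factory already exists
            counter.add("SDK_KEY");    // warns again, now counting factories for this key
            counter.remove("SDK_KEY"); // called from SplitFactoryImpl.destroy()
        }
    }
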
diff --git a/client/src/main/java/io/split/client/CacheUpdaterService.java b/client/src/main/java/io/split/client/CacheUpdaterService.java
new file mode 100644
index 000000000..6823fdefd
--- /dev/null
+++ b/client/src/main/java/io/split/client/CacheUpdaterService.java
@@ -0,0 +1,90 @@
+package io.split.client;
+
+import com.google.common.collect.Lists;
+import io.split.cache.SplitCache;
+import io.split.client.dtos.ConditionType;
+import io.split.client.dtos.MatcherCombiner;
+import io.split.client.dtos.Partition;
+import io.split.engine.experiments.ParsedCondition;
+import io.split.engine.experiments.ParsedSplit;
+import io.split.engine.matchers.AllKeysMatcher;
+import io.split.engine.matchers.AttributeMatcher;
+import io.split.engine.matchers.CombiningMatcher;
+import io.split.engine.matchers.strings.WhitelistMatcher;
+import io.split.grammar.Treatments;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Comparator;
+import java.util.stream.Collectors;
+
+public final class CacheUpdaterService {
+
+ private static final String LOCALHOST = "localhost";
+ private SplitCache _splitCache;
+
+ public CacheUpdaterService(SplitCache splitCache) {
+ _splitCache = splitCache;
+ }
+
+ public void updateCache(Map<SplitAndKey, LocalhostSplit> map) {
+ _splitCache.clear();
+ for (Map.Entry<SplitAndKey, LocalhostSplit> entrySplit : map.entrySet()) {
+ SplitAndKey splitAndKey = entrySplit.getKey();
+ String splitName = splitAndKey.split();
+ String splitKey = splitAndKey.key();
+ LocalhostSplit localhostSplit = entrySplit.getValue();
+ ParsedSplit split = _splitCache.get(splitName);
+ List<ParsedCondition> conditions = getConditions(splitKey, split, localhostSplit.treatment);
+ String treatment = conditions.size() > 0 ? Treatments.CONTROL : localhostSplit.treatment;
+ Map<String, String> configurations = new HashMap<>();
+ if(split != null && split.configurations().size() > 0) {
+ configurations = split.configurations();
+ }
+ configurations.put(localhostSplit.treatment, localhostSplit.config);
+
+ split = new ParsedSplit(splitName, 0, false, treatment,conditions, LOCALHOST, 0, 100, 0, 0, configurations);
+ _splitCache.put(split);
+ }
+ }
+
+ private List<ParsedCondition> getConditions(String splitKey, ParsedSplit split, String treatment) {
+ List<ParsedCondition> conditions = split == null ? new ArrayList<>() : split.parsedConditions().stream().collect(Collectors.toList());
+ Partition partition = new Partition();
+ partition.treatment = treatment;
+ partition.size = 100;
+
+ if(splitKey != null) {
+ conditions.add(createWhitelistCondition(splitKey, partition));
+ }
+ else {
+ conditions = conditions.stream().filter(pc -> ConditionType.WHITELIST.equals(pc.conditionType())).collect(Collectors.toList());
+ conditions.add(createRolloutCondition(partition));
+ }
+ conditions.sort(Comparator.comparing(ParsedCondition::conditionType));
+ return conditions;
+ }
+
+ private ParsedCondition createWhitelistCondition(String splitKey, Partition partition) {
+ ParsedCondition parsedCondition = new ParsedCondition(ConditionType.WHITELIST,
+ new CombiningMatcher(MatcherCombiner.AND,
+ Lists.newArrayList(new AttributeMatcher(null, new WhitelistMatcher(Lists.newArrayList(splitKey)), false))),
+ Lists.newArrayList(partition), splitKey);
+ return parsedCondition;
+ }
+
+ private ParsedCondition createRolloutCondition(Partition partition) {
+ Partition rolloutPartition = new Partition();
+ rolloutPartition.treatment = "-";
+ rolloutPartition.size = 0;
+ ParsedCondition parsedCondition = new ParsedCondition(ConditionType.ROLLOUT,
+ new CombiningMatcher(MatcherCombiner.AND,
+ Lists.newArrayList(new AttributeMatcher(null, new AllKeysMatcher(), false))),
+ Lists.newArrayList(partition, rolloutPartition), "LOCAL");
+
+ return parsedCondition;
+ }
+
+}
diff --git a/client/src/main/java/io/split/client/HttpSegmentChangeFetcher.java b/client/src/main/java/io/split/client/HttpSegmentChangeFetcher.java
index d8118a052..7d7d735f6 100644
--- a/client/src/main/java/io/split/client/HttpSegmentChangeFetcher.java
+++ b/client/src/main/java/io/split/client/HttpSegmentChangeFetcher.java
@@ -28,6 +28,8 @@ public final class HttpSegmentChangeFetcher implements SegmentChangeFetcher {
private static final String SINCE = "since";
private static final String PREFIX = "segmentChangeFetcher";
+ private static final String NAME_CACHE = "Cache-Control";
+ private static final String VALUE_CACHE = "no-cache";
private final CloseableHttpClient _client;
private final URI _target;
@@ -49,7 +51,7 @@ private HttpSegmentChangeFetcher(CloseableHttpClient client, URI uri, Metrics me
}
@Override
- public SegmentChange fetch(String segmentName, long since) {
+ public SegmentChange fetch(String segmentName, long since, boolean addCacheHeader) {
long start = System.currentTimeMillis();
CloseableHttpResponse response = null;
@@ -58,6 +60,9 @@ public SegmentChange fetch(String segmentName, long since) {
String path = _target.getPath() + "/" + segmentName;
URI uri = new URIBuilder(_target).setPath(path).addParameter(SINCE, "" + since).build();
HttpGet request = new HttpGet(uri);
+ if(addCacheHeader) {
+ request.setHeader(NAME_CACHE, VALUE_CACHE);
+ }
response = _client.execute(request);
int statusCode = response.getCode();
diff --git a/client/src/main/java/io/split/client/HttpSplitChangeFetcher.java b/client/src/main/java/io/split/client/HttpSplitChangeFetcher.java
index 50ab2dd2c..3c5f9b8fc 100644
--- a/client/src/main/java/io/split/client/HttpSplitChangeFetcher.java
+++ b/client/src/main/java/io/split/client/HttpSplitChangeFetcher.java
@@ -28,6 +28,8 @@ public final class HttpSplitChangeFetcher implements SplitChangeFetcher {
private static final String SINCE = "since";
private static final String PREFIX = "splitChangeFetcher";
+ private static final String NAME_CACHE = "Cache-Control";
+ private static final String VALUE_CACHE = "no-cache";
private final CloseableHttpClient _client;
private final URI _target;
@@ -49,7 +51,7 @@ private HttpSplitChangeFetcher(CloseableHttpClient client, URI uri, Metrics metr
}
@Override
- public SplitChange fetch(long since) {
+ public SplitChange fetch(long since, boolean addCacheHeader) {
long start = System.currentTimeMillis();
@@ -59,6 +61,9 @@ public SplitChange fetch(long since) {
URI uri = new URIBuilder(_target).addParameter(SINCE, "" + since).build();
HttpGet request = new HttpGet(uri);
+ if(addCacheHeader) {
+ request.setHeader(NAME_CACHE, VALUE_CACHE);
+ }
response = _client.execute(request);
int statusCode = response.getCode();
diff --git a/client/src/main/java/io/split/client/LocalhostSplitClient.java b/client/src/main/java/io/split/client/LocalhostSplitClient.java
deleted file mode 100644
index 2b7ebf71b..000000000
--- a/client/src/main/java/io/split/client/LocalhostSplitClient.java
+++ /dev/null
@@ -1,130 +0,0 @@
-package io.split.client;
-
-import io.split.client.api.Key;
-import io.split.client.api.SplitResult;
-import io.split.grammar.Treatments;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-import java.util.concurrent.TimeoutException;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-/**
- * An implementation of SplitClient that considers all partitions
- * passed in the constructor to be 100% on for all users, and
- * any other split to be 100% off for all users. This implementation
- * is useful for using Codigo in localhost environment.
- *
- * @author adil
- */
-public final class LocalhostSplitClient implements SplitClient {
- private static final Logger _log = LoggerFactory.getLogger(LocalhostSplitClient.class);
- private static SplitResult SPLIT_RESULT_CONTROL = new SplitResult(Treatments.CONTROL, null);
-
- private Map _map;
-
- public LocalhostSplitClient(Map map) {
- checkNotNull(map, "map must not be null");
- _map = map;
- }
-
- @Override
- public String getTreatment(String key, String split) {
- return getTreatmentAndConfigInternal(key, split).treatment();
- }
-
- @Override
- public String getTreatment(String key, String split, Map attributes) {
- return getTreatmentAndConfigInternal(key, split).treatment();
- }
-
- @Override
- public String getTreatment(Key key, String split, Map attributes) {
- return getTreatmentAndConfigInternal(key.matchingKey(), split, attributes).treatment();
- }
-
- @Override
- public SplitResult getTreatmentWithConfig(String key, String split) {
- return getTreatmentAndConfigInternal(key, split);
- }
-
- @Override
- public SplitResult getTreatmentWithConfig(String key, String split, Map attributes) {
- return getTreatmentAndConfigInternal(key, split, attributes);
- }
-
- @Override
- public SplitResult getTreatmentWithConfig(Key key, String split, Map attributes) {
- return getTreatmentAndConfigInternal(key.matchingKey(), split, attributes);
- }
-
- private SplitResult getTreatmentAndConfigInternal(String key, String split) {
- return getTreatmentAndConfigInternal(key, split, null);
- }
-
- private SplitResult getTreatmentAndConfigInternal(String key, String split, Map attributes) {
- if (key == null || split == null) {
- return SPLIT_RESULT_CONTROL;
- }
-
- SplitAndKey override = SplitAndKey.of(split, key);
- if (_map.containsKey(override)) {
- return toSplitResult(_map.get(override));
- }
-
- SplitAndKey splitDefaultTreatment = SplitAndKey.of(split);
-
- LocalhostSplit localhostSplit = _map.get(splitDefaultTreatment);
-
- if (localhostSplit == null) {
- return SPLIT_RESULT_CONTROL;
- }
-
- return toSplitResult(localhostSplit);
- }
-
- private SplitResult toSplitResult(LocalhostSplit localhostSplit) {
- return new SplitResult(localhostSplit.treatment,localhostSplit.config);
- }
-
- public void updateFeatureToTreatmentMap(Map map) {
- if (map == null) {
- _log.warn("A null map was passed as an update. Ignoring this update.");
- return;
- }
- _map = map;
- }
-
- @Override
- public void destroy() {
- _map.clear();
- }
-
- @Override
- public boolean track(String key, String trafficType, String eventType) {
- return false;
- }
-
- @Override
- public boolean track(String key, String trafficType, String eventType, double value) {
- return false;
- }
-
- @Override
- public boolean track(String key, String trafficType, String eventType, Map properties) {
- return false;
- }
-
- @Override
- public boolean track(String key, String trafficType, String eventType, double value, Map properties) {
- return false;
- }
-
- @Override
- public void blockUntilReady() throws TimeoutException, InterruptedException {
- // LocalhostSplitClient is always ready
- }
-
-}
diff --git a/client/src/main/java/io/split/client/LocalhostSplitClientAndFactory.java b/client/src/main/java/io/split/client/LocalhostSplitClientAndFactory.java
deleted file mode 100644
index e7f47a1a4..000000000
--- a/client/src/main/java/io/split/client/LocalhostSplitClientAndFactory.java
+++ /dev/null
@@ -1,104 +0,0 @@
-package io.split.client;
-
-import io.split.client.api.Key;
-import io.split.client.api.SplitResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-import java.util.concurrent.TimeoutException;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-/**
- * An implementation of SplitClient that considers all partitions
- * passed in the constructor to be 100% on for all users, and
- * any other split to be 100% off for all users. This implementation
- * is useful for using Split in localhost environment.
- *
- * @author adil
- */
-public final class LocalhostSplitClientAndFactory implements SplitClient {
- private static final Logger _log = LoggerFactory.getLogger(LocalhostSplitClientAndFactory.class);
-
- private LocalhostSplitFactory _factory;
- private LocalhostSplitClient _splitClient;
-
- public LocalhostSplitClientAndFactory(LocalhostSplitFactory container, LocalhostSplitClient client) {
- _factory = container;
- _splitClient = client;
-
- checkNotNull(_factory);
- checkNotNull(_splitClient);
- }
-
- @Override
- public String getTreatment(String key, String split) {
- return _splitClient.getTreatment(key, split);
- }
-
- @Override
- public String getTreatment(String key, String split, Map attributes) {
- return _splitClient.getTreatment(key, split, attributes);
- }
-
- @Override
- public String getTreatment(Key key, String split, Map attributes) {
- return _splitClient.getTreatment(key.matchingKey(), split, attributes);
- }
-
- @Override
- public SplitResult getTreatmentWithConfig(String key, String split) {
- return _splitClient.getTreatmentWithConfig(key, split);
- }
-
- @Override
- public SplitResult getTreatmentWithConfig(String key, String split, Map attributes) {
- return _splitClient.getTreatmentWithConfig(key, split, attributes);
- }
-
- @Override
- public SplitResult getTreatmentWithConfig(Key key, String split, Map attributes) {
- return _splitClient.getTreatmentWithConfig(key, split, attributes);
- }
-
- public void updateFeatureToTreatmentMap(Map map) {
- if (map == null) {
- _log.warn("A null map was passed as an update. Ignoring this update.");
- return;
- }
- _splitClient.updateFeatureToTreatmentMap(map);
- }
-
- @Override
- public void destroy() {
- _factory.destroy();
- _splitClient.destroy();
- }
-
- @Override
- public boolean track(String key, String trafficType, String eventType) {
- return _splitClient.track(key, trafficType, eventType);
- }
-
- @Override
- public boolean track(String key, String trafficType, String eventType, double value) {
- return _splitClient.track(key, trafficType, eventType, value);
- }
-
- @Override
- public boolean track(String key, String trafficType, String eventType, Map properties) {
- return _splitClient.track(key, trafficType, eventType, properties);
- }
-
- @Override
- public boolean track(String key, String trafficType, String eventType, double value, Map properties) {
- return _splitClient.track(key, trafficType, eventType, value, properties);
- }
-
- @Override
- public void blockUntilReady() throws TimeoutException, InterruptedException {
- _splitClient.blockUntilReady();
- }
-
-}
diff --git a/client/src/main/java/io/split/client/LocalhostSplitFactory.java b/client/src/main/java/io/split/client/LocalhostSplitFactory.java
index 940fd2fbe..0ec01f8c9 100644
--- a/client/src/main/java/io/split/client/LocalhostSplitFactory.java
+++ b/client/src/main/java/io/split/client/LocalhostSplitFactory.java
@@ -1,5 +1,11 @@
package io.split.client;
+import io.split.cache.InMemoryCacheImp;
+import io.split.cache.SplitCache;
+import io.split.client.impressions.ImpressionsManager;
+import io.split.engine.SDKReadinessGates;
+import io.split.engine.evaluator.EvaluatorImp;
+import io.split.engine.metrics.Metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -23,9 +29,10 @@ public final class LocalhostSplitFactory implements SplitFactory {
static final String FILENAME = ".split";
static final String LOCALHOST = "localhost";
- private final LocalhostSplitClientAndFactory _client;
+ private final SplitClient _client;
private final LocalhostSplitManager _manager;
private final AbstractLocalhostSplitFile _splitFile;
+ private final CacheUpdaterService _cacheUpdaterService;
public static LocalhostSplitFactory createLocalhostSplitFactory(SplitClientConfig config) throws IOException {
String directory = System.getProperty("user.home");
@@ -44,7 +51,15 @@ public LocalhostSplitFactory(String directory, String file) throws IOException {
}
Map splitAndKeyToTreatment = _splitFile.readOnSplits();
- _client = new LocalhostSplitClientAndFactory(this, new LocalhostSplitClient(splitAndKeyToTreatment));
+ SplitCache splitCache = new InMemoryCacheImp();
+ SDKReadinessGates sdkReadinessGates = new SDKReadinessGates();
+
+ sdkReadinessGates.splitsAreReady();
+ _cacheUpdaterService = new CacheUpdaterService(splitCache);
+ _cacheUpdaterService.updateCache(splitAndKeyToTreatment);
+ _client = new SplitClientImpl(this, splitCache,
+ new ImpressionsManager.NoOpImpressionsManager(), new Metrics.NoopMetrics(), new NoopEventClient(),
+ SplitClientConfig.builder().setBlockUntilReadyTimeout(1).build(), sdkReadinessGates, new EvaluatorImp(splitCache));
_manager = LocalhostSplitManager.of(splitAndKeyToTreatment);
_splitFile.registerWatcher();
@@ -73,7 +88,7 @@ public boolean isDestroyed() {
}
public void updateFeatureToTreatmentMap(Map featureToTreatmentMap) {
- _client.updateFeatureToTreatmentMap(featureToTreatmentMap);
+ _cacheUpdaterService.updateCache(featureToTreatmentMap);
_manager.updateFeatureToTreatmentMap(featureToTreatmentMap);
}
}
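
With this change the localhost factory hands back a real SplitClientImpl backed by an in-memory cache instead of the removed LocalhostSplitClient. For reference, a minimal sketch of localhost usage; the key and feature name are illustrative, and treatments come from the .split file in the user's home directory.

    import io.split.client.SplitClient;
    import io.split.client.SplitFactory;
    import io.split.client.SplitFactoryBuilder;

    class LocalhostUsageSketch {
        void example() throws Exception {
            SplitFactory factory = SplitFactoryBuilder.local();   // reads ~/.split
            SplitClient client = factory.client();
            String treatment = client.getTreatment("user-1", "my_feature");
            factory.destroy();
        }
    }
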
diff --git a/client/src/main/java/io/split/client/LocalhostSplitManager.java b/client/src/main/java/io/split/client/LocalhostSplitManager.java
index 26a8118fc..dc72ea4d1 100644
--- a/client/src/main/java/io/split/client/LocalhostSplitManager.java
+++ b/client/src/main/java/io/split/client/LocalhostSplitManager.java
@@ -77,7 +77,7 @@ public void blockUntilReady() throws TimeoutException, InterruptedException {
@Override
public SplitView split(String featureName) {
- if (!_splitAndKeyToTreatmentMap.containsKey(featureName)) {
+ if (!_splitToTreatmentsMap.containsKey(featureName)) {
return null;
}
diff --git a/client/src/main/java/io/split/client/SplitClientImpl.java b/client/src/main/java/io/split/client/SplitClientImpl.java
index 25b652971..7792f5a1f 100644
--- a/client/src/main/java/io/split/client/SplitClientImpl.java
+++ b/client/src/main/java/io/split/client/SplitClientImpl.java
@@ -1,29 +1,29 @@
package io.split.client;
-import com.google.common.annotations.VisibleForTesting;
+import io.split.cache.SplitCache;
import io.split.client.api.Key;
import io.split.client.api.SplitResult;
-import io.split.client.dtos.ConditionType;
import io.split.client.dtos.Event;
-import io.split.client.exceptions.ChangeNumberExceptionWrapper;
import io.split.client.impressions.Impression;
import io.split.client.impressions.ImpressionsManager;
-import io.split.client.impressions.ImpressionsManagerImpl;
import io.split.engine.SDKReadinessGates;
-import io.split.engine.experiments.ParsedCondition;
-import io.split.engine.experiments.ParsedSplit;
-import io.split.engine.experiments.SplitFetcher;
+import io.split.engine.evaluator.Evaluator;
+import io.split.engine.evaluator.EvaluatorImp;
+import io.split.engine.evaluator.Labels;
import io.split.engine.metrics.Metrics;
-import io.split.engine.splitter.Splitter;
import io.split.grammar.Treatments;
+import io.split.inputValidation.EventsValidator;
+import io.split.inputValidation.KeyValidator;
+import io.split.inputValidation.SplitNameValidator;
+import io.split.inputValidation.TrafficTypeValidator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import java.util.Optional;
import java.util.concurrent.TimeoutException;
-import java.util.regex.Pattern;
import static com.google.common.base.Preconditions.checkNotNull;
@@ -33,54 +33,38 @@
* @author adil
*/
public final class SplitClientImpl implements SplitClient {
+ public static final SplitResult SPLIT_RESULT_CONTROL = new SplitResult(Treatments.CONTROL, null);
- private static final Logger _log = LoggerFactory.getLogger(SplitClientImpl.class);
-
- private static final String GET_TREATMENT_LABEL = "sdk.getTreatment";
- private static final String GET_TREATMENT_CONFIG_LABEL = "sdk.getTreatmentWithConfig";
-
- private static final String NOT_IN_SPLIT = "not in split";
- private static final String DEFAULT_RULE = "default rule";
- private static final String DEFINITION_NOT_FOUND = "definition not found";
- private static final String EXCEPTION = "exception";
- private static final String KILLED = "killed";
-
- public static final Pattern EVENT_TYPE_MATCHER = Pattern.compile("^[a-zA-Z0-9][-_.:a-zA-Z0-9]{0,79}$");
+ private static final String GET_TREATMENT = "getTreatment";
+ private static final String GET_TREATMENT_WITH_CONFIG = "getTreatmentWithConfig";
- public static final SplitResult SPLIT_RESULT_CONTROL = new SplitResult(Treatments.CONTROL, null);
+ private static final Logger _log = LoggerFactory.getLogger(SplitClientImpl.class);
private final SplitFactory _container;
- private final SplitFetcher _splitFetcher;
+ private final SplitCache _splitCache;
private final ImpressionsManager _impressionManager;
private final Metrics _metrics;
private final SplitClientConfig _config;
private final EventClient _eventClient;
private final SDKReadinessGates _gates;
-
+ private final Evaluator _evaluator;
public SplitClientImpl(SplitFactory container,
- SplitFetcher splitFetcher,
+ SplitCache splitCache,
ImpressionsManager impressionManager,
Metrics metrics,
EventClient eventClient,
SplitClientConfig config,
- SDKReadinessGates gates) {
+ SDKReadinessGates gates,
+ Evaluator evaluator) {
_container = container;
- _splitFetcher = splitFetcher;
- _impressionManager = impressionManager;
+ _splitCache = checkNotNull(splitCache);
+ _impressionManager = checkNotNull(impressionManager);
_metrics = metrics;
_eventClient = eventClient;
_config = config;
- _gates = gates;
-
- checkNotNull(gates);
- checkNotNull(_splitFetcher);
- checkNotNull(_impressionManager);
- }
-
- @Override
- public void destroy() {
- _container.destroy();
+ _gates = checkNotNull(gates);
+ _evaluator = checkNotNull(evaluator);
}
@Override
@@ -90,237 +74,27 @@ public String getTreatment(String key, String split) {
@Override
public String getTreatment(String key, String split, Map attributes) {
- return getTreatment(key, null, split, attributes);
+ return getTreatmentWithConfigInternal(GET_TREATMENT, key, null, split, attributes).treatment();
}
@Override
public String getTreatment(Key key, String split, Map attributes) {
- if (key == null) {
- _log.error("getTreatment: you passed a null key, the key must be a non-empty string");
- return Treatments.CONTROL;
- }
-
- if (key.matchingKey() == null) {
- _log.error("getTreatment: you passed a null matchingKey, the matchingKey must be a non-empty string");
- return Treatments.CONTROL;
- }
-
-
- if (key.bucketingKey() == null) {
- _log.error("getTreatment: you passed a null bucketingKey, the bucketingKey must be a non-empty string");
- return Treatments.CONTROL;
- }
-
- return getTreatment(key.matchingKey(), key.bucketingKey(), split, attributes);
- }
-
- private String getTreatment(String matchingKey, String bucketingKey, String split, Map attributes) {
- return getTreatmentWithConfigInternal(GET_TREATMENT_LABEL, matchingKey, bucketingKey, split, attributes).treatment();
+ return getTreatmentWithConfigInternal(GET_TREATMENT, key.matchingKey(), key.bucketingKey(), split, attributes).treatment();
}
@Override
public SplitResult getTreatmentWithConfig(String key, String split) {
- return getTreatmentWithConfigInternal(GET_TREATMENT_LABEL, key, null, split, Collections.emptyMap());
+ return getTreatmentWithConfigInternal(GET_TREATMENT_WITH_CONFIG, key, null, split, Collections.emptyMap());
}
@Override
public SplitResult getTreatmentWithConfig(String key, String split, Map attributes) {
- return getTreatmentWithConfigInternal(GET_TREATMENT_LABEL, key, null, split, attributes);
+ return getTreatmentWithConfigInternal(GET_TREATMENT_WITH_CONFIG, key, null, split, attributes);
}
@Override
public SplitResult getTreatmentWithConfig(Key key, String split, Map attributes) {
- if (key == null) {
- _log.error("getTreatment: you passed a null key, the key must be a non-empty string");
- return SPLIT_RESULT_CONTROL;
- }
-
- if (key.matchingKey() == null) {
- _log.error("getTreatment: you passed a null matchingKey, the matchingKey must be a non-empty string");
- return SPLIT_RESULT_CONTROL;
- }
-
-
- if (key.bucketingKey() == null) {
- _log.error("getTreatment: you passed a null bucketingKey, the bucketingKey must be a non-empty string");
- return SPLIT_RESULT_CONTROL;
- }
-
- return getTreatmentWithConfigInternal(GET_TREATMENT_LABEL, key.matchingKey(), key.bucketingKey(), split, attributes);
- }
-
- private SplitResult getTreatmentWithConfigInternal(String label, String matchingKey, String bucketingKey, String split, Map attributes) {
- try {
- if (_container.isDestroyed()) {
- _log.error("Client has already been destroyed - no calls possible");
- return SPLIT_RESULT_CONTROL;
- }
-
- if (matchingKey == null) {
- _log.error("getTreatmentWithConfig: you passed a null matchingKey, the matchingKey must be a non-empty string");
- return SPLIT_RESULT_CONTROL;
- }
- if (matchingKey.length() > _config.maxStringLength()) {
- _log.error("getTreatmentWithConfig: matchingKey too long - must be " + _config.maxStringLength() + " characters or less");
- return SPLIT_RESULT_CONTROL;
- }
- if (matchingKey.isEmpty()) {
- _log.error("getTreatmentWithConfig: you passed an empty string, matchingKey must be a non-empty string");
- return SPLIT_RESULT_CONTROL;
- }
- if (bucketingKey != null && bucketingKey.isEmpty()) {
- _log.error("getTreatmentWithConfig: you passed an empty string, bucketingKey must be a non-empty string");
- return SPLIT_RESULT_CONTROL;
- }
- if (bucketingKey != null && bucketingKey.length() > _config.maxStringLength()) {
- _log.error("getTreatmentWithConfig: bucketingKey too long - must be " + _config.maxStringLength() + " characters or less");
- return SPLIT_RESULT_CONTROL;
- }
-
- if (split == null) {
- _log.error("getTreatmentWithConfig: you passed a null split name, split name must be a non-empty string");
- return SPLIT_RESULT_CONTROL;
- }
-
- if (split.isEmpty()) {
- _log.error("getTreatmentWithConfig: you passed an empty split name, split name must be a non-empty string");
- return SPLIT_RESULT_CONTROL;
- }
-
- String trimmed = split.trim();
- if (!trimmed.equals(split)) {
- _log.warn("getTreatmentWithConfig: split name \"" + split + "\" has extra whitespace, trimming");
- split = trimmed;
- }
-
- long start = System.currentTimeMillis();
-
- TreatmentLabelAndChangeNumber result = getTreatmentResultWithoutImpressions(matchingKey, bucketingKey, split, attributes);
-
- recordStats(
- matchingKey,
- bucketingKey,
- split,
- start,
- result._treatment,
- label,
- _config.labelsEnabled() ? result._label : null,
- result._changeNumber,
- attributes
- );
-
- return new SplitResult(result._treatment, result._configurations);
- } catch (Exception e) {
- try {
- _log.error("CatchAll Exception", e);
- } catch (Exception e1) {
- // ignore
- }
- return SPLIT_RESULT_CONTROL;
- }
- }
-
- private void recordStats(String matchingKey, String bucketingKey, String split, long start, String result,
- String operation, String label, Long changeNumber, Map attributes) {
- try {
- _impressionManager.track(new Impression(matchingKey, bucketingKey, split, result, System.currentTimeMillis(), label, changeNumber, attributes));
- _metrics.time(operation, System.currentTimeMillis() - start);
- } catch (Throwable t) {
- _log.error("Exception", t);
- }
- }
-
- @VisibleForTesting
- public String getTreatmentWithoutImpressions(String matchingKey, String bucketingKey, String split, Map attributes) {
- return getTreatmentResultWithoutImpressions(matchingKey, bucketingKey, split, attributes)._treatment;
- }
-
- private TreatmentLabelAndChangeNumber getTreatmentResultWithoutImpressions(String matchingKey, String bucketingKey, String split, Map attributes) {
- TreatmentLabelAndChangeNumber result;
- try {
- result = getTreatmentWithoutExceptionHandling(matchingKey, bucketingKey, split, attributes);
- } catch (ChangeNumberExceptionWrapper e) {
- result = new TreatmentLabelAndChangeNumber(Treatments.CONTROL, EXCEPTION, e.changeNumber());
- _log.error("Exception", e.wrappedException());
- } catch (Exception e) {
- result = new TreatmentLabelAndChangeNumber(Treatments.CONTROL, EXCEPTION);
- _log.error("Exception", e);
- }
-
- return result;
- }
-
- private TreatmentLabelAndChangeNumber getTreatmentWithoutExceptionHandling(String matchingKey, String bucketingKey, String split, Map attributes) throws ChangeNumberExceptionWrapper {
- ParsedSplit parsedSplit = _splitFetcher.fetch(split);
-
- if (parsedSplit == null) {
- if (_gates.isSDKReadyNow()) {
- _log.warn(
- "getTreatment: you passed \"" + split + "\" that does not exist in this environment, " +
- "please double check what Splits exist in the web console.");
- }
- return new TreatmentLabelAndChangeNumber(Treatments.CONTROL, DEFINITION_NOT_FOUND);
- }
-
- return getTreatment(matchingKey, bucketingKey, parsedSplit, attributes);
- }
-
- /**
- * @param matchingKey MUST NOT be null
- * @param bucketingKey
- * @param parsedSplit MUST NOT be null
- * @param attributes MUST NOT be null
- * @return
- * @throws ChangeNumberExceptionWrapper
- */
- private TreatmentLabelAndChangeNumber getTreatment(String matchingKey, String bucketingKey, ParsedSplit parsedSplit, Map attributes) throws ChangeNumberExceptionWrapper {
- try {
- if (parsedSplit.killed()) {
- String config = parsedSplit.configurations() != null ? parsedSplit.configurations().get(parsedSplit.defaultTreatment()) : null;
- return new TreatmentLabelAndChangeNumber(parsedSplit.defaultTreatment(), KILLED, parsedSplit.changeNumber(), config);
- }
-
- /*
- * There are three parts to a single Split: 1) Whitelists 2) Traffic Allocation
- * 3) Rollout. The flag inRollout is there to understand when we move into the Rollout
- * section. This is because we need to make sure that the Traffic Allocation
- * computation happens after the whitelist but before the rollout.
- */
- boolean inRollout = false;
-
- String bk = (bucketingKey == null) ? matchingKey : bucketingKey;
-
- for (ParsedCondition parsedCondition : parsedSplit.parsedConditions()) {
-
- if (!inRollout && parsedCondition.conditionType() == ConditionType.ROLLOUT) {
-
- if (parsedSplit.trafficAllocation() < 100) {
- // if the traffic allocation is 100%, no need to do anything special.
- int bucket = Splitter.getBucket(bk, parsedSplit.trafficAllocationSeed(), parsedSplit.algo());
-
- if (bucket > parsedSplit.trafficAllocation()) {
- // out of split
- String config = parsedSplit.configurations() != null ? parsedSplit.configurations().get(parsedSplit.defaultTreatment()) : null;
- return new TreatmentLabelAndChangeNumber(parsedSplit.defaultTreatment(), NOT_IN_SPLIT, parsedSplit.changeNumber(), config);
- }
-
- }
- inRollout = true;
- }
-
- if (parsedCondition.matcher().match(matchingKey, bucketingKey, attributes, this)) {
- String treatment = Splitter.getTreatment(bk, parsedSplit.seed(), parsedCondition.partitions(), parsedSplit.algo());
- String config = parsedSplit.configurations() != null ? parsedSplit.configurations().get(treatment) : null;
- return new TreatmentLabelAndChangeNumber(treatment, parsedCondition.label(), parsedSplit.changeNumber(), config);
- }
- }
-
- String config = parsedSplit.configurations() != null ? parsedSplit.configurations().get(parsedSplit.defaultTreatment()) : null;
- return new TreatmentLabelAndChangeNumber(parsedSplit.defaultTreatment(), DEFAULT_RULE, parsedSplit.changeNumber(), config);
- } catch (Exception e) {
- throw new ChangeNumberExceptionWrapper(e, parsedSplit.changeNumber());
- }
-
+ return getTreatmentWithConfigInternal(GET_TREATMENT_WITH_CONFIG, key.matchingKey(), key.bucketingKey(), split, attributes);
}
@Override
@@ -364,13 +138,9 @@ public void blockUntilReady() throws TimeoutException, InterruptedException {
_log.debug(String.format("Split SDK ready in %d ms", (System.currentTimeMillis() - startTime)));
}
- private Event createEvent(String key, String trafficType, String eventType) {
- Event event = new Event();
- event.eventTypeId = eventType;
- event.trafficTypeName = trafficType;
- event.key = key;
- event.timestamp = System.currentTimeMillis();
- return event;
+ @Override
+ public void destroy() {
+ _container.destroy();
}
private boolean track(Event event) {
@@ -378,115 +148,105 @@ private boolean track(Event event) {
_log.error("Client has already been destroyed - no calls possible");
return false;
}
- // Traffic Type validations
- if (event.trafficTypeName == null) {
- _log.error("track: you passed a null trafficTypeName, trafficTypeName must be a non-empty string");
- return false;
- }
- if (event.trafficTypeName.isEmpty()) {
- _log.error("track: you passed an empty trafficTypeName, trafficTypeName must be a non-empty string");
+ // Traffic Type validations
+ Optional<String> trafficTypeResult = TrafficTypeValidator.isValid(event.trafficTypeName, _splitCache, "track");
+ if (!trafficTypeResult.isPresent()) {
return false;
}
-
- if (!event.trafficTypeName.equals(event.trafficTypeName.toLowerCase())) {
- _log.warn("track: trafficTypeName should be all lowercase - converting string to lowercase");
- event.trafficTypeName = event.trafficTypeName.toLowerCase();
- }
-
- if (!_splitFetcher.fetchKnownTrafficTypes().contains(event.trafficTypeName)) {
- _log.warn("track: Traffic Type " + event.trafficTypeName + " does not have any corresponding Splits in this environment, " +
- "make sure you’re tracking your events to a valid traffic type defined in the Split console.");
- }
+ event.trafficTypeName = trafficTypeResult.get();
// EventType validations
- if (event.eventTypeId == null) {
- _log.error("track: you passed a null eventTypeId, eventTypeId must be a non-empty string");
+ if (!EventsValidator.typeIsValid(event.eventTypeId, "track")) {
return false;
}
- if (event.eventTypeId.isEmpty()) {
- _log.error("track:you passed an empty eventTypeId, eventTypeId must be a non-empty string");
+ // Key Validations
+ if (!KeyValidator.isValid(event.key, "key", _config.maxStringLength(), "track")) {
return false;
}
- if (!EVENT_TYPE_MATCHER.matcher(event.eventTypeId).find()) {
- _log.error("track: you passed " + event.eventTypeId + ", eventTypeId must adhere to the regular expression " +
- "[a-zA-Z0-9][-_.:a-zA-Z0-9]{0,79}. This means an eventTypeID must be alphanumeric, " +
- "cannot be more than 80 characters long, and can only include a dash, underscore, period, " +
- "or colon as separators of alphanumeric characters");
+ // Properties validations
+ EventsValidator.EventValidatorResult propertiesResult = EventsValidator.propertiesAreValid(event.properties);
+ if (!propertiesResult.getSuccess()) {
return false;
}
- // Key Validations
- if (event.key == null) {
- _log.error("track: you passed a null key, key must be a non-empty string");
- return false;
- }
+ event.properties = propertiesResult.getValue();
- if (event.key.isEmpty()) {
- _log.error("track: you passed an empty key, key must be a non-empty string");
- return false;
- }
+ return _eventClient.track(event, propertiesResult.getEventSize());
+ }
- if (event.key.length() > _config.maxStringLength()) {
- _log.error("track: key too long - must be " + _config.maxStringLength() + "characters or less");
- return false;
- }
+ private SplitResult getTreatmentWithConfigInternal(String method, String matchingKey, String bucketingKey, String split, Map<String, Object> attributes) {
+ try {
+ if (_container.isDestroyed()) {
+ _log.error("Client has already been destroyed - no calls possible");
+ return SPLIT_RESULT_CONTROL;
+ }
- int size = 1024; // We assume 1kb events without properties (750 bytes avg measured)
- if (null != event.properties) {
- if (event.properties.size() > 300) {
- _log.warn("Event has more than 300 properties. Some of them will be trimmed when processed");
+ if (!KeyValidator.isValid(matchingKey, "matchingKey", _config.maxStringLength(), method)) {
+ return SPLIT_RESULT_CONTROL;
}
- for (Map.Entry entry: event.properties.entrySet()) {
- size += entry.getKey().length();
- Object value = entry.getValue();
- if (null == value) {
- continue;
- }
-
- if (!(value instanceof Number) && !(value instanceof Boolean) && !(value instanceof String)) {
- _log.warn(String.format("Property %s is of invalid type. Setting value to null", entry.getKey()));
- entry.setValue(null);
- }
-
- if (value instanceof String) {
- size += ((String) value).length();
- }
-
- if (size > Event.MAX_PROPERTIES_LENGTH_BYTES) {
- _log.error(String.format("The maximum size allowed for the properties is 32768 bytes. "
- + "Current one is %s bytes. Event not queued", size));
- return false;
- }
+ if (!KeyValidator.bucketingKeyIsValid(bucketingKey, _config.maxStringLength(), method)) {
+ return SPLIT_RESULT_CONTROL;
}
- }
+ Optional<String> splitNameResult = SplitNameValidator.isValid(split, method);
+ if (!splitNameResult.isPresent()) {
+ return SPLIT_RESULT_CONTROL;
+ }
+ split = splitNameResult.get();
- return _eventClient.track(event, size);
- }
+ long start = System.currentTimeMillis();
- private static final class TreatmentLabelAndChangeNumber {
- private final String _treatment;
- private final String _label;
- private final Long _changeNumber;
- private final String _configurations;
+ EvaluatorImp.TreatmentLabelAndChangeNumber result = _evaluator.evaluateFeature(matchingKey, bucketingKey, split, attributes);
- public TreatmentLabelAndChangeNumber(String treatment, String label) {
- this(treatment, label, null, null);
- }
+ if (result.treatment.equals(Treatments.CONTROL) && result.label.equals(Labels.DEFINITION_NOT_FOUND) && _gates.isSDKReadyNow()) {
+ _log.warn(
+ "getTreatment: you passed \"" + split + "\" that does not exist in this environment, " +
+ "please double check what Splits exist in the web console.");
+ }
+
+ recordStats(
+ matchingKey,
+ bucketingKey,
+ split,
+ start,
+ result.treatment,
+ String.format("sdk.%s", method),
+ _config.labelsEnabled() ? result.label : null,
+ result.changeNumber,
+ attributes
+ );
- public TreatmentLabelAndChangeNumber(String treatment, String label, Long changeNumber) {
- this(treatment, label, changeNumber, null);
+ return new SplitResult(result.treatment, result.configurations);
+ } catch (Exception e) {
+ try {
+ _log.error("CatchAll Exception", e);
+ } catch (Exception e1) {
+ // ignore
+ }
+ return SPLIT_RESULT_CONTROL;
}
+ }
- public TreatmentLabelAndChangeNumber(String treatment, String label, Long changeNumber, String configurations) {
- _treatment = treatment;
- _label = label;
- _changeNumber = changeNumber;
- _configurations = configurations;
+ private void recordStats(String matchingKey, String bucketingKey, String split, long start, String result,
+ String operation, String label, Long changeNumber, Map<String, Object> attributes) {
+ try {
+ _impressionManager.track(new Impression(matchingKey, bucketingKey, split, result, System.currentTimeMillis(), label, changeNumber, attributes));
+ _metrics.time(operation, System.currentTimeMillis() - start);
+ } catch (Throwable t) {
+ _log.error("Exception", t);
}
}
+
+ private Event createEvent(String key, String trafficType, String eventType) {
+ Event event = new Event();
+ event.eventTypeId = eventType;
+ event.trafficTypeName = trafficType;
+ event.key = key;
+ event.timestamp = System.currentTimeMillis();
+ return event;
+ }
}
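
The evaluation path above now delegates to the Evaluator/EvaluatorImp abstraction while the public client surface is unchanged. For reference, a short sketch of a call that goes through getTreatmentWithConfigInternal, assuming SplitResult's treatment()/config() accessors; the key and split name are illustrative only.

    import io.split.client.SplitClient;
    import io.split.client.api.SplitResult;

    import java.util.Collections;

    class GetTreatmentUsageSketch {
        void example(SplitClient client) {
            SplitResult result = client.getTreatmentWithConfig("user-1", "my_feature", Collections.emptyMap());
            String treatment = result.treatment();   // "control" if the split is unknown or validation fails
            String config = result.config();         // null when the treatment has no configuration
        }
    }
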
diff --git a/client/src/main/java/io/split/client/SplitFactoryBuilder.java b/client/src/main/java/io/split/client/SplitFactoryBuilder.java
index f1f665fb2..f18032416 100644
--- a/client/src/main/java/io/split/client/SplitFactoryBuilder.java
+++ b/client/src/main/java/io/split/client/SplitFactoryBuilder.java
@@ -1,5 +1,6 @@
package io.split.client;
+import io.split.inputValidation.ApiKeyValidator;
import io.split.grammar.Treatments;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -36,12 +37,7 @@ public static SplitFactory build(String apiToken) throws IOException, URISyntaxE
* there were problems reading the override file from disk.
*/
public static synchronized SplitFactory build(String apiToken, SplitClientConfig config) throws IOException, URISyntaxException {
- if (apiToken == null) {
- _log.error("factory instantiation: you passed a null apiToken, apiToken must be a non-empty string");
- }
- if (apiToken.isEmpty()) {
- _log.error("factory instantiation: you passed and empty apiToken, apiToken be a non-empty string");
- }
+ ApiKeyValidator.validate(apiToken);
if (LocalhostSplitFactory.LOCALHOST.equals(apiToken)) {
return LocalhostSplitFactory.createLocalhostSplitFactory(config);
@@ -56,7 +52,7 @@ public static synchronized SplitFactory build(String apiToken, SplitClientConfig
*
* @throws IOException if there were problems reading the override file from disk.
*/
- public static SplitFactory local() throws IOException {
+ public static SplitFactory local() throws IOException, URISyntaxException {
return LocalhostSplitFactory.createLocalhostSplitFactory(SplitClientConfig.builder().build());
}
@@ -66,7 +62,7 @@ public static SplitFactory local() throws IOException {
* @return config Split config file
* @throws IOException if there were problems reading the override file from disk.
*/
- public static SplitFactory local(SplitClientConfig config) throws IOException {
+ public static SplitFactory local(SplitClientConfig config) throws IOException, URISyntaxException {
return LocalhostSplitFactory.createLocalhostSplitFactory(config);
}
diff --git a/client/src/main/java/io/split/client/SplitFactoryImpl.java b/client/src/main/java/io/split/client/SplitFactoryImpl.java
index 22dd95f85..8b576d0dd 100644
--- a/client/src/main/java/io/split/client/SplitFactoryImpl.java
+++ b/client/src/main/java/io/split/client/SplitFactoryImpl.java
@@ -1,7 +1,5 @@
package io.split.client;
-import com.google.common.collect.ConcurrentHashMultiset;
-import com.google.common.collect.Multiset;
import io.split.client.impressions.AsynchronousImpressionListener;
import io.split.client.impressions.ImpressionListener;
import io.split.client.impressions.ImpressionsManagerImpl;
@@ -11,14 +9,22 @@
import io.split.client.metrics.CachedMetrics;
import io.split.client.metrics.FireAndForgetMetrics;
import io.split.client.metrics.HttpMetrics;
+import io.split.cache.InMemoryCacheImp;
+import io.split.cache.SplitCache;
+import io.split.engine.evaluator.Evaluator;
+import io.split.engine.evaluator.EvaluatorImp;
import io.split.engine.SDKReadinessGates;
import io.split.engine.common.SyncManager;
import io.split.engine.common.SyncManagerImp;
-import io.split.engine.experiments.RefreshableSplitFetcherProvider;
import io.split.engine.experiments.SplitChangeFetcher;
+import io.split.engine.experiments.SplitFetcher;
+import io.split.engine.experiments.SplitFetcherImp;
import io.split.engine.experiments.SplitParser;
-import io.split.engine.segments.RefreshableSegmentFetcher;
+import io.split.engine.experiments.SplitSynchronizationTask;
import io.split.engine.segments.SegmentChangeFetcher;
+import io.split.cache.SegmentCache;
+import io.split.cache.SegmentCacheInMemoryImpl;
+import io.split.engine.segments.SegmentSynchronizationTaskImp;
import io.split.integrations.IntegrationsConfig;
import org.apache.hc.client5.http.auth.AuthScope;
import org.apache.hc.client5.http.auth.Credentials;
@@ -55,14 +61,150 @@ public class SplitFactoryImpl implements SplitFactory {
private final static long SSE_CONNECT_TIMEOUT = 30000;
private final static long SSE_SOCKET_TIMEOUT = 70000;
- private static final Multiset USED_API_TOKENS = ConcurrentHashMultiset.create();
private static Random RANDOM = new Random();
+ private final URI _rootTarget;
+ private final URI _eventsRootTarget;
+ private final CloseableHttpClient _httpclient;
+ private final SDKReadinessGates _gates;
+ private final HttpMetrics _httpMetrics;
+ private final FireAndForgetMetrics _unCachedFireAndForget;
+ private final SegmentSynchronizationTaskImp _segmentSynchronizationTaskImp;
+ private final SplitFetcher _splitFetcher;
+ private final SplitSynchronizationTask _splitSynchronizationTask;
+ private final ImpressionsManagerImpl _impressionsManager;
+ private final FireAndForgetMetrics _cachedFireAndForgetMetrics;
+ private final EventClient _eventClient;
+ private final SyncManager _syncManager;
+ private final Evaluator _evaluator;
+ private final String _apiToken;
+
+ // Caches
+ private final SegmentCache _segmentCache;
+ private final SplitCache _splitCache;
+
+ // Client and Manager
private final SplitClient _client;
private final SplitManager _manager;
- private final Runnable destroyer;
- private final String _apiToken;
+
private boolean isTerminated = false;
+ private final ApiKeyCounter _apiKeyCounter;
+
+ public SplitFactoryImpl(String apiToken, SplitClientConfig config) throws URISyntaxException {
+ _apiToken = apiToken;
+ _apiKeyCounter = ApiKeyCounter.getApiKeyCounterInstance();
+ _apiKeyCounter.add(apiToken);
+
+ if (config.blockUntilReady() == -1) {
+ //BlockUntilReady not been set
+ _log.warn("no setBlockUntilReadyTimeout parameter has been set - incorrect control treatments could be logged” " +
+ "if no ready config has been set when building factory");
+
+ }
+
+ // SDKReadinessGates
+ _gates = new SDKReadinessGates();
+
+ // HttpClient
+ _httpclient = buildHttpClient(apiToken, config);
+
+ // Roots
+ _rootTarget = URI.create(config.endpoint());
+ _eventsRootTarget = URI.create(config.eventsEndpoint());
+
+ // HttpMetrics
+ _httpMetrics = HttpMetrics.create(_httpclient, _eventsRootTarget);
+
+ // Cache Initialisations
+ _segmentCache = new SegmentCacheInMemoryImpl();
+ _splitCache = new InMemoryCacheImp();
+
+ // Metrics
+ _unCachedFireAndForget = FireAndForgetMetrics.instance(_httpMetrics, 2, 1000);
+
+ // Segments
+ _segmentSynchronizationTaskImp = buildSegments(config);
+
+ // SplitFetcher
+ _splitFetcher = buildSplitFetcher();
+
+ // SplitSynchronizationTask
+ _splitSynchronizationTask = new SplitSynchronizationTask(_splitFetcher, _splitCache, findPollingPeriod(RANDOM, config.featuresRefreshRate()));
+
+ // Impressions
+ _impressionsManager = buildImpressionsManager(config);
+
+ // CachedFireAndForgetMetrics
+ _cachedFireAndForgetMetrics = buildCachedFireAndForgetMetrics(config);
+
+ // EventClient
+ _eventClient = EventClientImpl.create(_httpclient, _eventsRootTarget, config.eventsQueueSize(), config.eventFlushIntervalInMillis(), config.waitBeforeShutdown());
+
+ // SyncManager
+ _syncManager = SyncManagerImp.build(config.streamingEnabled(), _splitSynchronizationTask, _splitFetcher, _segmentSynchronizationTaskImp, _splitCache, config.authServiceURL(), _httpclient, config.streamingServiceURL(), config.authRetryBackoffBase(), buildSSEdHttpClient(config), _segmentCache);
+ _syncManager.start();
+
+ // Evaluator
+ _evaluator = new EvaluatorImp(_splitCache);
+
+ // SplitClient
+ _client = new SplitClientImpl(this, _splitCache, _impressionsManager, _cachedFireAndForgetMetrics, _eventClient, config, _gates, _evaluator);
+
+ // SplitManager
+ _manager = new SplitManagerImpl(_splitCache, config, _gates);
+
+ // DestroyOnShutDown
+ if (config.destroyOnShutDown()) {
+ Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+ // Using the full path to avoid conflicting with Thread.destroy()
+ SplitFactoryImpl.this.destroy();
+ }));
+ }
+ }
+
+ @Override
+ public SplitClient client() {
+ return _client;
+ }
+
+ @Override
+ public SplitManager manager() {
+ return _manager;
+ }
+
+ @Override
+ public synchronized void destroy() {
+ if (!isTerminated) {
+ _log.info("Shutdown called for split");
+ try {
+ _segmentSynchronizationTaskImp.close();
+ _log.info("Successful shutdown of segment fetchers");
+ _splitSynchronizationTask.close();
+ _log.info("Successful shutdown of splits");
+ _impressionsManager.close();
+ _log.info("Successful shutdown of impressions manager");
+ _unCachedFireAndForget.close();
+ _log.info("Successful shutdown of metrics 1");
+ _cachedFireAndForgetMetrics.close();
+ _log.info("Successful shutdown of metrics 2");
+ _httpclient.close();
+ _log.info("Successful shutdown of httpclient");
+ _eventClient.close();
+ _log.info("Successful shutdown of eventClient");
+ _syncManager.shutdown();
+ _log.info("Successful shutdown of syncManager");
+ } catch (IOException e) {
+ _log.error("We could not shutdown split", e);
+ }
+ _apiKeyCounter.remove(_apiToken);
+ isTerminated = true;
+ }
+ }
+
+ @Override
+ public boolean isDestroyed() {
+ return isTerminated;
+ }
private static CloseableHttpClient buildHttpClient(String apiToken, SplitClientConfig config) {
@@ -148,60 +290,30 @@ private static HttpClientBuilder setupProxy(HttpClientBuilder httpClientbuilder,
return httpClientbuilder;
}
- public SplitFactoryImpl(String apiToken, SplitClientConfig config) throws URISyntaxException {
- _apiToken = apiToken;
-
- if (USED_API_TOKENS.contains(apiToken)) {
- String message = String.format("factory instantiation: You already have %s with this API Key. " +
- "We recommend keeping only one instance of the factory at all times (Singleton pattern) and reusing " +
- "it throughout your application.",
- USED_API_TOKENS.count(apiToken) == 1 ? "1 factory" : String.format("%s factories", USED_API_TOKENS.count(apiToken)));
- _log.warn(message);
- } else if (!USED_API_TOKENS.isEmpty()) {
- String message = "factory instantiation: You already have an instance of the Split factory. " +
- "Make sure you definitely want this additional instance. We recommend keeping only one instance of " +
- "the factory at all times (Singleton pattern) and reusing it throughout your application.“";
- _log.warn(message);
- }
- USED_API_TOKENS.add(apiToken);
-
- if (config.blockUntilReady() == -1) {
- //BlockUntilReady not been set
- _log.warn("no setBlockUntilReadyTimeout parameter has been set - incorrect control treatments could be logged” " +
- "if no ready config has been set when building factory");
-
- }
-
-
- final CloseableHttpClient httpclient = buildHttpClient(apiToken, config);
-
- URI rootTarget = URI.create(config.endpoint());
- URI eventsRootTarget = URI.create(config.eventsEndpoint());
-
- // Metrics
- HttpMetrics httpMetrics = HttpMetrics.create(httpclient, eventsRootTarget);
- final FireAndForgetMetrics uncachedFireAndForget = FireAndForgetMetrics.instance(httpMetrics, 2, 1000);
+ private static int findPollingPeriod(Random rand, int max) {
+ int min = max / 2;
+ return rand.nextInt((max - min) + 1) + min;
+ }
- SDKReadinessGates gates = new SDKReadinessGates();
+ private SegmentSynchronizationTaskImp buildSegments(SplitClientConfig config) throws URISyntaxException {
+ SegmentChangeFetcher segmentChangeFetcher = HttpSegmentChangeFetcher.create(_httpclient, _rootTarget, _unCachedFireAndForget);
- // Segments
- SegmentChangeFetcher segmentChangeFetcher = HttpSegmentChangeFetcher.create(httpclient, rootTarget, uncachedFireAndForget);
- final RefreshableSegmentFetcher segmentFetcher = new RefreshableSegmentFetcher(segmentChangeFetcher,
+ return new SegmentSynchronizationTaskImp(segmentChangeFetcher,
findPollingPeriod(RANDOM, config.segmentsRefreshRate()),
config.numThreadsForSegmentFetch(),
- gates);
-
-
- SplitParser splitParser = new SplitParser(segmentFetcher);
-
- // Feature Changes
- SplitChangeFetcher splitChangeFetcher = HttpSplitChangeFetcher.create(httpclient, rootTarget, uncachedFireAndForget);
+ _gates,
+ _segmentCache);
+ }
- final RefreshableSplitFetcherProvider splitFetcherProvider = new RefreshableSplitFetcherProvider(splitChangeFetcher, splitParser, findPollingPeriod(RANDOM, config.featuresRefreshRate()), gates);
+ private SplitFetcher buildSplitFetcher() throws URISyntaxException {
+ SplitChangeFetcher splitChangeFetcher = HttpSplitChangeFetcher.create(_httpclient, _rootTarget, _unCachedFireAndForget);
+ SplitParser splitParser = new SplitParser(_segmentSynchronizationTaskImp, _segmentCache);
+ return new SplitFetcherImp(splitChangeFetcher, splitParser, _gates, _splitCache);
+ }
+ private ImpressionsManagerImpl buildImpressionsManager(SplitClientConfig config) throws URISyntaxException {
List impressionListeners = new ArrayList<>();
- // Setup integrations
if (config.integrationsConfig() != null) {
config.integrationsConfig().getImpressionsListeners(IntegrationsConfig.Execution.ASYNC).stream()
.map(l -> AsynchronousImpressionListener.build(l.listener(), l.queueSize()))
@@ -212,89 +324,12 @@ public SplitFactoryImpl(String apiToken, SplitClientConfig config) throws URISyn
.collect(Collectors.toCollection(() -> impressionListeners));
}
- // Impressions
- final ImpressionsManagerImpl impressionsManager = ImpressionsManagerImpl.instance(httpclient, config, impressionListeners);
-
- CachedMetrics cachedMetrics = new CachedMetrics(httpMetrics, TimeUnit.SECONDS.toMillis(config.metricsRefreshRate()));
- final FireAndForgetMetrics cachedFireAndForgetMetrics = FireAndForgetMetrics.instance(cachedMetrics, 2, 1000);
-
- final EventClient eventClient = EventClientImpl.create(httpclient, eventsRootTarget, config.eventsQueueSize(), config.eventFlushIntervalInMillis(), config.waitBeforeShutdown());
-
- // SyncManager
- final SyncManager syncManager = SyncManagerImp.build(config.streamingEnabled(), splitFetcherProvider, segmentFetcher, config.authServiceURL(), httpclient, config.streamingServiceURL(), config.authRetryBackoffBase(), buildSSEdHttpClient(config));
- syncManager.start();
-
- destroyer = new Runnable() {
- public void run() {
- _log.info("Shutdown called for split");
- try {
- segmentFetcher.close();
- _log.info("Successful shutdown of segment fetchers");
- splitFetcherProvider.close();
- _log.info("Successful shutdown of splits");
- impressionsManager.close();
- _log.info("Successful shutdown of impressions manager");
- uncachedFireAndForget.close();
- _log.info("Successful shutdown of metrics 1");
- cachedFireAndForgetMetrics.close();
- _log.info("Successful shutdown of metrics 2");
- httpclient.close();
- _log.info("Successful shutdown of httpclient");
- eventClient.close();
- _log.info("Successful shutdown of httpclient");
- new Thread(syncManager::shutdown).start();
- _log.info("Successful shutdown of syncManager");
- } catch (IOException e) {
- _log.error("We could not shutdown split", e);
- }
- }
- };
-
- if (config.destroyOnShutDown()) {
- Runtime.getRuntime().addShutdownHook(new Thread() {
- @Override
- public void run() {
- // Using the full path to avoid conflicting with Thread.destroy()
- SplitFactoryImpl.this.destroy();
- }
- });
- }
-
- _client = new SplitClientImpl(this,
- splitFetcherProvider.getFetcher(),
- impressionsManager,
- cachedFireAndForgetMetrics,
- eventClient,
- config,
- gates);
- _manager = new SplitManagerImpl(splitFetcherProvider.getFetcher(), config, gates);
- }
-
- private static int findPollingPeriod(Random rand, int max) {
- int min = max / 2;
- return rand.nextInt((max - min) + 1) + min;
+ return ImpressionsManagerImpl.instance(_httpclient, config, impressionListeners);
}
- public SplitClient client() {
- return _client;
- }
-
- public SplitManager manager() {
- return _manager;
- }
+ private FireAndForgetMetrics buildCachedFireAndForgetMetrics(SplitClientConfig config) {
+ CachedMetrics cachedMetrics = new CachedMetrics(_httpMetrics, TimeUnit.SECONDS.toMillis(config.metricsRefreshRate()));
- public void destroy() {
- synchronized (SplitFactoryImpl.class) {
- if (!isTerminated) {
- destroyer.run();
- USED_API_TOKENS.remove(_apiToken);
- isTerminated = true;
- }
- }
- }
-
- @Override
- public boolean isDestroyed() {
- return isTerminated;
+ return FireAndForgetMetrics.instance(cachedMetrics, 2, 1000);
}
}
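
The refactored factory wires the new split/segment caches into every component and exposes the shutdown sequence directly through destroy(). A minimal lifecycle sketch of the constructor shown above, assuming the standard SplitClientConfig builder API; the SDK key, timeout and split name are illustrative. In application code the factory is usually obtained through SplitFactoryBuilder rather than constructed directly.

import io.split.client.SplitClient;
import io.split.client.SplitClientConfig;
import io.split.client.SplitFactory;
import io.split.client.SplitFactoryImpl;

public class FactoryLifecycleSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative config; setBlockUntilReadyTimeout avoids the warning logged by the new constructor.
        SplitClientConfig config = SplitClientConfig.builder()
                .setBlockUntilReadyTimeout(10000)
                .build();

        // One factory per SDK key (the ApiKeyCounter warns about duplicates).
        SplitFactory factory = new SplitFactoryImpl("YOUR_SDK_KEY", config);
        SplitClient client = factory.client();

        client.blockUntilReady();                        // waits on SDKReadinessGates
        String treatment = client.getTreatment("user-1", "some_feature");
        System.out.println("treatment: " + treatment);

        factory.destroy();                               // closes fetch tasks, metrics, http clients and the sync manager
    }
}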
diff --git a/client/src/main/java/io/split/client/SplitManagerImpl.java b/client/src/main/java/io/split/client/SplitManagerImpl.java
index baa5fb462..5304b5911 100644
--- a/client/src/main/java/io/split/client/SplitManagerImpl.java
+++ b/client/src/main/java/io/split/client/SplitManagerImpl.java
@@ -2,19 +2,18 @@
import com.google.common.base.Preconditions;
import io.split.client.api.SplitView;
-import io.split.client.dtos.Partition;
import io.split.engine.SDKReadinessGates;
-import io.split.engine.experiments.ParsedCondition;
+import io.split.cache.SplitCache;
import io.split.engine.experiments.ParsedSplit;
-import io.split.engine.experiments.SplitFetcher;
+import io.split.inputValidation.SplitNameValidator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
+import java.util.Collection;
import java.util.List;
-import java.util.Set;
+
+import java.util.Optional;
import java.util.concurrent.TimeoutException;
/**
@@ -24,40 +23,39 @@ public class SplitManagerImpl implements SplitManager {
private static final Logger _log = LoggerFactory.getLogger(SplitManagerImpl.class);
- private final SplitFetcher _splitFetcher;
+ private final SplitCache _splitCache;
private final SplitClientConfig _config;
private final SDKReadinessGates _gates;
- public SplitManagerImpl(SplitFetcher splitFetcher,
+ public SplitManagerImpl(SplitCache splitCache,
SplitClientConfig config,
SDKReadinessGates gates) {
_config = Preconditions.checkNotNull(config);
- _splitFetcher = Preconditions.checkNotNull(splitFetcher);
+ _splitCache = Preconditions.checkNotNull(splitCache);
_gates = Preconditions.checkNotNull(gates);
}
@Override
public List splits() {
List result = new ArrayList<>();
- List parsedSplits = _splitFetcher.fetchAll();
+ Collection parsedSplits = _splitCache.getAll();
for (ParsedSplit split : parsedSplits) {
- result.add(toSplitView(split));
+ result.add(SplitView.fromParsedSplit(split));
}
+
return result;
}
@Override
public SplitView split(String featureName) {
- if (featureName == null) {
- _log.error("split: you passed a null split name, split name must be a non-empty string");
- return null;
- }
- if (featureName.isEmpty()) {
- _log.error("split: you passed an empty split name, split name must be a non-empty string");
+ Optional result = SplitNameValidator.isValid(featureName, "split");
+ if (!result.isPresent()) {
return null;
}
- ParsedSplit parsedSplit = _splitFetcher.fetch(featureName);
+ featureName = result.get();
+
+ ParsedSplit parsedSplit = _splitCache.get(featureName);
if (parsedSplit == null) {
if (_gates.isSDKReadyNow()) {
_log.warn("split: you passed \"" + featureName + "\" that does not exist in this environment, " +
@@ -65,16 +63,18 @@ public SplitView split(String featureName) {
}
return null;
}
- return toSplitView(parsedSplit);
+
+ return SplitView.fromParsedSplit(parsedSplit);
}
@Override
public List splitNames() {
List result = new ArrayList<>();
- List parsedSplits = _splitFetcher.fetchAll();
+ Collection parsedSplits = _splitCache.getAll();
for (ParsedSplit split : parsedSplits) {
result.add(split.feature());
}
+
return result;
}
@@ -87,25 +87,4 @@ public void blockUntilReady() throws TimeoutException, InterruptedException {
throw new TimeoutException("SDK was not ready in " + _config.blockUntilReady()+ " milliseconds");
}
}
-
- private SplitView toSplitView(ParsedSplit parsedSplit) {
- SplitView splitView = new SplitView();
- splitView.name = parsedSplit.feature();
- splitView.trafficType = parsedSplit.trafficTypeName();
- splitView.killed = parsedSplit.killed();
- splitView.changeNumber = parsedSplit.changeNumber();
-
- Set treatments = new HashSet();
- for (ParsedCondition condition : parsedSplit.parsedConditions()) {
- for (Partition partition : condition.partitions()) {
- treatments.add(partition.treatment);
- }
- }
- treatments.add(parsedSplit.defaultTreatment());
-
- splitView.treatments = new ArrayList(treatments);
- splitView.configs = parsedSplit.configurations() == null? Collections.emptyMap() : parsedSplit.configurations() ;
-
- return splitView;
- }
}
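
SplitManagerImpl now reads directly from the SplitCache and runs names through SplitNameValidator before the lookup, so invalid or trimmed-empty names return null instead of reaching the fetcher. A short usage sketch, assuming a factory built as in the earlier sketch and a hypothetical split name:

import io.split.client.SplitManager;
import io.split.client.api.SplitView;
import java.util.List;

public class ManagerSketch {
    // 'factory' is assumed to be an already-built SplitFactory (see the previous sketch).
    static void printSplits(io.split.client.SplitFactory factory) throws Exception {
        SplitManager manager = factory.manager();
        manager.blockUntilReady();

        List<String> names = manager.splitNames();       // names come straight from the cache
        System.out.println("known splits: " + names);

        SplitView view = manager.split("some_feature");  // null if the name is invalid or unknown
        if (view != null) {
            System.out.println(view.name + " treatments=" + view.treatments);
        }
    }
}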
diff --git a/client/src/main/java/io/split/client/api/SplitView.java b/client/src/main/java/io/split/client/api/SplitView.java
index ea04627b0..c053c8950 100644
--- a/client/src/main/java/io/split/client/api/SplitView.java
+++ b/client/src/main/java/io/split/client/api/SplitView.java
@@ -1,7 +1,16 @@
package io.split.client.api;
+import io.split.client.dtos.Partition;
+import io.split.engine.experiments.ParsedCondition;
+import io.split.engine.experiments.ParsedSplit;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+
/**
* A view of a Split meant for consumption through SplitManager interface.
@@ -15,4 +24,25 @@ public class SplitView {
public List treatments;
public long changeNumber;
public Map configs;
+
+ public static SplitView fromParsedSplit(ParsedSplit parsedSplit) {
+ SplitView splitView = new SplitView();
+ splitView.name = parsedSplit.feature();
+ splitView.trafficType = parsedSplit.trafficTypeName();
+ splitView.killed = parsedSplit.killed();
+ splitView.changeNumber = parsedSplit.changeNumber();
+
+ Set treatments = new HashSet();
+ for (ParsedCondition condition : parsedSplit.parsedConditions()) {
+ for (Partition partition : condition.partitions()) {
+ treatments.add(partition.treatment);
+ }
+ }
+ treatments.add(parsedSplit.defaultTreatment());
+
+ splitView.treatments = new ArrayList(treatments);
+ splitView.configs = parsedSplit.configurations() == null? Collections.emptyMap() : parsedSplit.configurations() ;
+
+ return splitView;
+ }
}
diff --git a/client/src/main/java/io/split/client/jmx/SplitJmxMonitor.java b/client/src/main/java/io/split/client/jmx/SplitJmxMonitor.java
index 5dd1167ed..e5d49e115 100644
--- a/client/src/main/java/io/split/client/jmx/SplitJmxMonitor.java
+++ b/client/src/main/java/io/split/client/jmx/SplitJmxMonitor.java
@@ -1,11 +1,16 @@
package io.split.client.jmx;
+import io.split.cache.SegmentCache;
+import io.split.cache.SplitCache;
import io.split.client.SplitClient;
import io.split.engine.experiments.SplitFetcher;
import io.split.engine.segments.SegmentFetcher;
+import io.split.engine.segments.SegmentSynchronizationTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static com.google.common.base.Preconditions.checkNotNull;
+
/**
* Created by patricioe on 1/18/16.
*/
@@ -15,24 +20,36 @@ public class SplitJmxMonitor implements SplitJmxMonitorMBean {
private final SplitClient _client;
private final SplitFetcher _featureFetcher;
- private final SegmentFetcher _segmentFetcher;
+ private final SplitCache _splitCache;
+ private final SegmentSynchronizationTask _segmentSynchronizationTask;
+ private SegmentCache _segmentCache;
- public SplitJmxMonitor(SplitClient splitClient, SplitFetcher fetcher, SegmentFetcher segmentFetcher) {
- _client = splitClient;
- _featureFetcher = fetcher;
- _segmentFetcher = segmentFetcher;
+ public SplitJmxMonitor(SplitClient splitClient, SplitFetcher featureFetcher, SplitCache splitCache, SegmentSynchronizationTask segmentSynchronizationTask, SegmentCache segmentCache) {
+ _client = checkNotNull(splitClient);
+ _featureFetcher = checkNotNull(featureFetcher);
+ _splitCache = checkNotNull(splitCache);
+ _segmentSynchronizationTask = checkNotNull(segmentSynchronizationTask);
+ _segmentCache = checkNotNull(segmentCache);
}
@Override
public boolean forceSyncFeatures() {
- _featureFetcher.forceRefresh();
+ _featureFetcher.forceRefresh(true);
_log.info("Features successfully refreshed via JMX");
return true;
}
@Override
public boolean forceSyncSegment(String segmentName) {
- _segmentFetcher.segment(segmentName).forceRefresh();
+ SegmentFetcher fetcher = _segmentSynchronizationTask.getFetcher(segmentName);
+ try{
+ fetcher.fetch(true);
+ }
+ // This should never happen because getFetcher first initializes the segment; the try/catch is only a safety net.
+ catch (NullPointerException np){
+ throw np;
+ }
+
_log.info("Segment " + segmentName + " successfully refreshed via JMX");
return true;
}
@@ -44,11 +61,11 @@ public String getTreatment(String key, String featureName) {
@Override
public String fetchDefinition(String featureName) {
- return _featureFetcher.fetch(featureName).toString();
+ return _splitCache.get(featureName).toString();
}
@Override
public boolean isKeyInSegment(String key, String segmentName) {
- return _segmentFetcher.segment(segmentName).contains(key);
+ return _segmentCache.isInSegment(segmentName, key);
}
}
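
The JMX monitor now takes the caches and the SegmentSynchronizationTask, so forced refreshes go through the new fetch(true)/forceRefresh(true) paths. A sketch of registering it with the platform MBean server, assuming the five collaborators have already been built by the factory wiring shown earlier; the ObjectName is illustrative.

import io.split.client.jmx.SplitJmxMonitor;
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxRegistrationSketch {
    static void register(SplitJmxMonitor monitor) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // Illustrative object name; pick whatever naming convention your service uses.
        server.registerMBean(monitor, new ObjectName("io.split:type=SplitJmxMonitor"));
        // forceSyncFeatures() / forceSyncSegment("segment") can then be invoked from jconsole.
    }
}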
diff --git a/client/src/main/java/io/split/engine/common/PushManager.java b/client/src/main/java/io/split/engine/common/PushManager.java
index 1fd79dfcf..62a491478 100644
--- a/client/src/main/java/io/split/engine/common/PushManager.java
+++ b/client/src/main/java/io/split/engine/common/PushManager.java
@@ -13,4 +13,5 @@ enum Status {
void stop();
void startWorkers();
void stopWorkers();
+ void scheduleConnectionReset();
}
diff --git a/client/src/main/java/io/split/engine/common/PushManagerImp.java b/client/src/main/java/io/split/engine/common/PushManagerImp.java
index 057a174e6..1d770b6b1 100644
--- a/client/src/main/java/io/split/engine/common/PushManagerImp.java
+++ b/client/src/main/java/io/split/engine/common/PushManagerImp.java
@@ -8,6 +8,7 @@
import io.split.engine.sse.EventSourceClientImp;
import io.split.engine.sse.PushStatusTracker;
import io.split.engine.sse.PushStatusTrackerImp;
+import io.split.engine.sse.client.SSEClient;
import io.split.engine.sse.dtos.AuthenticationResponse;
import io.split.engine.sse.dtos.SegmentQueueDto;
import io.split.engine.sse.workers.SegmentsWorkerImp;
@@ -24,6 +25,7 @@
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
import static com.google.common.base.Preconditions.checkNotNull;
@@ -32,28 +34,27 @@ public class PushManagerImp implements PushManager {
private final AuthApiClient _authApiClient;
private final EventSourceClient _eventSourceClient;
- private final Backoff _backoff;
private final SplitsWorker _splitsWorker;
private final Worker _segmentWorker;
private final PushStatusTracker _pushStatusTracker;
private Future> _nextTokenRefreshTask;
private final ScheduledExecutorService _scheduledExecutorService;
+ private AtomicLong _expirationTime;
@VisibleForTesting
/* package private */ PushManagerImp(AuthApiClient authApiClient,
EventSourceClient eventSourceClient,
SplitsWorker splitsWorker,
Worker segmentWorker,
- Backoff backoff,
PushStatusTracker pushStatusTracker) {
_authApiClient = checkNotNull(authApiClient);
_eventSourceClient = checkNotNull(eventSourceClient);
- _backoff = checkNotNull(backoff);
_splitsWorker = splitsWorker;
_segmentWorker = segmentWorker;
_pushStatusTracker = pushStatusTracker;
+ _expirationTime = new AtomicLong();
_scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("Split-SSERefreshToken-%d")
@@ -64,7 +65,6 @@ public static PushManagerImp build(Synchronizer synchronizer,
String streamingUrl,
String authUrl,
CloseableHttpClient httpClient,
- int authRetryBackOffBase,
LinkedBlockingQueue statusMessages,
CloseableHttpClient sseHttpClient) {
SplitsWorker splitsWorker = new SplitsWorkerImp(synchronizer);
@@ -74,7 +74,6 @@ public static PushManagerImp build(Synchronizer synchronizer,
EventSourceClientImp.build(streamingUrl, splitsWorker, segmentWorker, pushStatusTracker, sseHttpClient),
splitsWorker,
segmentWorker,
- new Backoff(authRetryBackOffBase),
pushStatusTracker);
}
@@ -83,14 +82,13 @@ public synchronized void start() {
AuthenticationResponse response = _authApiClient.Authenticate();
_log.debug(String.format("Auth service response pushEnabled: %s", response.isPushEnabled()));
if (response.isPushEnabled() && startSse(response.getToken(), response.getChannels())) {
- scheduleConnectionReset(response.getExpiration());
- _backoff.reset();
+ _expirationTime.set(response.getExpiration());
return;
}
stop();
if (response.isRetry()) {
- scheduleConnectionReset(_backoff.interval());
+ _pushStatusTracker.handleSseStatus(SSEClient.StatusMessage.RETRYABLE_ERROR);
} else {
_pushStatusTracker.forcePushDisable();
}
@@ -106,13 +104,14 @@ public synchronized void stop() {
}
}
- private void scheduleConnectionReset(long time) {
- _log.debug(String.format("scheduleNextTokenRefresh in %s SECONDS", time));
+ @Override
+ public synchronized void scheduleConnectionReset() {
+ _log.debug(String.format("scheduleNextTokenRefresh in %s SECONDS", _expirationTime));
_nextTokenRefreshTask = _scheduledExecutorService.schedule(() -> {
_log.debug("Starting scheduleNextTokenRefresh ...");
stop();
start();
- }, time, TimeUnit.SECONDS);
+ }, _expirationTime.get(), TimeUnit.SECONDS);
}
private boolean startSse(String token, String channels) {
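
PushManagerImp no longer owns a Backoff; it only remembers the auth token expiration (in seconds) and, when asked via scheduleConnectionReset(), schedules a stop/start after that many seconds on its single-thread scheduler. A stripped-down sketch of that pattern using JDK classes only (not the SDK class itself), assuming an expiration delivered by the auth response:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class TokenRefreshSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final AtomicLong expirationSeconds = new AtomicLong();

    // Called after a successful auth: remember how long the SSE token is valid.
    void onAuthenticated(long expirationInSeconds) {
        expirationSeconds.set(expirationInSeconds);
    }

    // Called by the sync layer once streaming is up: reconnect when the token expires.
    void scheduleConnectionReset(Runnable stopThenStart) {
        scheduler.schedule(stopThenStart, expirationSeconds.get(), TimeUnit.SECONDS);
    }
}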
diff --git a/client/src/main/java/io/split/engine/common/SyncManagerImp.java b/client/src/main/java/io/split/engine/common/SyncManagerImp.java
index 05bf44c46..2d0fe2fa9 100644
--- a/client/src/main/java/io/split/engine/common/SyncManagerImp.java
+++ b/client/src/main/java/io/split/engine/common/SyncManagerImp.java
@@ -2,8 +2,11 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import io.split.engine.experiments.RefreshableSplitFetcherProvider;
-import io.split.engine.segments.RefreshableSegmentFetcher;
+import io.split.cache.SegmentCache;
+import io.split.cache.SplitCache;
+import io.split.engine.experiments.SplitFetcher;
+import io.split.engine.experiments.SplitSynchronizationTask;
+import io.split.engine.segments.SegmentSynchronizationTaskImp;
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -26,12 +29,14 @@ public class SyncManagerImp implements SyncManager {
private final LinkedBlockingQueue _incomingPushStatus;
private final ExecutorService _executorService;
private Future> _pushStatusMonitorTask;
+ private Backoff _backoff;
@VisibleForTesting
/* package private */ SyncManagerImp(boolean streamingEnabledConfig,
Synchronizer synchronizer,
PushManager pushManager,
- LinkedBlockingQueue pushMessages) {
+ LinkedBlockingQueue pushMessages,
+ int authRetryBackOffBase) {
_streamingEnabledConfig = new AtomicBoolean(streamingEnabledConfig);
_synchronizer = checkNotNull(synchronizer);
_pushManager = checkNotNull(pushManager);
@@ -41,20 +46,24 @@ public class SyncManagerImp implements SyncManager {
.setNameFormat("SPLIT-PushStatusMonitor-%d")
.setDaemon(true)
.build());
+ _backoff = new Backoff(authRetryBackOffBase);
}
public static SyncManagerImp build(boolean streamingEnabledConfig,
- RefreshableSplitFetcherProvider refreshableSplitFetcherProvider,
- RefreshableSegmentFetcher segmentFetcher,
- String authUrl,
- CloseableHttpClient httpClient,
- String streamingServiceUrl,
- int authRetryBackOffBase,
- CloseableHttpClient sseHttpClient) {
+ SplitSynchronizationTask splitSynchronizationTask,
+ SplitFetcher splitFetcher,
+ SegmentSynchronizationTaskImp segmentSynchronizationTaskImp,
+ SplitCache splitCache,
+ String authUrl,
+ CloseableHttpClient httpClient,
+ String streamingServiceUrl,
+ int authRetryBackOffBase,
+ CloseableHttpClient sseHttpClient,
+ SegmentCache segmentCache) {
LinkedBlockingQueue pushMessages = new LinkedBlockingQueue<>();
- Synchronizer synchronizer = new SynchronizerImp(refreshableSplitFetcherProvider, segmentFetcher);
- PushManager pushManager = PushManagerImp.build(synchronizer, streamingServiceUrl, authUrl, httpClient, authRetryBackOffBase, pushMessages, sseHttpClient);
- return new SyncManagerImp(streamingEnabledConfig, synchronizer, pushManager, pushMessages);
+ Synchronizer synchronizer = new SynchronizerImp(splitSynchronizationTask, splitFetcher, segmentSynchronizationTaskImp, splitCache, segmentCache);
+ PushManager pushManager = PushManagerImp.build(synchronizer, streamingServiceUrl, authUrl, httpClient, pushMessages, sseHttpClient);
+ return new SyncManagerImp(streamingEnabledConfig, synchronizer, pushManager, pushMessages, authRetryBackOffBase);
}
@Override
@@ -99,14 +108,21 @@ private void startPollingMode() {
_synchronizer.stopPeriodicFetching();
_synchronizer.syncAll();
_pushManager.startWorkers();
+ _pushManager.scheduleConnectionReset();
+ _backoff.reset();
break;
case STREAMING_DOWN:
_pushManager.stopWorkers();
_synchronizer.startPeriodicFetching();
break;
case STREAMING_BACKOFF:
+ long howLong = _backoff.interval() * 1000;
+ _log.error(String.format("Retryable error in streaming subsystem. Switching to polling and retrying in %d seconds", howLong/1000));
_synchronizer.startPeriodicFetching();
_pushManager.stopWorkers();
+ _pushManager.stop();
+ Thread.sleep(howLong);
+ _incomingPushStatus.clear();
_pushManager.start();
break;
case STREAMING_OFF:
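
The retry backoff moved from PushManagerImp into SyncManagerImp: on STREAMING_BACKOFF the handler switches to polling, stops push, sleeps for _backoff.interval() seconds (converted to milliseconds), clears the queue and restarts; once streaming is healthy again it resets the backoff. The Backoff class itself is not part of this patch, so the sketch below is only an assumption of the interval()/reset() contract the handler relies on (exponential growth from a configurable base, capped):

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: a plausible interval()/reset() contract, not the SDK's Backoff implementation.
public class BackoffSketch {
    private static final long MAX_SECONDS = 1800;
    private final long baseSeconds;
    private final AtomicInteger attempt = new AtomicInteger(0);

    public BackoffSketch(long baseSeconds) {
        this.baseSeconds = baseSeconds;
    }

    // Grows with each call: base, 2*base, 4*base, ... capped at MAX_SECONDS.
    public long interval() {
        long candidate = baseSeconds * (1L << Math.min(attempt.getAndIncrement(), 20));
        return Math.min(candidate, MAX_SECONDS);
    }

    // Called once streaming recovers, so the next outage starts from the base interval again.
    public void reset() {
        attempt.set(0);
    }
}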
diff --git a/client/src/main/java/io/split/engine/common/SynchronizerImp.java b/client/src/main/java/io/split/engine/common/SynchronizerImp.java
index 9d370391c..4e8115b18 100644
--- a/client/src/main/java/io/split/engine/common/SynchronizerImp.java
+++ b/client/src/main/java/io/split/engine/common/SynchronizerImp.java
@@ -1,9 +1,12 @@
package io.split.engine.common;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import io.split.engine.experiments.RefreshableSplitFetcher;
-import io.split.engine.experiments.RefreshableSplitFetcherProvider;
-import io.split.engine.segments.RefreshableSegmentFetcher;
+import io.split.cache.SegmentCache;
+import io.split.cache.SplitCache;
+import io.split.engine.experiments.SplitFetcher;
+import io.split.engine.experiments.SplitSynchronizationTask;
+import io.split.engine.segments.SegmentFetcher;
+import io.split.engine.segments.SegmentSynchronizationTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -17,16 +20,23 @@
public class SynchronizerImp implements Synchronizer {
private static final Logger _log = LoggerFactory.getLogger(Synchronizer.class);
- private final RefreshableSplitFetcherProvider _refreshableSplitFetcherProvider;
- private final RefreshableSplitFetcher _splitFetcher;
- private final RefreshableSegmentFetcher _segmentFetcher;
+ private final SplitSynchronizationTask _splitSynchronizationTask;
+ private final SplitFetcher _splitFetcher;
+ private final SegmentSynchronizationTask _segmentSynchronizationTaskImp;
private final ScheduledExecutorService _syncAllScheduledExecutorService;
+ private final SplitCache _splitCache;
+ private final SegmentCache _segmentCache;
- public SynchronizerImp(RefreshableSplitFetcherProvider refreshableSplitFetcherProvider,
- RefreshableSegmentFetcher segmentFetcher) {
- _refreshableSplitFetcherProvider = checkNotNull(refreshableSplitFetcherProvider);
- _splitFetcher = checkNotNull(_refreshableSplitFetcherProvider.getFetcher());
- _segmentFetcher = checkNotNull(segmentFetcher);
+ public SynchronizerImp(SplitSynchronizationTask splitSynchronizationTask,
+ SplitFetcher splitFetcher,
+ SegmentSynchronizationTask segmentSynchronizationTaskImp,
+ SplitCache splitCache,
+ SegmentCache segmentCache) {
+ _splitSynchronizationTask = checkNotNull(splitSynchronizationTask);
+ _splitFetcher = checkNotNull(splitFetcher);
+ _segmentSynchronizationTaskImp = checkNotNull(segmentSynchronizationTaskImp);
+ _splitCache = checkNotNull(splitCache);
+ _segmentCache = checkNotNull(segmentCache);
ThreadFactory splitsThreadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
@@ -38,44 +48,51 @@ public SynchronizerImp(RefreshableSplitFetcherProvider refreshableSplitFetcherPr
@Override
public void syncAll() {
_syncAllScheduledExecutorService.schedule(() -> {
- _splitFetcher.run();
- _segmentFetcher.forceRefreshAll();
+ _splitFetcher.fetchAll(true);
+ _segmentSynchronizationTaskImp.fetchAll(true);
}, 0, TimeUnit.SECONDS);
}
@Override
public void startPeriodicFetching() {
_log.debug("Starting Periodic Fetching ...");
- _refreshableSplitFetcherProvider.startPeriodicFetching();
- _segmentFetcher.startPeriodicFetching();
+ _splitSynchronizationTask.startPeriodicFetching();
+ _segmentSynchronizationTaskImp.startPeriodicFetching();
}
@Override
public void stopPeriodicFetching() {
_log.debug("Stop Periodic Fetching ...");
- _refreshableSplitFetcherProvider.stop();
- _segmentFetcher.stop();
+ _splitSynchronizationTask.stop();
+ _segmentSynchronizationTaskImp.stop();
}
@Override
public void refreshSplits(long targetChangeNumber) {
- if (targetChangeNumber > _splitFetcher.changeNumber()) {
- _splitFetcher.forceRefresh();
+ if (targetChangeNumber > _splitCache.getChangeNumber()) {
+ _splitFetcher.forceRefresh(true);
}
}
@Override
public void localKillSplit(String splitName, String defaultTreatment, long newChangeNumber) {
- if (newChangeNumber > _splitFetcher.changeNumber()) {
- _splitFetcher.killSplit(splitName, defaultTreatment, newChangeNumber);
+ if (newChangeNumber > _splitCache.getChangeNumber()) {
+ _splitCache.kill(splitName, defaultTreatment, newChangeNumber);
refreshSplits(newChangeNumber);
}
}
@Override
public void refreshSegment(String segmentName, long changeNumber) {
- if (changeNumber > _segmentFetcher.getChangeNumber(segmentName)) {
- _segmentFetcher.forceRefresh(segmentName);
+ if (changeNumber > _segmentCache.getChangeNumber(segmentName)) {
+ SegmentFetcher fetcher = _segmentSynchronizationTaskImp.getFetcher(segmentName);
+ try{
+ fetcher.fetch(true);
+ }
+ // This should never happen because getFetcher first initializes the segment; the try/catch is only a safety net.
+ catch (NullPointerException np){
+ throw np;
+ }
}
}
}
diff --git a/client/src/main/java/io/split/engine/evaluator/Evaluator.java b/client/src/main/java/io/split/engine/evaluator/Evaluator.java
new file mode 100644
index 000000000..fbcc52c35
--- /dev/null
+++ b/client/src/main/java/io/split/engine/evaluator/Evaluator.java
@@ -0,0 +1,7 @@
+package io.split.engine.evaluator;
+
+import java.util.Map;
+
+public interface Evaluator {
+ EvaluatorImp.TreatmentLabelAndChangeNumber evaluateFeature(String matchingKey, String bucketingKey, String split, Map attributes);
+}
diff --git a/client/src/main/java/io/split/engine/evaluator/EvaluatorImp.java b/client/src/main/java/io/split/engine/evaluator/EvaluatorImp.java
new file mode 100644
index 000000000..c4efe70cb
--- /dev/null
+++ b/client/src/main/java/io/split/engine/evaluator/EvaluatorImp.java
@@ -0,0 +1,126 @@
+package io.split.engine.evaluator;
+
+import io.split.client.dtos.ConditionType;
+import io.split.client.exceptions.ChangeNumberExceptionWrapper;
+import io.split.cache.SplitCache;
+import io.split.engine.experiments.ParsedCondition;
+import io.split.engine.experiments.ParsedSplit;
+import io.split.engine.splitter.Splitter;
+import io.split.grammar.Treatments;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+public class EvaluatorImp implements Evaluator {
+
+ private static final Logger _log = LoggerFactory.getLogger(EvaluatorImp.class);
+
+ private final SplitCache _splitCache;
+
+ public EvaluatorImp(SplitCache splitCache) {
+ _splitCache = checkNotNull(splitCache);
+ }
+
+ @Override
+ public TreatmentLabelAndChangeNumber evaluateFeature(String matchingKey, String bucketingKey, String split, Map attributes) {
+ try {
+ ParsedSplit parsedSplit = _splitCache.get(split);
+
+ if (parsedSplit == null) {
+ return new TreatmentLabelAndChangeNumber(Treatments.CONTROL, Labels.DEFINITION_NOT_FOUND);
+ }
+
+ return getTreatment(matchingKey, bucketingKey, parsedSplit, attributes);
+ }
+ catch (ChangeNumberExceptionWrapper e) {
+ _log.error("Evaluator Exception", e.wrappedException());
+ return new EvaluatorImp.TreatmentLabelAndChangeNumber(Treatments.CONTROL, Labels.EXCEPTION, e.changeNumber());
+ } catch (Exception e) {
+ _log.error("Evaluator Exception", e);
+ return new EvaluatorImp.TreatmentLabelAndChangeNumber(Treatments.CONTROL, Labels.EXCEPTION);
+ }
+ }
+
+ /**
+ * @param matchingKey MUST NOT be null
+ * @param bucketingKey
+ * @param parsedSplit MUST NOT be null
+ * @param attributes MUST NOT be null
+ * @return
+ * @throws ChangeNumberExceptionWrapper
+ */
+ private TreatmentLabelAndChangeNumber getTreatment(String matchingKey, String bucketingKey, ParsedSplit parsedSplit, Map attributes) throws ChangeNumberExceptionWrapper {
+ try {
+ if (parsedSplit.killed()) {
+ String config = parsedSplit.configurations() != null ? parsedSplit.configurations().get(parsedSplit.defaultTreatment()) : null;
+ return new TreatmentLabelAndChangeNumber(parsedSplit.defaultTreatment(), Labels.KILLED, parsedSplit.changeNumber(), config);
+ }
+
+ /*
+ * There are three parts to a single Split: 1) Whitelists 2) Traffic Allocation
+ * 3) Rollout. The flag inRollout is there to understand when we move into the Rollout
+ * section. This is because we need to make sure that the Traffic Allocation
+ * computation happens after the whitelist but before the rollout.
+ */
+ boolean inRollout = false;
+
+ String bk = (bucketingKey == null) ? matchingKey : bucketingKey;
+
+ for (ParsedCondition parsedCondition : parsedSplit.parsedConditions()) {
+
+ if (!inRollout && parsedCondition.conditionType() == ConditionType.ROLLOUT) {
+
+ if (parsedSplit.trafficAllocation() < 100) {
+ // if the traffic allocation is 100%, no need to do anything special.
+ int bucket = Splitter.getBucket(bk, parsedSplit.trafficAllocationSeed(), parsedSplit.algo());
+
+ if (bucket > parsedSplit.trafficAllocation()) {
+ // out of split
+ String config = parsedSplit.configurations() != null ? parsedSplit.configurations().get(parsedSplit.defaultTreatment()) : null;
+ return new TreatmentLabelAndChangeNumber(parsedSplit.defaultTreatment(), Labels.NOT_IN_SPLIT, parsedSplit.changeNumber(), config);
+ }
+
+ }
+ inRollout = true;
+ }
+
+ if (parsedCondition.matcher().match(matchingKey, bucketingKey, attributes, this)) {
+ String treatment = Splitter.getTreatment(bk, parsedSplit.seed(), parsedCondition.partitions(), parsedSplit.algo());
+ String config = parsedSplit.configurations() != null ? parsedSplit.configurations().get(treatment) : null;
+ return new TreatmentLabelAndChangeNumber(treatment, parsedCondition.label(), parsedSplit.changeNumber(), config);
+ }
+ }
+
+ String config = parsedSplit.configurations() != null ? parsedSplit.configurations().get(parsedSplit.defaultTreatment()) : null;
+ return new TreatmentLabelAndChangeNumber(parsedSplit.defaultTreatment(), Labels.DEFAULT_RULE, parsedSplit.changeNumber(), config);
+ } catch (Exception e) {
+ throw new ChangeNumberExceptionWrapper(e, parsedSplit.changeNumber());
+ }
+ }
+
+ public static final class TreatmentLabelAndChangeNumber {
+ public final String treatment;
+ public final String label;
+ public final Long changeNumber;
+ public final String configurations;
+
+ public TreatmentLabelAndChangeNumber(String treatment, String label) {
+ this(treatment, label, null, null);
+ }
+
+ public TreatmentLabelAndChangeNumber(String treatment, String label, Long changeNumber) {
+ this(treatment, label, changeNumber, null);
+ }
+
+ public TreatmentLabelAndChangeNumber(String treatment, String label, Long changeNumber, String configurations) {
+ this.treatment = treatment;
+ this.label = label;
+ this.changeNumber = changeNumber;
+ this.configurations = configurations;
+ }
+ }
+}
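
EvaluatorImp pulls the per-split evaluation out of the client: whitelist conditions run first, the traffic-allocation gate runs once the first ROLLOUT condition is reached (a bucket above the allocation returns the default treatment with the "not in split" label), and the default rule applies when nothing matches. A small hedged example of calling it directly; the split name and attributes are illustrative, and the cache is left empty so the result is the "definition not found" case.

import io.split.cache.InMemoryCacheImp;
import io.split.cache.SplitCache;
import io.split.engine.evaluator.Evaluator;
import io.split.engine.evaluator.EvaluatorImp;
import java.util.Collections;

public class EvaluatorSketch {
    public static void main(String[] args) {
        SplitCache cache = new InMemoryCacheImp();       // empty cache for the sketch
        Evaluator evaluator = new EvaluatorImp(cache);

        // Unknown split: control treatment with the "definition not found" label.
        EvaluatorImp.TreatmentLabelAndChangeNumber result =
                evaluator.evaluateFeature("user-1", null, "some_feature", Collections.emptyMap());
        System.out.println(result.treatment + " / " + result.label);
    }
}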
diff --git a/client/src/main/java/io/split/engine/evaluator/Labels.java b/client/src/main/java/io/split/engine/evaluator/Labels.java
new file mode 100644
index 000000000..97e486b91
--- /dev/null
+++ b/client/src/main/java/io/split/engine/evaluator/Labels.java
@@ -0,0 +1,9 @@
+package io.split.engine.evaluator;
+
+public class Labels {
+ public static final String NOT_IN_SPLIT = "not in split";
+ public static final String DEFAULT_RULE = "default rule";
+ public static final String KILLED = "killed";
+ public static final String DEFINITION_NOT_FOUND = "definition not found";
+ public static final String EXCEPTION = "exception";
+}
diff --git a/client/src/main/java/io/split/engine/experiments/RefreshableSplitFetcher.java b/client/src/main/java/io/split/engine/experiments/RefreshableSplitFetcher.java
deleted file mode 100644
index 4ef49750f..000000000
--- a/client/src/main/java/io/split/engine/experiments/RefreshableSplitFetcher.java
+++ /dev/null
@@ -1,276 +0,0 @@
-package io.split.engine.experiments;
-
-import com.google.common.collect.ConcurrentHashMultiset;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multiset;
-import com.google.common.collect.Multisets;
-import com.google.common.collect.Sets;
-import io.split.client.dtos.Condition;
-import io.split.client.dtos.Matcher;
-import io.split.client.dtos.MatcherType;
-import io.split.client.dtos.Split;
-import io.split.client.dtos.SplitChange;
-import io.split.client.dtos.Status;
-import io.split.engine.SDKReadinessGates;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-/**
- * An ExperimentFetcher that refreshes experiment definitions periodically.
- *
- * @author adil
- */
-public class RefreshableSplitFetcher implements SplitFetcher, Runnable {
-
- private static final Logger _log = LoggerFactory.getLogger(RefreshableSplitFetcher.class);
-
- private final SplitParser _parser;
- private final SplitChangeFetcher _splitChangeFetcher;
- private final AtomicLong _changeNumber;
-
- private Map _concurrentMap = Maps.newConcurrentMap();
-
- /**
- * Contains all the traffic types that are currently being used by the splits and also the count
- *
- * For example if there are three splits, one of traffic type "account" and two of traffic type "user",
- * this multiset will contain [{"user", 2}, {"account", 1}]
- *
- * The count is used to maintain how many splits are using a traffic type, so when
- * an ARCHIVED split is received, we know if we need to remove a traffic type from the multiset.
- */
- Multiset _concurrentTrafficTypeNameSet = ConcurrentHashMultiset.create();
- private final SDKReadinessGates _gates;
-
- private final Object _lock = new Object();
-
-
- public RefreshableSplitFetcher(SplitChangeFetcher splitChangeFetcher, SplitParser parser, SDKReadinessGates gates) {
- this(splitChangeFetcher, parser, gates, -1);
- }
-
- /**
- * This constructor is package private because it is meant primarily for unit tests
- * where we want to set the starting change number. All regular clients should use
- * the public constructor.
- *
- * @param splitChangeFetcher MUST NOT be null
- * @param parser MUST NOT be null
- * @param startingChangeNumber
- */
- /*package private*/ RefreshableSplitFetcher(SplitChangeFetcher splitChangeFetcher,
- SplitParser parser,
- SDKReadinessGates gates,
- long startingChangeNumber) {
- _splitChangeFetcher = splitChangeFetcher;
- _parser = parser;
- _gates = gates;
- _changeNumber = new AtomicLong(startingChangeNumber);
-
- checkNotNull(_parser);
- checkNotNull(_splitChangeFetcher);
- }
-
- @Override
- public void forceRefresh() {
- _log.debug("Force Refresh splits starting ...");
- try {
- while (true) {
- long start = _changeNumber.get();
- runWithoutExceptionHandling();
- long end = _changeNumber.get();
-
- if (start >= end) {
- break;
- }
- }
- } catch (InterruptedException e) {
- _log.warn("Interrupting split fetcher task");
- Thread.currentThread().interrupt();
- } catch (Throwable t) {
- _log.error("RefreshableSplitFetcher failed: " + t.getMessage());
- }
- }
-
- @Override
- public long changeNumber() {
- return _changeNumber.get();
- }
-
- @Override
- public void killSplit(String splitName, String defaultTreatment, long changeNumber) {
- synchronized (_lock) {
- ParsedSplit parsedSplit = _concurrentMap.get(splitName);
-
- ParsedSplit updatedSplit = new ParsedSplit(parsedSplit.feature(),
- parsedSplit.seed(),
- true,
- defaultTreatment,
- parsedSplit.parsedConditions(),
- parsedSplit.trafficTypeName(),
- changeNumber,
- parsedSplit.trafficAllocation(),
- parsedSplit.trafficAllocationSeed(),
- parsedSplit.algo(),
- parsedSplit.configurations());
-
- _concurrentMap.put(splitName, updatedSplit);
- }
- }
-
- @Override
- public ParsedSplit fetch(String test) {
- return _concurrentMap.get(test);
- }
-
- public List fetchAll() {
- return Lists.newArrayList(_concurrentMap.values());
- }
-
- @Override
- public Set fetchKnownTrafficTypes() {
- // We return the "keys" of the multiset that have a count greater than 0
- // If the multiset has [{"user",2}.{"account",0}], elementSet only returns
- // ["user"] (it ignores "account")
- return Sets.newHashSet(_concurrentTrafficTypeNameSet.elementSet());
- }
-
- public Collection fetch() {
- return _concurrentMap.values();
- }
-
- public void clear() {
- _concurrentMap.clear();
- _concurrentTrafficTypeNameSet.clear();
- }
-
- @Override
- public void run() {
- _log.debug("Fetch splits starting ...");
- long start = _changeNumber.get();
- try {
- runWithoutExceptionHandling();
- _gates.splitsAreReady();
- } catch (InterruptedException e) {
- _log.warn("Interrupting split fetcher task");
- Thread.currentThread().interrupt();
- } catch (Throwable t) {
- _log.error("RefreshableSplitFetcher failed: " + t.getMessage());
- if (_log.isDebugEnabled()) {
- _log.debug("Reason:", t);
- }
- } finally {
- if (_log.isDebugEnabled()) {
- _log.debug("split fetch before: " + start + ", after: " + _changeNumber.get());
- }
- }
- }
-
- public void runWithoutExceptionHandling() throws InterruptedException {
- SplitChange change = _splitChangeFetcher.fetch(_changeNumber.get());
-
- if (change == null) {
- throw new IllegalStateException("SplitChange was null");
- }
-
- if (change.till == _changeNumber.get()) {
- // no change.
- return;
- }
-
- if (change.since != _changeNumber.get() || change.till < _changeNumber.get()) {
- // some other thread may have updated the shared state. exit
- return;
- }
-
- if (change.splits.isEmpty()) {
- // there are no changes. weird!
- _changeNumber.set(change.till);
- return;
- }
-
- synchronized (_lock) {
- // check state one more time.
- if (change.since != _changeNumber.get()
- || change.till < _changeNumber.get()) {
- // some other thread may have updated the shared state. exit
- return;
- }
-
- Set toRemove = Sets.newHashSet();
- Map toAdd = Maps.newHashMap();
- List trafficTypeNamesToRemove = Lists.newArrayList();
- List trafficTypeNamesToAdd = Lists.newArrayList();
-
- for (Split split : change.splits) {
- if (Thread.currentThread().isInterrupted()) {
- throw new InterruptedException();
- }
-
- if (split.status != Status.ACTIVE) {
- // archive.
- toRemove.add(split.name);
- if (split.trafficTypeName != null) {
- trafficTypeNamesToRemove.add(split.trafficTypeName);
- }
- continue;
- }
-
- ParsedSplit parsedSplit = _parser.parse(split);
- if (parsedSplit == null) {
- _log.info("We could not parse the experiment definition for: " + split.name + " so we are removing it completely to be careful");
- toRemove.add(split.name);
- if (split.trafficTypeName != null) {
- trafficTypeNamesToRemove.add(split.trafficTypeName);
- }
- continue;
- }
-
- toAdd.put(split.name, parsedSplit);
-
- // If the split already exists, this is either an update, or the split has been
- // deleted and recreated (possibly with a different traffic type).
- // If it's an update, the traffic type should NOT be increased.
- // If it's deleted & recreated, the old one should be decreased and the new one increased.
- // To handle both cases, we simply delete the old one if the split is present.
- // The new one is always increased.
- ParsedSplit current = _concurrentMap.get(split.name);
- if (current != null && current.trafficTypeName() != null) {
- trafficTypeNamesToRemove.add(current.trafficTypeName());
- }
-
- if (split.trafficTypeName != null) {
- trafficTypeNamesToAdd.add(split.trafficTypeName);
- }
- }
-
- _concurrentMap.putAll(toAdd);
- _concurrentTrafficTypeNameSet.addAll(trafficTypeNamesToAdd);
- //removeAll does not work here, since it wont remove all the occurrences, just one
- Multisets.removeOccurrences(_concurrentTrafficTypeNameSet, trafficTypeNamesToRemove);
-
- for (String remove : toRemove) {
- _concurrentMap.remove(remove);
- }
-
- if (!toAdd.isEmpty()) {
- _log.debug("Updated features: " + toAdd.keySet());
- }
-
- if (!toRemove.isEmpty()) {
- _log.debug("Deleted features: " + toRemove);
- }
-
- _changeNumber.set(change.till);
- }
- }
-}
diff --git a/client/src/main/java/io/split/engine/experiments/SplitChangeFetcher.java b/client/src/main/java/io/split/engine/experiments/SplitChangeFetcher.java
index b05fea930..63298a5e7 100644
--- a/client/src/main/java/io/split/engine/experiments/SplitChangeFetcher.java
+++ b/client/src/main/java/io/split/engine/experiments/SplitChangeFetcher.java
@@ -31,5 +31,5 @@ public interface SplitChangeFetcher {
* @return SegmentChange
* @throws java.lang.RuntimeException if there was a problem computing split changes
*/
- SplitChange fetch(long since);
+ SplitChange fetch(long since, boolean addCacheHeader);
}
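
The new addCacheHeader flag lets on-demand fetches (those triggered by streaming notifications) ask the backend for a fresh copy, while scheduled polling passes false. The HTTP implementation is not part of this hunk; the sketch below only illustrates, as an assumption, how a fetcher built on the Apache HttpClient 5 classes already used in this patch might honor the flag. The helper and the request path are hypothetical.

import org.apache.hc.client5.http.classic.methods.HttpGet;

public class CacheHeaderSketch {
    // Hypothetical helper: build the GET request for the splitChanges endpoint and
    // only attach Cache-Control when the caller asked for a fresh copy.
    static HttpGet splitChangesRequest(String rootTarget, long since, boolean addCacheHeader) {
        HttpGet request = new HttpGet(rootTarget + "/api/splitChanges?since=" + since);
        if (addCacheHeader) {
            request.addHeader("Cache-Control", "no-cache");
        }
        return request;
    }
}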
diff --git a/client/src/main/java/io/split/engine/experiments/SplitFetcher.java b/client/src/main/java/io/split/engine/experiments/SplitFetcher.java
index 585a53846..4266659b1 100644
--- a/client/src/main/java/io/split/engine/experiments/SplitFetcher.java
+++ b/client/src/main/java/io/split/engine/experiments/SplitFetcher.java
@@ -1,33 +1,18 @@
package io.split.engine.experiments;
-import java.util.List;
-import java.util.Set;
-
/**
* Created by adilaijaz on 5/8/15.
*/
-public interface SplitFetcher {
- ParsedSplit fetch(String splitName);
-
- List fetchAll();
-
+public interface SplitFetcher extends Runnable {
/**
- * Fetches all the traffic types that are being used by the splits that are currently stored.
- *
- * For example, if the fetcher currently contains three splits, one of traffic type "account"
- * and two of traffic type "user", this method will return ["account", "user"]
- *
- * @return a set of all the traffic types used by the parsed splits
+ * Forces a sync of splits, outside of any scheduled
+ * syncs. This method MUST NOT throw any exceptions.
*/
- Set fetchKnownTrafficTypes();
+ void forceRefresh(boolean addCacheHeader);
/**
- * Forces a sync of splits, outside of any scheduled
+ * Forces a sync of ALL splits, outside of any scheduled
* syncs. This method MUST NOT throw any exceptions.
*/
- void forceRefresh();
-
- long changeNumber();
-
- void killSplit(String splitName, String defaultTreatment, long changeNumber);
+ void fetchAll(boolean addCacheHeader);
}
diff --git a/client/src/main/java/io/split/engine/experiments/SplitFetcherImp.java b/client/src/main/java/io/split/engine/experiments/SplitFetcherImp.java
new file mode 100644
index 000000000..510001153
--- /dev/null
+++ b/client/src/main/java/io/split/engine/experiments/SplitFetcherImp.java
@@ -0,0 +1,162 @@
+package io.split.engine.experiments;
+
+import io.split.client.dtos.Split;
+import io.split.client.dtos.SplitChange;
+import io.split.client.dtos.Status;
+import io.split.engine.SDKReadinessGates;
+import io.split.cache.SplitCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * An ExperimentFetcher that refreshes experiment definitions periodically.
+ *
+ * @author adil
+ */
+public class SplitFetcherImp implements SplitFetcher {
+
+ private static final Logger _log = LoggerFactory.getLogger(SplitFetcherImp.class);
+
+ private final SplitParser _parser;
+ private final SplitChangeFetcher _splitChangeFetcher;
+ private final SplitCache _splitCache;
+ private final SDKReadinessGates _gates;
+ private final Object _lock = new Object();
+
+ /**
+ * Note: the traffic-type bookkeeping that used to live here (a multiset such as
+ * [{"user", 2}, {"account", 1}], used to decide whether an ARCHIVED split should drop a
+ * traffic type) is now maintained by the SplitCache implementation rather than by this fetcher.
+ */
+
+ public SplitFetcherImp(SplitChangeFetcher splitChangeFetcher, SplitParser parser, SDKReadinessGates gates, SplitCache splitCache) {
+ _splitChangeFetcher = checkNotNull(splitChangeFetcher);
+ _parser = checkNotNull(parser);
+ _gates = checkNotNull(gates);
+ _splitCache = checkNotNull(splitCache);
+ }
+
+ @Override
+ public void forceRefresh(boolean addCacheHeader) {
+ _log.debug("Force Refresh splits starting ...");
+ try {
+ while (true) {
+ long start = _splitCache.getChangeNumber();
+ runWithoutExceptionHandling(addCacheHeader);
+ long end = _splitCache.getChangeNumber();
+
+ if (start >= end) {
+ break;
+ }
+ }
+ } catch (InterruptedException e) {
+ _log.warn("Interrupting split fetcher task");
+ Thread.currentThread().interrupt();
+ } catch (Throwable t) {
+ _log.error("RefreshableSplitFetcher failed: " + t.getMessage());
+ }
+ }
+
+ @Override
+ public void run() {
+ this.fetchAll(false);
+ }
+
+ private void runWithoutExceptionHandling(boolean addCacheHeader) throws InterruptedException {
+ SplitChange change = _splitChangeFetcher.fetch(_splitCache.getChangeNumber(), addCacheHeader);
+
+ if (change == null) {
+ throw new IllegalStateException("SplitChange was null");
+ }
+
+ if (change.till == _splitCache.getChangeNumber()) {
+ // no change.
+ return;
+ }
+
+ if (change.since != _splitCache.getChangeNumber() || change.till < _splitCache.getChangeNumber()) {
+ // some other thread may have updated the shared state. exit
+ return;
+ }
+
+ if (change.splits.isEmpty()) {
+ // there are no changes. weird!
+ _splitCache.setChangeNumber(change.till);
+ return;
+ }
+
+ synchronized (_lock) {
+ // check state one more time.
+ if (change.since != _splitCache.getChangeNumber()
+ || change.till < _splitCache.getChangeNumber()) {
+ // some other thread may have updated the shared state. exit
+ return;
+ }
+
+ for (Split split : change.splits) {
+ if (Thread.currentThread().isInterrupted()) {
+ throw new InterruptedException();
+ }
+
+ if (split.status != Status.ACTIVE) {
+ // archive.
+ _splitCache.remove(split.name);
+ continue;
+ }
+
+ ParsedSplit parsedSplit = _parser.parse(split);
+ if (parsedSplit == null) {
+ _log.info("We could not parse the experiment definition for: " + split.name + " so we are removing it completely to be careful");
+
+ _splitCache.remove(split.name);
+ _log.debug("Deleted feature: " + split.name);
+
+ continue;
+ }
+
+ // If the split already exists, this is either an update, or the split has been
+ // deleted and recreated (possibly with a different traffic type).
+ // If it's an update, the traffic type should NOT be increased.
+ // If it's deleted & recreated, the old one should be decreased and the new one increased.
+ // To handle both cases, we simply delete the old one if the split is present.
+ // The new one is always increased.
+ ParsedSplit current = _splitCache.get(split.name);
+ if (current != null) {
+ _splitCache.remove(split.name);
+ }
+
+ _splitCache.put(parsedSplit);
+ _log.debug("Updated feature: " + parsedSplit.feature());
+ }
+
+ _splitCache.setChangeNumber(change.till);
+ }
+ }
+ @Override
+ public void fetchAll(boolean addCacheHeader) {
+ _log.debug("Fetch splits starting ...");
+ long start = _splitCache.getChangeNumber();
+ try {
+ runWithoutExceptionHandling(addCacheHeader);
+ _gates.splitsAreReady();
+ } catch (InterruptedException e) {
+ _log.warn("Interrupting split fetcher task");
+ Thread.currentThread().interrupt();
+ } catch (Throwable t) {
+ _log.error("RefreshableSplitFetcher failed: " + t.getMessage());
+ if (_log.isDebugEnabled()) {
+ _log.debug("Reason:", t);
+ }
+ } finally {
+ if (_log.isDebugEnabled()) {
+ _log.debug("split fetch before: " + start + ", after: " + _splitCache.getChangeNumber());
+ }
+ }
+ }
+}
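
forceRefresh() keeps calling runWithoutExceptionHandling() until the cache's change number stops advancing, which is how a targeted refresh catches up through several pending change batches. A tiny self-contained illustration of that loop shape, with a fake sequence of change numbers standing in for the SplitChangeFetcher, included only to show the stopping condition:

import java.util.Iterator;
import java.util.List;

public class CatchUpLoopSketch {
    public static void main(String[] args) {
        // Pretend the backend has pending change batches: -1 -> 10 -> 25 -> 25 (no more changes).
        Iterator<Long> backend = List.of(10L, 25L, 25L).iterator();
        long changeNumber = -1;

        while (true) {
            long start = changeNumber;
            if (backend.hasNext()) {
                changeNumber = Math.max(changeNumber, backend.next());  // one fetch-and-apply pass
            }
            long end = changeNumber;
            if (start >= end) {                                         // nothing advanced: caught up
                break;
            }
        }
        System.out.println("caught up at change number " + changeNumber); // 25
    }
}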
diff --git a/client/src/main/java/io/split/engine/experiments/SplitParser.java b/client/src/main/java/io/split/engine/experiments/SplitParser.java
index 7dad44aac..e58292092 100644
--- a/client/src/main/java/io/split/engine/experiments/SplitParser.java
+++ b/client/src/main/java/io/split/engine/experiments/SplitParser.java
@@ -1,6 +1,7 @@
package io.split.engine.experiments;
import com.google.common.collect.Lists;
+import io.split.cache.SegmentCache;
import io.split.client.dtos.Condition;
import io.split.client.dtos.Matcher;
import io.split.client.dtos.MatcherGroup;
@@ -26,8 +27,7 @@
import io.split.engine.matchers.strings.RegularExpressionMatcher;
import io.split.engine.matchers.strings.StartsWithAnyOfMatcher;
import io.split.engine.matchers.strings.WhitelistMatcher;
-import io.split.engine.segments.Segment;
-import io.split.engine.segments.SegmentFetcher;
+import io.split.engine.segments.SegmentSynchronizationTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -46,11 +46,13 @@ public final class SplitParser {
public static final int CONDITIONS_UPPER_LIMIT = 50;
private static final Logger _log = LoggerFactory.getLogger(SplitParser.class);
- private SegmentFetcher _segmentFetcher;
+ private final SegmentSynchronizationTask _segmentSynchronizationTask;
+ private final SegmentCache _segmentCache;
- public SplitParser(SegmentFetcher segmentFetcher) {
- _segmentFetcher = segmentFetcher;
- checkNotNull(_segmentFetcher);
+ public SplitParser(SegmentSynchronizationTask segmentSynchronizationTaskImp,
+ SegmentCache segmentCache) {
+ _segmentSynchronizationTask = checkNotNull(segmentSynchronizationTaskImp);
+ _segmentCache = checkNotNull(segmentCache);
}
public ParsedSplit parse(Split split) {
@@ -106,8 +108,9 @@ private AttributeMatcher toMatcher(Matcher matcher) {
break;
case IN_SEGMENT:
checkNotNull(matcher.userDefinedSegmentMatcherData);
- Segment segment = _segmentFetcher.segment(matcher.userDefinedSegmentMatcherData.segmentName);
- delegate = new UserDefinedSegmentMatcher(segment);
+ String segmentName = matcher.userDefinedSegmentMatcherData.segmentName;
+ _segmentSynchronizationTask.initializeSegment(segmentName);
+ delegate = new UserDefinedSegmentMatcher(_segmentCache, segmentName);
break;
case WHITELIST:
checkNotNull(matcher.whitelistMatcherData);
diff --git a/client/src/main/java/io/split/engine/experiments/RefreshableSplitFetcherProvider.java b/client/src/main/java/io/split/engine/experiments/SplitSynchronizationTask.java
similarity index 65%
rename from client/src/main/java/io/split/engine/experiments/RefreshableSplitFetcherProvider.java
rename to client/src/main/java/io/split/engine/experiments/SplitSynchronizationTask.java
index 04760b941..480331a77 100644
--- a/client/src/main/java/io/split/engine/experiments/RefreshableSplitFetcherProvider.java
+++ b/client/src/main/java/io/split/engine/experiments/SplitSynchronizationTask.java
@@ -1,7 +1,7 @@
package io.split.engine.experiments;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import io.split.engine.SDKReadinessGates;
+import io.split.cache.SplitCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -24,34 +24,24 @@
*
* @author adil
*/
-public class RefreshableSplitFetcherProvider implements Closeable {
- private static final Logger _log = LoggerFactory.getLogger(RefreshableSplitFetcherProvider.class);
+public class SplitSynchronizationTask implements Closeable {
+ private static final Logger _log = LoggerFactory.getLogger(SplitSynchronizationTask.class);
- private final SplitParser _splitParser;
- private final SplitChangeFetcher _splitChangeFetcher;
- private final AtomicLong _refreshEveryNSeconds;
- private final AtomicReference _splitFetcher = new AtomicReference();
- private final SDKReadinessGates _gates;
+ private final AtomicReference _splitFetcher = new AtomicReference<>();
+ private final AtomicReference _splitCache = new AtomicReference();
private final AtomicReference _executorService = new AtomicReference<>();
+ private final AtomicLong _refreshEveryNSeconds;
private final ScheduledExecutorService _scheduledExecutorService;
- private final Object _lock = new Object();
private final AtomicBoolean _running;
private ScheduledFuture> _scheduledFuture;
- public RefreshableSplitFetcherProvider(SplitChangeFetcher splitChangeFetcher, SplitParser splitParser, long refreshEveryNSeconds, SDKReadinessGates sdkBuildBlocker) {
- _splitChangeFetcher = splitChangeFetcher;
- checkNotNull(_splitChangeFetcher);
-
- _splitParser = splitParser;
- checkNotNull(_splitParser);
-
+ public SplitSynchronizationTask(SplitFetcher splitFetcher, SplitCache splitCache, long refreshEveryNSeconds) {
+ _splitFetcher.set(checkNotNull(splitFetcher));
+ _splitCache.set(checkNotNull(splitCache));
checkArgument(refreshEveryNSeconds >= 0L);
_refreshEveryNSeconds = new AtomicLong(refreshEveryNSeconds);
- _gates = sdkBuildBlocker;
- checkNotNull(_gates);
-
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("split-splitFetcher-%d")
@@ -63,25 +53,6 @@ public RefreshableSplitFetcherProvider(SplitChangeFetcher splitChangeFetcher, Sp
_running = new AtomicBoolean();
}
- public RefreshableSplitFetcher getFetcher() {
- if (_splitFetcher.get() != null) {
- return _splitFetcher.get();
- }
-
- // we are locking here since we wanna make sure that we create only ONE RefreshableExperimentChangeFetcher
- synchronized (_lock) {
- // double check
- if (_splitFetcher.get() != null) {
- return _splitFetcher.get();
- }
-
- RefreshableSplitFetcher splitFetcher = new RefreshableSplitFetcher(_splitChangeFetcher, _splitParser, _gates);
-
- _splitFetcher.set(splitFetcher);
- return splitFetcher;
- }
- }
-
public void startPeriodicFetching() {
if (_running.getAndSet(true)) {
_log.warn("Splits PeriodicFetching is running...");
@@ -89,7 +60,7 @@ public void startPeriodicFetching() {
}
_log.debug("Starting PeriodicFetching Splits ...");
- _scheduledFuture = _scheduledExecutorService.scheduleWithFixedDelay(getFetcher(), 0L, _refreshEveryNSeconds.get(), TimeUnit.SECONDS);
+ _scheduledFuture = _scheduledExecutorService.scheduleWithFixedDelay(_splitFetcher.get(), 0L, _refreshEveryNSeconds.get(), TimeUnit.SECONDS);
}
public void stop() {
@@ -109,9 +80,11 @@ public void close() {
}
if (_splitFetcher.get() != null) {
- _splitFetcher.get().clear();
+ _splitCache.get().clear();
}
+ stop();
+
ScheduledExecutorService scheduledExecutorService = _executorService.get();
if (scheduledExecutorService.isShutdown()) {
return;
@@ -130,5 +103,4 @@ public void close() {
Thread.currentThread().interrupt();
}
}
-
}
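
The renamed task now receives an already-built SplitFetcher and SplitCache instead of creating the fetcher lazily. A rough usage sketch (not part of the patch; the 60-second refresh value is an arbitrary example, the constructor and startPeriodicFetching come from this diff):

    import io.split.cache.SplitCache;
    import io.split.engine.experiments.SplitFetcher;
    import io.split.engine.experiments.SplitSynchronizationTask;

    public final class SplitSyncSketch {
        // Builds the task with injected collaborators and starts the periodic fetch.
        public static SplitSynchronizationTask start(SplitFetcher splitFetcher, SplitCache splitCache) {
            SplitSynchronizationTask task = new SplitSynchronizationTask(splitFetcher, splitCache, 60L);
            task.startPeriodicFetching();
            return task;
        }
    }
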
diff --git a/client/src/main/java/io/split/engine/matchers/AllKeysMatcher.java b/client/src/main/java/io/split/engine/matchers/AllKeysMatcher.java
index 461d1bffa..bad142453 100644
--- a/client/src/main/java/io/split/engine/matchers/AllKeysMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/AllKeysMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
@@ -12,7 +12,7 @@
public final class AllKeysMatcher implements Matcher {
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
diff --git a/client/src/main/java/io/split/engine/matchers/AttributeMatcher.java b/client/src/main/java/io/split/engine/matchers/AttributeMatcher.java
index a86d5b3ae..b4670509c 100644
--- a/client/src/main/java/io/split/engine/matchers/AttributeMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/AttributeMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
import java.util.Objects;
@@ -27,9 +27,9 @@ public AttributeMatcher(String attribute, Matcher matcher, boolean negate) {
_matcher = new NegatableMatcher(matcher, negate);
}
- public boolean match(String key, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(String key, String bucketingKey, Map attributes, Evaluator evaluator) {
if (_attribute == null) {
- return _matcher.match(key, bucketingKey, attributes, splitClient);
+ return _matcher.match(key, bucketingKey, attributes, evaluator);
}
if (attributes == null) {
@@ -95,8 +95,8 @@ public NegatableMatcher(Matcher matcher, boolean negate) {
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
- boolean result = _delegate.match(matchValue, bucketingKey, attributes, splitClient);
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
+ boolean result = _delegate.match(matchValue, bucketingKey, attributes, evaluator);
return (_negate) ? !result : result;
}
diff --git a/client/src/main/java/io/split/engine/matchers/BetweenMatcher.java b/client/src/main/java/io/split/engine/matchers/BetweenMatcher.java
index 2447d2831..dd77d9810 100644
--- a/client/src/main/java/io/split/engine/matchers/BetweenMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/BetweenMatcher.java
@@ -1,7 +1,7 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
import io.split.client.dtos.DataType;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
@@ -36,7 +36,7 @@ public BetweenMatcher(long start, long end, DataType dataType) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
Long keyAsLong;
if (_dataType == DataType.DATETIME) {
diff --git a/client/src/main/java/io/split/engine/matchers/BooleanMatcher.java b/client/src/main/java/io/split/engine/matchers/BooleanMatcher.java
index c8e881e9b..28b3783a8 100644
--- a/client/src/main/java/io/split/engine/matchers/BooleanMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/BooleanMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
@@ -14,7 +14,7 @@ public BooleanMatcher(boolean booleanValue) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
@@ -26,7 +26,7 @@ public boolean match(Object matchValue, String bucketingKey, Map
@Override
public String toString() {
- return "is " + Boolean.toString(_booleanValue);
+ return "is " + _booleanValue;
}
@Override
diff --git a/client/src/main/java/io/split/engine/matchers/CombiningMatcher.java b/client/src/main/java/io/split/engine/matchers/CombiningMatcher.java
index ae5463e73..52a7d0874 100644
--- a/client/src/main/java/io/split/engine/matchers/CombiningMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/CombiningMatcher.java
@@ -4,6 +4,7 @@
import com.google.common.collect.Lists;
import io.split.client.SplitClientImpl;
import io.split.client.dtos.MatcherCombiner;
+import io.split.engine.evaluator.Evaluator;
import java.util.List;
import java.util.Map;
@@ -38,24 +39,24 @@ public CombiningMatcher(MatcherCombiner combiner, List delegat
checkArgument(_delegates.size() > 0);
}
- public boolean match(String key, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(String key, String bucketingKey, Map attributes, Evaluator evaluator) {
if (_delegates.isEmpty()) {
return false;
}
switch (_combiner) {
case AND:
- return and(key, bucketingKey, attributes, splitClient);
+ return and(key, bucketingKey, attributes, evaluator);
default:
throw new IllegalArgumentException("Unknown combiner: " + _combiner);
}
}
- private boolean and(String key, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ private boolean and(String key, String bucketingKey, Map attributes, Evaluator evaluator) {
boolean result = true;
for (AttributeMatcher delegate : _delegates) {
- result &= (delegate.match(key, bucketingKey, attributes, splitClient));
+ result &= (delegate.match(key, bucketingKey, attributes, evaluator));
}
return result;
}
diff --git a/client/src/main/java/io/split/engine/matchers/DependencyMatcher.java b/client/src/main/java/io/split/engine/matchers/DependencyMatcher.java
index b7be62cfd..5c6b3f2a5 100644
--- a/client/src/main/java/io/split/engine/matchers/DependencyMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/DependencyMatcher.java
@@ -1,9 +1,10 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
/**
* Supports the logic: if user is in split "feature" treatments ["on","off"]
@@ -18,7 +19,7 @@ public DependencyMatcher(String split, List treatments) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
@@ -27,16 +28,7 @@ public boolean match(Object matchValue, String bucketingKey, Map
return false;
}
- String result = splitClient.getTreatmentWithoutImpressions(
- (String) matchValue,
- bucketingKey,
- _split,
- attributes
- );
-
-// if(Treatments.isControl(result)) {
-// throw new ParentIsControlException();
-// }
+ String result = evaluator.evaluateFeature((String) matchValue, bucketingKey, _split, attributes).treatment;
return _treatments.contains(result);
}
@@ -58,8 +50,8 @@ public boolean equals(Object o) {
DependencyMatcher that = (DependencyMatcher) o;
- if (_split != null ? !_split.equals(that._split) : that._split != null) return false;
- return _treatments != null ? _treatments.equals(that._treatments) : that._treatments == null;
+ if (!Objects.equals(_split, that._split)) return false;
+ return Objects.equals(_treatments, that._treatments);
}
@Override
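
The dependency check now goes through the Evaluator abstraction rather than calling back into SplitClientImpl. A standalone sketch of the same idea (not part of the patch; the split name and treatment list are invented, and only evaluateFeature(...).treatment is taken from this diff):

    import io.split.engine.evaluator.Evaluator;

    import java.util.Arrays;
    import java.util.List;

    public final class DependencySketch {
        // Asks the evaluator for the parent split's treatment and checks it against the expected list.
        public static boolean parentMatches(Evaluator evaluator, String matchingKey, String bucketingKey) {
            List<String> expected = Arrays.asList("on", "v2");
            String treatment = evaluator.evaluateFeature(matchingKey, bucketingKey, "parent_feature", null).treatment;
            return expected.contains(treatment);
        }
    }
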
diff --git a/client/src/main/java/io/split/engine/matchers/EqualToMatcher.java b/client/src/main/java/io/split/engine/matchers/EqualToMatcher.java
index fa8f478be..c2d853a26 100644
--- a/client/src/main/java/io/split/engine/matchers/EqualToMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/EqualToMatcher.java
@@ -1,7 +1,7 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
import io.split.client.dtos.DataType;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
@@ -29,7 +29,7 @@ public EqualToMatcher(long compareTo, DataType dataType) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
Long keyAsLong;
if (_dataType == DataType.DATETIME) {
diff --git a/client/src/main/java/io/split/engine/matchers/GreaterThanOrEqualToMatcher.java b/client/src/main/java/io/split/engine/matchers/GreaterThanOrEqualToMatcher.java
index 63310c592..7804fbe32 100644
--- a/client/src/main/java/io/split/engine/matchers/GreaterThanOrEqualToMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/GreaterThanOrEqualToMatcher.java
@@ -1,7 +1,7 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
import io.split.client.dtos.DataType;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
@@ -29,7 +29,7 @@ public GreaterThanOrEqualToMatcher(long compareTo, DataType dataType) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
Long keyAsLong;
if (_dataType == DataType.DATETIME) {
diff --git a/client/src/main/java/io/split/engine/matchers/LessThanOrEqualToMatcher.java b/client/src/main/java/io/split/engine/matchers/LessThanOrEqualToMatcher.java
index 8bea36ea3..5afa35dc8 100644
--- a/client/src/main/java/io/split/engine/matchers/LessThanOrEqualToMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/LessThanOrEqualToMatcher.java
@@ -1,7 +1,7 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
import io.split.client.dtos.DataType;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
@@ -28,7 +28,7 @@ public LessThanOrEqualToMatcher(long compareTo, DataType dataType) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
Long keyAsLong;
if (_dataType == DataType.DATETIME) {
diff --git a/client/src/main/java/io/split/engine/matchers/Matcher.java b/client/src/main/java/io/split/engine/matchers/Matcher.java
index 1e604d778..3acabb7bc 100644
--- a/client/src/main/java/io/split/engine/matchers/Matcher.java
+++ b/client/src/main/java/io/split/engine/matchers/Matcher.java
@@ -1,9 +1,9 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
public interface Matcher {
- boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient);
+ boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator);
}
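
Every matcher implementation touched by this patch is migrated to the signature above. For reference, a custom matcher written against the new interface might look like the sketch below (not part of the patch; it assumes the attributes map is typed Map<String, Object> as elsewhere in the SDK):

    import io.split.engine.evaluator.Evaluator;
    import io.split.engine.matchers.Matcher;

    import java.util.Map;

    public class NonEmptyStringMatcher implements Matcher {
        // Most matchers ignore the evaluator; only DependencyMatcher uses it to evaluate another split.
        @Override
        public boolean match(Object matchValue, String bucketingKey, Map<String, Object> attributes, Evaluator evaluator) {
            return matchValue instanceof String && !((String) matchValue).isEmpty();
        }
    }
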
diff --git a/client/src/main/java/io/split/engine/matchers/UserDefinedSegmentMatcher.java b/client/src/main/java/io/split/engine/matchers/UserDefinedSegmentMatcher.java
index a9796cb2c..f25e4fec3 100644
--- a/client/src/main/java/io/split/engine/matchers/UserDefinedSegmentMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/UserDefinedSegmentMatcher.java
@@ -1,7 +1,7 @@
package io.split.engine.matchers;
-import io.split.client.SplitClientImpl;
-import io.split.engine.segments.Segment;
+import io.split.cache.SegmentCache;
+import io.split.engine.evaluator.Evaluator;
import java.util.Map;
@@ -16,22 +16,21 @@
*/
public class UserDefinedSegmentMatcher implements Matcher {
private final String _segmentName;
- private final Segment _segment;
+ private final SegmentCache _segmentCache;
- public UserDefinedSegmentMatcher(Segment segment) {
- checkNotNull(segment);
- _segmentName = segment.segmentName();
- _segment = segment;
- checkNotNull(_segmentName);
+ public UserDefinedSegmentMatcher(SegmentCache segmentCache, String segmentName) {
+ _segmentCache = checkNotNull(segmentCache);
+ _segmentName = checkNotNull(segmentName);
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (!(matchValue instanceof String)) {
return false;
}
- return _segment.contains((String) matchValue);
+
+ return _segmentCache.isInSegment(_segmentName, (String) matchValue);
}
@Override
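
Membership is now resolved at match time through SegmentCache.isInSegment, so the matcher holds no keys of its own. A small usage sketch (not part of the patch; the segment name is invented and the nulls stand for the bucketing key, attributes and evaluator, which this matcher does not use):

    import io.split.cache.SegmentCache;
    import io.split.engine.matchers.UserDefinedSegmentMatcher;

    public final class SegmentMatcherUsageSketch {
        // Membership is read from the cache on every call; the matcher itself stores only the segment name.
        public static boolean isEmployee(SegmentCache segmentCache, String userKey) {
            UserDefinedSegmentMatcher matcher = new UserDefinedSegmentMatcher(segmentCache, "employees");
            return matcher.match(userKey, null, null, null);
        }
    }
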
diff --git a/client/src/main/java/io/split/engine/matchers/collections/ContainsAllOfSetMatcher.java b/client/src/main/java/io/split/engine/matchers/collections/ContainsAllOfSetMatcher.java
index a947cc6f5..f55e207e1 100644
--- a/client/src/main/java/io/split/engine/matchers/collections/ContainsAllOfSetMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/collections/ContainsAllOfSetMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.collections;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Collection;
@@ -24,7 +24,7 @@ public ContainsAllOfSetMatcher(Collection compareTo) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
diff --git a/client/src/main/java/io/split/engine/matchers/collections/ContainsAnyOfSetMatcher.java b/client/src/main/java/io/split/engine/matchers/collections/ContainsAnyOfSetMatcher.java
index 9e123546a..9747cd689 100644
--- a/client/src/main/java/io/split/engine/matchers/collections/ContainsAnyOfSetMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/collections/ContainsAnyOfSetMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.collections;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Collection;
@@ -25,7 +25,7 @@ public ContainsAnyOfSetMatcher(Collection compareTo) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
diff --git a/client/src/main/java/io/split/engine/matchers/collections/EqualToSetMatcher.java b/client/src/main/java/io/split/engine/matchers/collections/EqualToSetMatcher.java
index dd108e5a6..467cfcd19 100644
--- a/client/src/main/java/io/split/engine/matchers/collections/EqualToSetMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/collections/EqualToSetMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.collections;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Collection;
@@ -25,7 +25,7 @@ public EqualToSetMatcher(Collection compareTo) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
diff --git a/client/src/main/java/io/split/engine/matchers/collections/PartOfSetMatcher.java b/client/src/main/java/io/split/engine/matchers/collections/PartOfSetMatcher.java
index 996847ece..4f9fc9217 100644
--- a/client/src/main/java/io/split/engine/matchers/collections/PartOfSetMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/collections/PartOfSetMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.collections;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Collection;
@@ -25,7 +25,7 @@ public PartOfSetMatcher(Collection compareTo) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
diff --git a/client/src/main/java/io/split/engine/matchers/strings/ContainsAnyOfMatcher.java b/client/src/main/java/io/split/engine/matchers/strings/ContainsAnyOfMatcher.java
index 474fe92e4..286462297 100644
--- a/client/src/main/java/io/split/engine/matchers/strings/ContainsAnyOfMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/strings/ContainsAnyOfMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.strings;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Collection;
@@ -23,7 +23,7 @@ public ContainsAnyOfMatcher(Collection compareTo) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
diff --git a/client/src/main/java/io/split/engine/matchers/strings/EndsWithAnyOfMatcher.java b/client/src/main/java/io/split/engine/matchers/strings/EndsWithAnyOfMatcher.java
index edbd5c5b4..33a63ac03 100644
--- a/client/src/main/java/io/split/engine/matchers/strings/EndsWithAnyOfMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/strings/EndsWithAnyOfMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.strings;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Collection;
@@ -23,7 +23,7 @@ public EndsWithAnyOfMatcher(Collection compareTo) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
diff --git a/client/src/main/java/io/split/engine/matchers/strings/RegularExpressionMatcher.java b/client/src/main/java/io/split/engine/matchers/strings/RegularExpressionMatcher.java
index ca6b699bd..b651bae0a 100644
--- a/client/src/main/java/io/split/engine/matchers/strings/RegularExpressionMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/strings/RegularExpressionMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.strings;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Map;
@@ -16,7 +16,7 @@ public RegularExpressionMatcher(String matcherValue) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
diff --git a/client/src/main/java/io/split/engine/matchers/strings/StartsWithAnyOfMatcher.java b/client/src/main/java/io/split/engine/matchers/strings/StartsWithAnyOfMatcher.java
index 0529d13be..b758dba36 100644
--- a/client/src/main/java/io/split/engine/matchers/strings/StartsWithAnyOfMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/strings/StartsWithAnyOfMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.strings;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Collection;
@@ -23,7 +23,7 @@ public StartsWithAnyOfMatcher(Collection compareTo) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
if (matchValue == null) {
return false;
}
diff --git a/client/src/main/java/io/split/engine/matchers/strings/WhitelistMatcher.java b/client/src/main/java/io/split/engine/matchers/strings/WhitelistMatcher.java
index 0efc0d7d6..afedc4050 100644
--- a/client/src/main/java/io/split/engine/matchers/strings/WhitelistMatcher.java
+++ b/client/src/main/java/io/split/engine/matchers/strings/WhitelistMatcher.java
@@ -1,6 +1,6 @@
package io.split.engine.matchers.strings;
-import io.split.client.SplitClientImpl;
+import io.split.engine.evaluator.Evaluator;
import io.split.engine.matchers.Matcher;
import java.util.Collection;
@@ -22,7 +22,7 @@ public WhitelistMatcher(Collection whitelist) {
}
@Override
- public boolean match(Object matchValue, String bucketingKey, Map attributes, SplitClientImpl splitClient) {
+ public boolean match(Object matchValue, String bucketingKey, Map attributes, Evaluator evaluator) {
return _whitelist.contains(matchValue);
}
diff --git a/client/src/main/java/io/split/engine/segments/RefreshableSegment.java b/client/src/main/java/io/split/engine/segments/RefreshableSegment.java
deleted file mode 100644
index 656f5bcec..000000000
--- a/client/src/main/java/io/split/engine/segments/RefreshableSegment.java
+++ /dev/null
@@ -1,191 +0,0 @@
-package io.split.engine.segments;
-
-import io.split.client.dtos.SegmentChange;
-import io.split.engine.SDKReadinessGates;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-/**
- * A SegmentFetcher implementation that can periodically refresh itself.
- *
- * @author adil
- */
-public class RefreshableSegment implements Runnable, Segment {
- private static final Logger _log = LoggerFactory.getLogger(RefreshableSegment.class);
-
- private final String _segmentName;
- private final SegmentChangeFetcher _segmentChangeFetcher;
- private final AtomicLong _changeNumber;
- private final SDKReadinessGates _gates;
-
- private Set _concurrentKeySet = Collections.newSetFromMap(new ConcurrentHashMap());
- private final Object _lock = new Object();
-
- @Override
- public String segmentName() {
- return _segmentName;
- }
-
- @Override
- public boolean contains(String key) {
- return _concurrentKeySet.contains(key);
- }
-
- /*package private*/ Set fetch() {
- return Collections.unmodifiableSet(_concurrentKeySet);
- }
-
- @Override
- public void forceRefresh() {
- try {
- _log.debug("Force Refresh segment starting ...");
- while (true) {
- long start = _changeNumber.get();
- runWithoutExceptionHandling();
- long end = _changeNumber.get();
-
- if (start >= end) {
- break;
- }
- }
- } catch (Throwable t) {
- _log.error("forceRefresh segment failed: " + t.getMessage());
- }
- }
-
- @Override
- public long changeNumber() {
- return _changeNumber.get();
- }
-
- public static RefreshableSegment create(String segmentName, SegmentChangeFetcher segmentChangeFetcher, SDKReadinessGates gates) {
- return new RefreshableSegment(segmentName, segmentChangeFetcher, -1L, gates);
- }
-
-
- public RefreshableSegment(String segmentName, SegmentChangeFetcher segmentChangeFetcher, long changeNumber, SDKReadinessGates gates) {
- _segmentName = segmentName;
- _segmentChangeFetcher = segmentChangeFetcher;
- _changeNumber = new AtomicLong(changeNumber);
- _gates = gates;
-
- checkNotNull(_segmentChangeFetcher);
- checkNotNull(_segmentName);
- checkNotNull(_gates);
- }
-
- @Override
- public void run() {
- try {
- // Do this again in case the previous call errored out.
- _gates.registerSegment(_segmentName);
- while (true) {
- long start = _changeNumber.get();
- runWithoutExceptionHandling();
- long end = _changeNumber.get();
- if (_log.isDebugEnabled()) {
- _log.debug(_segmentName + " segment fetch before: " + start + ", after: " + _changeNumber.get() + " size: " + _concurrentKeySet.size());
- }
- if (start >= end) {
- break;
- }
- }
-
- _gates.segmentIsReady(_segmentName);
-
- } catch (Throwable t) {
- _log.error("RefreshableSegmentFetcher failed: " + t.getMessage());
- if (_log.isDebugEnabled()) {
- _log.debug("Reason:", t);
- }
- }
- }
-
- private void runWithoutExceptionHandling() {
- SegmentChange change = _segmentChangeFetcher.fetch(_segmentName, _changeNumber.get());
-
- if (change == null) {
- throw new IllegalStateException("SegmentChange was null");
- }
-
- if (change.till == _changeNumber.get()) {
- // no change.
- return;
- }
-
- if (change.since != _changeNumber.get()
- || change.since < _changeNumber.get()) {
- // some other thread may have updated the shared state. exit
- return;
- }
-
-
- if (change.added.isEmpty() && change.removed.isEmpty()) {
- // there are no changes. weird!
- _changeNumber.set(change.till);
- return;
- }
-
- synchronized (_lock) {
- // check state one more time.
- if (change.since != _changeNumber.get()
- || change.till < _changeNumber.get()) {
- // some other thread may have updated the shared state. exit
- return;
- }
-
- for (String added : change.added) {
- _concurrentKeySet.add(added);
- }
-
- if (!change.added.isEmpty()) {
- _log.info(_segmentName + " added keys: " + summarize(change.added));
- }
-
- for (String removed : change.removed) {
- _concurrentKeySet.remove(removed);
- }
-
- if (!change.removed.isEmpty()) {
- _log.info(_segmentName + " removed keys: " + summarize(change.removed));
- }
-
- _changeNumber.set(change.till);
- }
- }
-
- private String summarize(List changes) {
- StringBuilder bldr = new StringBuilder();
- bldr.append("[");
- for (int i = 0; i < Math.min(3, changes.size()); i++) {
- if (i != 0) {
- bldr.append(", ");
- }
- bldr.append(changes.get(i));
- }
-
- if (changes.size() > 3) {
- bldr.append("... ");
- bldr.append((changes.size() - 3));
- bldr.append(" others");
- }
- bldr.append("]");
-
- return bldr.toString();
- }
-
-
- @Override
- public String toString() {
- return "RefreshableSegmentFetcher[" + _segmentName + "]";
- }
-
-}
diff --git a/client/src/main/java/io/split/engine/segments/Segment.java b/client/src/main/java/io/split/engine/segments/Segment.java
deleted file mode 100644
index ae0bfd7de..000000000
--- a/client/src/main/java/io/split/engine/segments/Segment.java
+++ /dev/null
@@ -1,26 +0,0 @@
-package io.split.engine.segments;
-
-/**
- * Fetches the keys in a segment. Implementing classes are responsible for keeping
- * the segment up-to-date with the remote server.
- *
- * @author adil
- */
-public interface Segment {
- String segmentName();
-
- /**
- * This method MUST NOT throw any exceptions.
- *
- * @return true if this segment contains the key. false otherwise.
- */
- boolean contains(String key);
-
- /**
- * Forces a sync of the segment with the remote server, outside of any scheduled
- * syncs. This method MUST NOT throw any exceptions.
- */
- void forceRefresh();
-
- long changeNumber();
-}
diff --git a/client/src/main/java/io/split/engine/segments/SegmentChangeFetcher.java b/client/src/main/java/io/split/engine/segments/SegmentChangeFetcher.java
index 8b72f0ae6..f4d46ed13 100644
--- a/client/src/main/java/io/split/engine/segments/SegmentChangeFetcher.java
+++ b/client/src/main/java/io/split/engine/segments/SegmentChangeFetcher.java
@@ -25,5 +25,5 @@ public interface SegmentChangeFetcher {
* @return SegmentChange
* @throws java.lang.RuntimeException if there was a problem fetching segment changes
*/
- SegmentChange fetch(String segmentName, long changesSinceThisChangeNumber);
+ SegmentChange fetch(String segmentName, long changesSinceThisChangeNumber, boolean addCacheHeader);
}
diff --git a/client/src/main/java/io/split/engine/segments/SegmentFetcher.java b/client/src/main/java/io/split/engine/segments/SegmentFetcher.java
index 3d0670c9c..af4bbc767 100644
--- a/client/src/main/java/io/split/engine/segments/SegmentFetcher.java
+++ b/client/src/main/java/io/split/engine/segments/SegmentFetcher.java
@@ -4,10 +4,12 @@
* Created by adilaijaz on 5/7/15.
*/
public interface SegmentFetcher {
- Segment segment(String segmentName);
- long getChangeNumber(String segmentName);
- void forceRefresh(String segmentName);
- void forceRefreshAll();
- void startPeriodicFetching();
- void stop();
+ /**
+ * Fetches this segment's changes once; when addCacheHeader is true the request carries the Cache-Control header.
+ */
+ void fetch(boolean addCacheHeader);
+
+ void runWhitCacheHeader();
+
+ void fetchAll();
}
diff --git a/client/src/main/java/io/split/engine/segments/SegmentFetcherImp.java b/client/src/main/java/io/split/engine/segments/SegmentFetcherImp.java
new file mode 100644
index 000000000..ac21e8461
--- /dev/null
+++ b/client/src/main/java/io/split/engine/segments/SegmentFetcherImp.java
@@ -0,0 +1,157 @@
+package io.split.engine.segments;
+
+import io.split.cache.SegmentCache;
+import io.split.client.dtos.SegmentChange;
+import io.split.engine.SDKReadinessGates;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+public class SegmentFetcherImp implements SegmentFetcher {
+ private static final Logger _log = LoggerFactory.getLogger(SegmentFetcherImp.class);
+
+ private final String _segmentName;
+ private final SegmentChangeFetcher _segmentChangeFetcher;
+ private final SegmentCache _segmentCache;
+ private final SDKReadinessGates _gates;
+
+ private final Object _lock = new Object();
+
+ public SegmentFetcherImp(String segmentName, SegmentChangeFetcher segmentChangeFetcher, SDKReadinessGates gates, SegmentCache segmentCache) {
+ _segmentName = checkNotNull(segmentName);
+ _segmentChangeFetcher = checkNotNull(segmentChangeFetcher);
+ _segmentCache = checkNotNull(segmentCache);
+ _gates = checkNotNull(gates);
+
+ _segmentCache.updateSegment(segmentName, new ArrayList<>(), new ArrayList<>());
+ }
+
+ @Override
+ public void fetch(boolean addCacheHeader){
+ try {
+ callLoopRun(false, addCacheHeader);
+ } catch (Throwable t) {
+ _log.error("RefreshableSegmentFetcher failed: " + t.getMessage());
+ if (_log.isDebugEnabled()) {
+ _log.debug("Reason:", t);
+ }
+ }
+ }
+
+ private void runWithoutExceptionHandling(boolean addCacheHeader) {
+ SegmentChange change = _segmentChangeFetcher.fetch(_segmentName, _segmentCache.getChangeNumber(_segmentName), addCacheHeader);
+
+ if (change == null) {
+ throw new IllegalStateException("SegmentChange was null");
+ }
+
+ if (change.till == _segmentCache.getChangeNumber(_segmentName)) {
+ // no change.
+ return;
+ }
+
+ if (change.since != _segmentCache.getChangeNumber(_segmentName)
+ || change.since < _segmentCache.getChangeNumber(_segmentName)) {
+ // some other thread may have updated the shared state. exit
+ return;
+ }
+
+
+ if (change.added.isEmpty() && change.removed.isEmpty()) {
+ // there are no changes. weird!
+ _segmentCache.setChangeNumber(_segmentName,change.till);
+ return;
+ }
+
+ synchronized (_lock) {
+ // check state one more time.
+ if (change.since != _segmentCache.getChangeNumber(_segmentName)
+ || change.till < _segmentCache.getChangeNumber(_segmentName)) {
+ // some other thread may have updated the shared state. exit
+ return;
+ }
+ // apply the additions and removals to the cached segment
+ _segmentCache.updateSegment(_segmentName,change.added, change.removed);
+
+ if (!change.added.isEmpty()) {
+ _log.info(_segmentName + " added keys: " + summarize(change.added));
+ }
+
+ if (!change.removed.isEmpty()) {
+ _log.info(_segmentName + " removed keys: " + summarize(change.removed));
+ }
+
+ _segmentCache.setChangeNumber(_segmentName,change.till);
+ }
+ }
+
+ private String summarize(List changes) {
+ StringBuilder bldr = new StringBuilder();
+ bldr.append("[");
+ for (int i = 0; i < Math.min(3, changes.size()); i++) {
+ if (i != 0) {
+ bldr.append(", ");
+ }
+ bldr.append(changes.get(i));
+ }
+
+ if (changes.size() > 3) {
+ bldr.append("... ");
+ bldr.append((changes.size() - 3));
+ bldr.append(" others");
+ }
+ bldr.append("]");
+
+ return bldr.toString();
+ }
+
+ private void callLoopRun(boolean isFetch, boolean addCacheHeader){
+ while (true) {
+ long start = _segmentCache.getChangeNumber(_segmentName);
+ runWithoutExceptionHandling(addCacheHeader);
+ long end = _segmentCache.getChangeNumber(_segmentName);
+ if (isFetch && _log.isDebugEnabled()) {
+ _log.debug(_segmentName + " segment fetch before: " + start + ", after: " + _segmentCache.getChangeNumber(_segmentName) /*+ " size: " + _concurrentKeySet.size()*/);
+ }
+ if (start >= end) {
+ break;
+ }
+ }
+ }
+
+ @Override
+ public void runWhitCacheHeader(){
+ this.fetchAndUpdate(true);
+ }
+
+ /**
+ * Registers the segment with the readiness gates, runs the fetch loop, and marks the segment as ready.
+ * @param addCacheHeader indicates whether the Cache-Control header is required
+ */
+ private void fetchAndUpdate(boolean addCacheHeader) {
+ try {
+ // Do this again in case the previous call errored out.
+ _gates.registerSegment(_segmentName);
+ callLoopRun(true, addCacheHeader);
+
+ _gates.segmentIsReady(_segmentName);
+
+ } catch (Throwable t) {
+ _log.error("RefreshableSegmentFetcher failed: " + t.getMessage());
+ if (_log.isDebugEnabled()) {
+ _log.debug("Reason:", t);
+ }
+ }
+ }
+
+ @Override
+ public void fetchAll() {
+ this.fetchAndUpdate(false);
+ }
+
+
+}
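
The fetch loop keeps calling the fetcher until the change number stored in the cache stops advancing, i.e. until the last fetch brought back nothing newer. A self-contained sketch of that loop shape (not part of the patch; fetchOnce stands in for runWithoutExceptionHandling plus the cache read):

    import java.util.function.LongUnaryOperator;

    public final class FetchLoopSketch {
        // fetchOnce performs one fetch, updates the cache and returns the change number now stored there.
        public static long fetchUntilStable(long initialChangeNumber, LongUnaryOperator fetchOnce) {
            long current = initialChangeNumber;
            while (true) {
                long before = current;
                current = fetchOnce.applyAsLong(before);
                if (before >= current) {   // nothing newer came back, stop looping
                    return current;
                }
            }
        }
    }
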
diff --git a/client/src/main/java/io/split/engine/segments/SegmentImp.java b/client/src/main/java/io/split/engine/segments/SegmentImp.java
new file mode 100644
index 000000000..2d153d1f3
--- /dev/null
+++ b/client/src/main/java/io/split/engine/segments/SegmentImp.java
@@ -0,0 +1,44 @@
+package io.split.engine.segments;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class SegmentImp{
+ private final String _name;
+ private final AtomicLong _changeNumber;
+ private Set _concurrentKeySet = Collections.newSetFromMap(new ConcurrentHashMap<>());
+
+ public SegmentImp(long changeNumber, String name) {
+ _name = name;
+ _changeNumber = new AtomicLong(changeNumber);
+ }
+
+ public SegmentImp(long changeNumber, String name, List keys){
+ this(changeNumber, name);
+ _concurrentKeySet.addAll(keys);
+ }
+
+ public String getName() {
+ return _name;
+ }
+
+ public long getChangeNumber() {
+ return _changeNumber.get();
+ }
+
+ public void setChangeNumber(long changeNumber){
+ _changeNumber.set(changeNumber);
+ }
+
+ public void update(List toAdd, List toRemove){
+ _concurrentKeySet.removeAll(toRemove);
+ _concurrentKeySet.addAll(toAdd);
+ }
+
+ public boolean contains(String key) {
+ return _concurrentKeySet.contains(key);
+ }
+}
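
A short usage sketch of the new holder (not part of the patch; the segment name and keys are invented):

    import io.split.engine.segments.SegmentImp;

    import java.util.Arrays;
    import java.util.Collections;

    public final class SegmentImpUsageSketch {
        public static void main(String[] args) {
            SegmentImp segment = new SegmentImp(-1L, "employees", Arrays.asList("user-1", "user-2"));
            segment.update(Collections.singletonList("user-3"), Collections.singletonList("user-1"));
            segment.setChangeNumber(100L);
            System.out.println(segment.contains("user-3")); // true
            System.out.println(segment.contains("user-1")); // false
        }
    }
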
diff --git a/client/src/main/java/io/split/engine/segments/SegmentSynchronizationTask.java b/client/src/main/java/io/split/engine/segments/SegmentSynchronizationTask.java
new file mode 100644
index 000000000..0bed99225
--- /dev/null
+++ b/client/src/main/java/io/split/engine/segments/SegmentSynchronizationTask.java
@@ -0,0 +1,32 @@
+package io.split.engine.segments;
+
+public interface SegmentSynchronizationTask extends Runnable {
+ /**
+ * initializes the segment
+ * @param segmentName
+ */
+ void initializeSegment(String segmentName);
+
+ /**
+ * returns the SegmentFetcher for the given segment, creating and initializing it if needed
+ * @param segmentName name of the segment
+ * @return the SegmentFetcher associated with segmentName
+ */
+ SegmentFetcher getFetcher(String segmentName);
+
+ /**
+ * starts the fetching
+ */
+ void startPeriodicFetching();
+
+ /**
+ * stops the thread
+ */
+ void stop();
+
+ /**
+ * fetches every registered segment
+ * @param addCacheHeader whether the Cache-Control header should be added to each request
+ */
+ void fetchAll(boolean addCacheHeader);
+}
diff --git a/client/src/main/java/io/split/engine/segments/RefreshableSegmentFetcher.java b/client/src/main/java/io/split/engine/segments/SegmentSynchronizationTaskImp.java
similarity index 66%
rename from client/src/main/java/io/split/engine/segments/RefreshableSegmentFetcher.java
rename to client/src/main/java/io/split/engine/segments/SegmentSynchronizationTaskImp.java
index 5097a9c55..3db365ba9 100644
--- a/client/src/main/java/io/split/engine/segments/RefreshableSegmentFetcher.java
+++ b/client/src/main/java/io/split/engine/segments/SegmentSynchronizationTaskImp.java
@@ -2,51 +2,47 @@
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import io.split.cache.SegmentCache;
import io.split.engine.SDKReadinessGates;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
-/**
- * A SegmentFetchers implementation that creates RefreshableSegmentFetcher instances.
- *
- * @author adil
- */
-public class RefreshableSegmentFetcher implements Closeable, SegmentFetcher, Runnable {
- private static final Logger _log = LoggerFactory.getLogger(RefreshableSegmentFetcher.class);
+public class SegmentSynchronizationTaskImp implements SegmentSynchronizationTask, Closeable {
+ private static final Logger _log = LoggerFactory.getLogger(SegmentSynchronizationTaskImp.class);
private final SegmentChangeFetcher _segmentChangeFetcher;
private final AtomicLong _refreshEveryNSeconds;
private final AtomicBoolean _running;
private final Object _lock = new Object();
- private final ConcurrentMap _segmentFetchers = Maps.newConcurrentMap();
+ private final ConcurrentMap _segmentFetchers = Maps.newConcurrentMap();
+ private final SegmentCache _segmentCache;
private final SDKReadinessGates _gates;
private final ScheduledExecutorService _scheduledExecutorService;
private ScheduledFuture<?> _scheduledFuture;
- public RefreshableSegmentFetcher(SegmentChangeFetcher segmentChangeFetcher, long refreshEveryNSeconds, int numThreads, SDKReadinessGates gates) {
- _segmentChangeFetcher = segmentChangeFetcher;
- checkNotNull(_segmentChangeFetcher);
+ public SegmentSynchronizationTaskImp(SegmentChangeFetcher segmentChangeFetcher, long refreshEveryNSeconds, int numThreads, SDKReadinessGates gates, SegmentCache segmentCache) {
+ _segmentChangeFetcher = checkNotNull(segmentChangeFetcher);
checkArgument(refreshEveryNSeconds >= 0L);
_refreshEveryNSeconds = new AtomicLong(refreshEveryNSeconds);
- _gates = gates;
- checkNotNull(_gates);
+ _gates = checkNotNull(gates);
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
@@ -56,12 +52,20 @@ public RefreshableSegmentFetcher(SegmentChangeFetcher segmentChangeFetcher, long
_scheduledExecutorService = Executors.newScheduledThreadPool(numThreads, threadFactory);
_running = new AtomicBoolean(false);
+
+ _segmentCache = checkNotNull(segmentCache);
}
- public RefreshableSegment segment(String segmentName) {
- RefreshableSegment segment = _segmentFetchers.get(segmentName);
+ @Override
+ public void run() {
+ this.fetchAll(false);
+ }
+
+ @Override
+ public void initializeSegment(String segmentName) {
+ SegmentFetcher segment = _segmentFetchers.get(segmentName);
if (segment != null) {
- return segment;
+ return;
}
// we are locking here since we wanna make sure that we create only ONE RefreableSegmentFetcher
@@ -70,61 +74,30 @@ public RefreshableSegment segment(String segmentName) {
// double check
segment = _segmentFetchers.get(segmentName);
if (segment != null) {
- return segment;
+ return;
}
try {
_gates.registerSegment(segmentName);
} catch (InterruptedException e) {
_log.error("Unable to register segment " + segmentName);
- // We will try again inside the RefreshableSegment.
}
- segment = RefreshableSegment.create(segmentName, _segmentChangeFetcher, _gates);
+
+ segment = new SegmentFetcherImp(segmentName, _segmentChangeFetcher, _gates, _segmentCache);
if (_running.get()) {
- _scheduledExecutorService.submit(segment);
+ _scheduledExecutorService.submit(segment::fetchAll);
}
_segmentFetchers.putIfAbsent(segmentName, segment);
-
- return segment;
}
}
@Override
- public long getChangeNumber(String segmentName) {
- RefreshableSegment segment = _segmentFetchers.get(segmentName);
-
- if (segment == null) {
- return -1;
- }
-
- return segment.changeNumber();
- }
-
- @Override
- public void forceRefresh(String segmentName) {
- _log.debug(String.format("Fetching segment: %s ...", segmentName));
- RefreshableSegment segment = _segmentFetchers.get(segmentName);
-
- if (segment == null) {
- return;
- }
+ public SegmentFetcher getFetcher(String segmentName) {
+ initializeSegment(segmentName);
- segment.forceRefresh();
- }
-
- @Override
- public void forceRefreshAll() {
- for (ConcurrentMap.Entry entry : _segmentFetchers.entrySet()) {
- RefreshableSegment refreshableSegment = entry.getValue();
-
- if (refreshableSegment == null) {
- continue;
- }
-
- _scheduledExecutorService.submit(refreshableSegment);
- }
+ return _segmentFetchers.get(segmentName);
}
@Override
@@ -149,12 +122,6 @@ public void stop() {
_log.debug("Stopped PeriodicFetching Segments ...");
}
- @Override
- public void run() {
- _log.debug("Fetch Segments starting ...");
- forceRefreshAll();
- }
-
@Override
public void close() {
if (_scheduledExecutorService == null || _scheduledExecutorService.isShutdown()) {
@@ -172,6 +139,22 @@ public void close() {
_log.error("Shutdown of SegmentFetchers was interrupted");
Thread.currentThread().interrupt();
}
+ }
+
+ @Override
+ public void fetchAll(boolean addCacheHeader) {
+ for (Map.Entry entry : _segmentFetchers.entrySet()) {
+ SegmentFetcher fetcher = entry.getValue();
+
+ if (fetcher == null) {
+ continue;
+ }
+ if(addCacheHeader) {
+ _scheduledExecutorService.submit(fetcher::runWhitCacheHeader);
+ continue;
+ }
+ _scheduledExecutorService.submit(fetcher::fetchAll);
+ }
}
}
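
Putting the segment-side pieces together, the factory is expected to build the task roughly as sketched below (not part of the patch; the refresh rate, thread count and segment name are arbitrary examples, while the constructor and method calls are the ones introduced in this diff):

    import io.split.cache.SegmentCache;
    import io.split.engine.SDKReadinessGates;
    import io.split.engine.segments.SegmentChangeFetcher;
    import io.split.engine.segments.SegmentSynchronizationTaskImp;

    public final class SegmentSyncSketch {
        public static SegmentSynchronizationTaskImp start(SegmentChangeFetcher changeFetcher,
                                                          SDKReadinessGates gates,
                                                          SegmentCache segmentCache) {
            SegmentSynchronizationTaskImp task =
                    new SegmentSynchronizationTaskImp(changeFetcher, 60L, 2, gates, segmentCache);
            task.initializeSegment("employees");  // registers the segment and creates its fetcher
            task.startPeriodicFetching();
            task.fetchAll(true);                  // one on-demand pass adding the Cache-Control header
            return task;
        }
    }
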
diff --git a/client/src/main/java/io/split/engine/sse/EventSourceClientImp.java b/client/src/main/java/io/split/engine/sse/EventSourceClientImp.java
index cea1b2382..7d8bf990d 100644
--- a/client/src/main/java/io/split/engine/sse/EventSourceClientImp.java
+++ b/client/src/main/java/io/split/engine/sse/EventSourceClientImp.java
@@ -14,17 +14,21 @@
import java.net.URI;
import java.net.URISyntaxException;
+import java.util.concurrent.atomic.AtomicBoolean;
import static com.google.common.base.Preconditions.checkNotNull;
public class EventSourceClientImp implements EventSourceClient {
private static final Logger _log = LoggerFactory.getLogger(EventSourceClient.class);
+ private static final String ERROR = "error";
+ private static final String MESSAGE = "message";
private final String _baseStreamingUrl;
private final NotificationParser _notificationParser;
private final NotificationProcessor _notificationProcessor;
private final SSEClient _sseClient;
private final PushStatusTracker _pushStatusTracker;
+ private final AtomicBoolean _firstEvent;
@VisibleForTesting
/* package private */ EventSourceClientImp(String baseStreamingUrl,
@@ -41,7 +45,7 @@ public class EventSourceClientImp implements EventSourceClient {
inboundEvent -> { onMessage(inboundEvent); return null; },
status -> { _pushStatusTracker.handleSseStatus(status); return null; },
sseHttpClient);
-
+ _firstEvent = new AtomicBoolean();
}
public static EventSourceClientImp build(String baseStreamingUrl,
@@ -63,6 +67,7 @@ public boolean start(String channelList, String token) {
}
try {
+ _firstEvent.set(false);
return _sseClient.open(buildUri(channelList, token));
} catch (URISyntaxException e) {
_log.error("Error building Streaming URI: " + e.getMessage());
@@ -91,13 +96,16 @@ private void onMessage(RawEvent event) {
try {
String type = event.event();
String payload = event.data();
+ if(_firstEvent.compareAndSet(false, true) && !ERROR.equals(type)){
+ _pushStatusTracker.handleSseStatus(SSEClient.StatusMessage.FIRST_EVENT);
+ }
if (payload.length() > 0) {
_log.debug(String.format("Payload received: %s", payload));
switch (type) {
- case "message":
+ case MESSAGE:
_notificationProcessor.process(_notificationParser.parseMessage(payload));
break;
- case "error":
+ case ERROR:
_pushStatusTracker.handleIncomingAblyError(_notificationParser.parseError(payload));
break;
default:
diff --git a/client/src/main/java/io/split/engine/sse/PushStatusTrackerImp.java b/client/src/main/java/io/split/engine/sse/PushStatusTrackerImp.java
index 59aba69a9..f76cf691e 100644
--- a/client/src/main/java/io/split/engine/sse/PushStatusTrackerImp.java
+++ b/client/src/main/java/io/split/engine/sse/PushStatusTrackerImp.java
@@ -1,5 +1,6 @@
package io.split.engine.sse;
+import com.google.common.collect.Maps;
import io.split.engine.common.PushManager;
import io.split.engine.sse.client.SSEClient;
import io.split.engine.sse.dtos.ControlNotification;
@@ -9,6 +10,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
@@ -20,12 +22,13 @@ public class PushStatusTrackerImp implements PushStatusTracker {
private final AtomicReference _sseStatus = new AtomicReference<>(SSEClient.StatusMessage.INITIALIZATION_IN_PROGRESS);
private final AtomicReference _backendStatus = new AtomicReference<>(ControlType.STREAMING_RESUMED);
private final LinkedBlockingQueue _statusMessages;
+ private final ConcurrentMap regions = Maps.newConcurrentMap();
public PushStatusTrackerImp(LinkedBlockingQueue statusMessages) {
_statusMessages = statusMessages;
}
- public synchronized void reset() {
+ private synchronized void reset() {
_publishersOnline.set(true);
_sseStatus.set(SSEClient.StatusMessage.INITIALIZATION_IN_PROGRESS);
_backendStatus.set(ControlType.STREAMING_RESUMED);
@@ -36,11 +39,12 @@ public void handleSseStatus(SSEClient.StatusMessage newStatus) {
_log.debug(String.format("Current status: %s. New status: %s", _sseStatus.get().toString(), newStatus.toString()));
switch(newStatus) {
- case CONNECTED:
- if (_sseStatus.compareAndSet(SSEClient.StatusMessage.INITIALIZATION_IN_PROGRESS, SSEClient.StatusMessage.CONNECTED)
- || _sseStatus.compareAndSet(SSEClient.StatusMessage.RETRYABLE_ERROR, SSEClient.StatusMessage.CONNECTED)) {
+ case FIRST_EVENT:
+ if (SSEClient.StatusMessage.CONNECTED.equals(_sseStatus.get())) {
_statusMessages.offer(PushManager.Status.STREAMING_READY);
}
+ case CONNECTED:
+ _sseStatus.compareAndSet(SSEClient.StatusMessage.INITIALIZATION_IN_PROGRESS, SSEClient.StatusMessage.CONNECTED);
break;
case RETRYABLE_ERROR:
if (_sseStatus.compareAndSet(SSEClient.StatusMessage.CONNECTED, SSEClient.StatusMessage.RETRYABLE_ERROR)) {
@@ -98,9 +102,11 @@ public void handleIncomingOccupancyEvent(OccupancyNotification occupancyNotifica
_log.debug(String.format("handleIncomingOccupancyEvent: publishers=%d", occupancyNotification.getMetrics().getPublishers()));
int publishers = occupancyNotification.getMetrics().getPublishers();
- if (publishers <= 0 && _publishersOnline.compareAndSet(true, false) && _backendStatus.get().equals(ControlType.STREAMING_RESUMED)) {
+ regions.put(occupancyNotification.getChannel(), publishers);
+ boolean isPublishers = isPublishers();
+ if (!isPublishers && _publishersOnline.compareAndSet(true, false) && _backendStatus.get().equals(ControlType.STREAMING_RESUMED)) {
_statusMessages.offer(PushManager.Status.STREAMING_DOWN);
- } else if (publishers >= 1 && _publishersOnline.compareAndSet(false, true) && _backendStatus.get().equals(ControlType.STREAMING_RESUMED)) {
+ } else if (isPublishers && _publishersOnline.compareAndSet(false, true) && _backendStatus.get().equals(ControlType.STREAMING_RESUMED)) {
_statusMessages.offer(PushManager.Status.STREAMING_READY);
}
}
@@ -114,6 +120,7 @@ public void handleIncomingAblyError(ErrorNotification notification) {
}
if (notification.getCode() >= 40140 && notification.getCode() <= 40149) {
_statusMessages.offer(PushManager.Status.STREAMING_BACKOFF);
+ return;
}
if (notification.getCode() >= 40000 && notification.getCode() <= 49999) {
_statusMessages.offer(PushManager.Status.STREAMING_OFF);
@@ -129,4 +136,13 @@ public synchronized void forcePushDisable() {
_backendStatus.set(ControlType.STREAMING_DISABLED);
_statusMessages.offer(PushManager.Status.STREAMING_OFF);
}
+
+ private boolean isPublishers() {
+ for(Integer publisher : regions.values()) {
+ if (publisher > 0) {
+ return true;
+ }
+ }
+ return false;
+ }
}
\ No newline at end of file
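
The occupancy handling now tracks publisher counts per channel instead of reacting only to control_pri, and streaming is only considered publisher-less when every tracked region reports zero. A self-contained sketch of that bookkeeping (not part of the patch):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public final class OccupancySketch {
        private final Map<String, Integer> regions = new ConcurrentHashMap<>();

        // Called for every occupancy notification, whatever channel it arrives on.
        public void onOccupancy(String channel, int publishers) {
            regions.put(channel, publishers);
        }

        // Streaming has publishers if at least one tracked region reports a count above zero.
        public boolean anyPublisherOnline() {
            for (int publishers : regions.values()) {
                if (publishers > 0) {
                    return true;
                }
            }
            return false;
        }
    }
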
diff --git a/client/src/main/java/io/split/engine/sse/client/SSEClient.java b/client/src/main/java/io/split/engine/sse/client/SSEClient.java
index 00507a6d2..6f072400d 100644
--- a/client/src/main/java/io/split/engine/sse/client/SSEClient.java
+++ b/client/src/main/java/io/split/engine/sse/client/SSEClient.java
@@ -19,6 +19,7 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
@@ -31,7 +32,8 @@ public enum StatusMessage {
RETRYABLE_ERROR,
NONRETRYABLE_ERROR,
INITIALIZATION_IN_PROGRESS,
- FORCED_STOP
+ FORCED_STOP,
+ FIRST_EVENT
}
private enum ConnectionState {
@@ -54,6 +56,7 @@ private enum ConnectionState {
private final AtomicReference _state = new AtomicReference<>(ConnectionState.CLOSED);
private final AtomicReference _ongoingResponse = new AtomicReference<>();
private final AtomicReference _ongoingRequest = new AtomicReference<>();
+ private AtomicBoolean _forcedStop;
public SSEClient(Function eventCallback,
Function statusCallback,
@@ -61,6 +64,7 @@ public SSEClient(Function eventCallback,
_eventCallback = eventCallback;
_statusCallback = statusCallback;
_client = client;
+ _forcedStop = new AtomicBoolean();
}
public synchronized boolean open(URI uri) {
@@ -90,13 +94,14 @@ public boolean isOpen() {
}
public synchronized void close() {
+ _forcedStop.set(true);
if (_state.compareAndSet(ConnectionState.OPEN, ConnectionState.CLOSED)) {
if (_ongoingResponse.get() != null) {
try {
_ongoingRequest.get().abort();
_ongoingResponse.get().close();
} catch (IOException e) {
- _log.info(String.format("Error closing SSEClient: %s", e.getMessage()));
+ _log.debug(String.format("SSEClient close forced: %s", e.getMessage()));
}
}
}
@@ -127,9 +132,11 @@ private void connectAndLoop(URI uri, CountDownLatch signal) {
_statusCallback.apply(StatusMessage.RETRYABLE_ERROR);
return;
} catch (IOException exc) { // Other type of connection error
- _log.info(String.format("SSE connection ended abruptly: %s. Retying", exc.getMessage()));
- _statusCallback.apply(StatusMessage.RETRYABLE_ERROR);
- return;
+ if(!_forcedStop.get()) {
+ _log.debug(String.format("SSE connection ended abruptly: %s. Retying", exc.getMessage()));
+ _statusCallback.apply(StatusMessage.RETRYABLE_ERROR);
+ return;
+ }
}
}
} catch (Exception e) { // Any other error non related to the connection disables streaming altogether
@@ -144,6 +151,7 @@ private void connectAndLoop(URI uri, CountDownLatch signal) {
_state.set(ConnectionState.CLOSED);
_log.debug("SSEClient finished.");
+ _forcedStop.set(false);
}
}
diff --git a/client/src/main/java/io/split/engine/sse/dtos/OccupancyNotification.java b/client/src/main/java/io/split/engine/sse/dtos/OccupancyNotification.java
index 1ca104c96..447b16672 100644
--- a/client/src/main/java/io/split/engine/sse/dtos/OccupancyNotification.java
+++ b/client/src/main/java/io/split/engine/sse/dtos/OccupancyNotification.java
@@ -4,7 +4,6 @@
import io.split.engine.sse.NotificationProcessor;
public class OccupancyNotification extends IncomingNotification implements StatusNotification {
- private static final String CONTROL_PRI_CHANNEL = "control_pri";
private final OccupancyMetrics metrics;
public OccupancyNotification(GenericNotificationData genericNotificationData) {
@@ -23,9 +22,7 @@ public void handler(NotificationProcessor notificationProcessor) {
@Override
public void handlerStatus(PushStatusTracker notificationManagerKeeper) {
- if (CONTROL_PRI_CHANNEL.equals(getChannel())) {
- notificationManagerKeeper.handleIncomingOccupancyEvent(this);
- }
+ notificationManagerKeeper.handleIncomingOccupancyEvent(this);
}
@Override
diff --git a/client/src/main/java/io/split/inputValidation/ApiKeyValidator.java b/client/src/main/java/io/split/inputValidation/ApiKeyValidator.java
new file mode 100644
index 000000000..5c1f188f0
--- /dev/null
+++ b/client/src/main/java/io/split/inputValidation/ApiKeyValidator.java
@@ -0,0 +1,17 @@
+package io.split.inputValidation;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ApiKeyValidator {
+ private static final Logger _log = LoggerFactory.getLogger(ApiKeyValidator.class);
+
+ public static void validate(String apiToken) {
+ if (apiToken == null) {
+ _log.error("factory instantiation: you passed a null apiToken, apiToken must be a non-empty string");
+ return;
+ }
+ if (apiToken.isEmpty()) {
+ _log.error("factory instantiation: you passed an empty apiToken, apiToken must be a non-empty string");
+ }
+ }
+}
diff --git a/client/src/main/java/io/split/inputValidation/EventsValidator.java b/client/src/main/java/io/split/inputValidation/EventsValidator.java
new file mode 100644
index 000000000..a218778e9
--- /dev/null
+++ b/client/src/main/java/io/split/inputValidation/EventsValidator.java
@@ -0,0 +1,110 @@
+package io.split.inputValidation;
+
+import io.split.client.dtos.Event;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+public class EventsValidator {
+ private static final Logger _log = LoggerFactory.getLogger(EventsValidator.class);
+ public static final Pattern EVENT_TYPE_MATCHER = Pattern.compile("^[a-zA-Z0-9][-_.:a-zA-Z0-9]{0,79}$");
+
+ public static EventValidatorResult propertiesAreValid(Map properties) {
+ int size = 1024; // We assume 1kb events without properties (750 bytes avg measured)
+
+ if (properties == null) {
+ return new EventValidatorResult(true);
+ }
+
+ if (properties.size() > 300) {
+ _log.warn("Event has more than 300 properties. Some of them will be trimmed when processed");
+ }
+
+ Map result = new HashMap<>();
+ for (Map.Entry entry : properties.entrySet()) {
+ if (entry.getKey() == null || entry.getKey().isEmpty()) {
+ continue;
+ }
+
+ size += entry.getKey().length();
+ Object value = entry.getValue();
+
+ if (!(value instanceof Number) && !(value instanceof Boolean) && !(value instanceof String)) {
+ _log.warn(String.format("Property %s is of invalid type. Setting value to null", entry.getKey()));
+ value = null;
+ }
+
+ if (value instanceof String) {
+ size += ((String) value).length();
+ }
+
+ if (size > Event.MAX_PROPERTIES_LENGTH_BYTES) {
+ _log.error(String.format("The maximum size allowed for the properties is 32768 bytes. "
+ + "Current one is %s bytes. Event not queued", size));
+
+ return new EventValidatorResult(false);
+ }
+
+ result.put(entry.getKey(), value);
+ }
+
+ return new EventValidatorResult(true, size, result);
+ }
+
+ public static boolean typeIsValid(String eventTypeId, String method) {
+ if (eventTypeId == null) {
+ _log.error(String.format("%s: you passed a null eventTypeId, eventTypeId must be a non-empty string", method));
+ return false;
+ }
+
+ if (eventTypeId.isEmpty()) {
+ _log.error(String.format("%s: you passed an empty eventTypeId, eventTypeId must be a non-empty string", method));
+ return false;
+ }
+
+ if (!EVENT_TYPE_MATCHER.matcher(eventTypeId).find()) {
+ _log.error(String.format("%s: you passed %s, eventTypeId must adhere to the regular expression " +
+ "[a-zA-Z0-9][-_.:a-zA-Z0-9]{0,79}. This means an eventTypeID must be alphanumeric, " +
+ "cannot be more than 80 characters long, and can only include a dash, underscore, period, " +
+ "or colon as separators of alphanumeric characters", method, eventTypeId));
+ return false;
+ }
+
+ return true;
+ }
+
+
+
+ public static class EventValidatorResult {
+ private final boolean _success;
+ private final int _eventSize;
+ private final Map<String, Object> _value;
+
+ public EventValidatorResult(boolean success, int eventSize, Map<String, Object> value) {
+ _success = success;
+ _eventSize = eventSize;
+ _value = value;
+ }
+
+ public EventValidatorResult(boolean success) {
+ _success = success;
+ _eventSize = 0;
+ _value = null;
+ }
+
+ public boolean getSuccess() {
+ return _success;
+ }
+
+ public int getEventSize() {
+ return _eventSize;
+ }
+
+ public Map<String, Object> getValue() {
+ return _value;
+ }
+ }
+}
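
A short sketch of how the two checks compose when validating an event before tracking; the example class and the property values are illustrative assumptions, not taken from this diff:

import io.split.inputValidation.EventsValidator;

import java.util.HashMap;
import java.util.Map;

public class EventsValidatorExample {
    public static void main(String[] args) {
        // Event type must match ^[a-zA-Z0-9][-_.:a-zA-Z0-9]{0,79}$
        boolean typeOk = EventsValidator.typeIsValid("checkout.completed", "track");

        Map<String, Object> properties = new HashMap<>();
        properties.put("amount", 42.5);          // Numbers, Booleans and Strings are kept as-is
        properties.put("currency", "USD");       // String values count towards the 32768-byte budget
        properties.put("metadata", new int[0]);  // unsupported type: value is set to null and a warning is logged

        EventsValidator.EventValidatorResult result = EventsValidator.propertiesAreValid(properties);
        if (typeOk && result.getSuccess()) {
            // getEventSize() is the estimated payload size, getValue() the sanitized property map
            System.out.println("event accepted, ~" + result.getEventSize() + " bytes");
        }
    }
}
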
diff --git a/client/src/main/java/io/split/inputValidation/KeyValidator.java b/client/src/main/java/io/split/inputValidation/KeyValidator.java
new file mode 100644
index 000000000..3276dac5d
--- /dev/null
+++ b/client/src/main/java/io/split/inputValidation/KeyValidator.java
@@ -0,0 +1,49 @@
+package io.split.inputValidation;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class KeyValidator {
+ private static final Logger _log = LoggerFactory.getLogger(KeyValidator.class);
+
+ public static boolean isValid(String key, String propertyName, String method) {
+ if (key == null) {
+ _log.error(String.format("%s: you passed a null %s, %s must be a non-empty string", method, propertyName, propertyName));
+ return false;
+ }
+
+ if (key.isEmpty()) {
+ _log.error(String.format("%s: you passed an empty %s, %s must be a non-empty string", method, propertyName, propertyName));
+ return false;
+ }
+
+ return true;
+ }
+
+ public static boolean isValid(String key, String propertyName, int maxStringLength, String method) {
+ if (!isValid(key, propertyName, method)) {
+ return false;
+ }
+
+ if (key.length() > maxStringLength) {
+ _log.error(String.format("%s: %s too long - must be %s characters or less", method, propertyName, maxStringLength));
+ return false;
+ }
+
+ return true;
+ }
+
+ public static boolean bucketingKeyIsValid(String bucketingKey, int maxStringLength, String method) {
+ if (bucketingKey != null && bucketingKey.isEmpty()) {
+ _log.error(String.format("%s: you passed an empty string, %s must be a non-empty string", method, "bucketingKey"));
+ return false;
+ }
+
+ if (bucketingKey != null && bucketingKey.length() > maxStringLength) {
+ _log.error(String.format("%s: bucketingKey too long - must be %s characters or less", method, maxStringLength));
+ return false;
+ }
+
+ return true;
+ }
+}
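
A sketch of the intended call pattern for matching and bucketing keys; the 250-character limit is an assumed example value, not something defined in this diff:

import io.split.inputValidation.KeyValidator;

public class KeyValidatorExample {
    private static final int MAX_KEY_LENGTH = 250; // illustrative limit

    public static void main(String[] args) {
        boolean matchingKeyOk = KeyValidator.isValid("user-123", "matchingKey", MAX_KEY_LENGTH, "getTreatment");

        // A null bucketing key passes validation here; only empty or oversized ones are rejected.
        boolean bucketingKeyOk = KeyValidator.bucketingKeyIsValid(null, MAX_KEY_LENGTH, "getTreatment");

        System.out.println(matchingKeyOk && bucketingKeyOk); // true
    }
}
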
diff --git a/client/src/main/java/io/split/inputValidation/SplitNameValidator.java b/client/src/main/java/io/split/inputValidation/SplitNameValidator.java
new file mode 100644
index 000000000..06f00b72d
--- /dev/null
+++ b/client/src/main/java/io/split/inputValidation/SplitNameValidator.java
@@ -0,0 +1,30 @@
+package io.split.inputValidation;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Optional;
+
+public class SplitNameValidator {
+ private static final Logger _log = LoggerFactory.getLogger(SplitNameValidator.class);
+
+ public static Optional<String> isValid(String name, String method) {
+ if (name == null) {
+ _log.error(String.format("%s: you passed a null split name, split name must be a non-empty string", method));
+ return Optional.empty();
+ }
+
+ if (name.isEmpty()) {
+ _log.error(String.format("%s: you passed an empty split name, split name must be a non-empty string", method));
+ return Optional.empty();
+ }
+
+ String trimmed = name.trim();
+ if (!trimmed.equals(name)) {
+ _log.warn(String.format("%s: split name %s has extra whitespace, trimming", method, name));
+ name = trimmed;
+ }
+
+ return Optional.of(name);
+ }
+}
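
The validator returns the (possibly trimmed) name rather than a boolean, so callers can use the sanitized value directly; a small illustrative sketch:

import io.split.inputValidation.SplitNameValidator;

import java.util.Optional;

public class SplitNameValidatorExample {
    public static void main(String[] args) {
        // Surrounding whitespace is trimmed with a warning instead of being rejected.
        Optional<String> name = SplitNameValidator.isValid("  my_feature ", "getTreatment");
        name.ifPresent(n -> System.out.println("using split name: '" + n + "'")); // prints 'my_feature'

        // Null or empty names come back as Optional.empty().
        System.out.println(SplitNameValidator.isValid("", "getTreatment").isPresent()); // false
    }
}
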
diff --git a/client/src/main/java/io/split/inputValidation/TrafficTypeValidator.java b/client/src/main/java/io/split/inputValidation/TrafficTypeValidator.java
new file mode 100644
index 000000000..4fc5056a8
--- /dev/null
+++ b/client/src/main/java/io/split/inputValidation/TrafficTypeValidator.java
@@ -0,0 +1,35 @@
+package io.split.inputValidation;
+
+import io.split.cache.SplitCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Optional;
+
+public class TrafficTypeValidator {
+ private static final Logger _log = LoggerFactory.getLogger(TrafficTypeValidator.class);
+
+ public static Optional<String> isValid(String trafficTypeName, SplitCache splitCache, String method) {
+ if (trafficTypeName == null) {
+ _log.error(String.format("%s: you passed a null trafficTypeName, trafficTypeName must be a non-empty string", method));
+ return Optional.empty();
+ }
+
+ if (trafficTypeName.isEmpty()) {
+ _log.error(String.format("%s: you passed an empty trafficTypeName, trafficTypeName must be a non-empty string", method));
+ return Optional.empty();
+ }
+
+ if (!trafficTypeName.equals(trafficTypeName.toLowerCase())) {
+ _log.warn(String.format("%s: trafficTypeName should be all lowercase - converting string to lowercase", method));
+ trafficTypeName = trafficTypeName.toLowerCase();
+ }
+
+ if (!splitCache.trafficTypeExists(trafficTypeName)) {
+ _log.warn(String.format("%s: Traffic Type %s does not have any corresponding Splits in this environment, " +
+ "make sure you’re tracking your events to a valid traffic type defined in the Split console.", method, trafficTypeName));
+ }
+
+ return Optional.of(trafficTypeName);
+ }
+}
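
Because the traffic-type check consults the split cache, a sketch needs a populated SplitCache; the example below seeds an InMemoryCacheImp with the same test helper the new unit tests use, and the scenario itself is illustrative:

import io.split.cache.InMemoryCacheImp;
import io.split.cache.SplitCache;
import io.split.engine.experiments.ParsedSplit;
import io.split.inputValidation.TrafficTypeValidator;

import java.util.ArrayList;
import java.util.Optional;

public class TrafficTypeValidatorExample {
    public static void main(String[] args) {
        SplitCache cache = new InMemoryCacheImp();
        cache.put(ParsedSplit.createParsedSplitForTests("my_feature", 0, false, "off", new ArrayList<>(), "user", 123, 2));

        // Upper-case input is lowercased with a warning; an unknown traffic type only
        // triggers a warning and is still returned, so the event can be tracked anyway.
        Optional<String> trafficType = TrafficTypeValidator.isValid("USER", cache, "track");
        trafficType.ifPresent(tt -> System.out.println("tracking against traffic type: " + tt)); // user
    }
}
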
diff --git a/client/src/test/java/io/split/cache/InMemoryCacheTest.java b/client/src/test/java/io/split/cache/InMemoryCacheTest.java
new file mode 100644
index 000000000..23ea022f3
--- /dev/null
+++ b/client/src/test/java/io/split/cache/InMemoryCacheTest.java
@@ -0,0 +1,121 @@
+package io.split.cache;
+
+import io.split.engine.experiments.ParsedSplit;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class InMemoryCacheTest {
+ private SplitCache _cache;
+
+ @Before
+ public void before() {
+ _cache = new InMemoryCacheImp();
+ }
+
+ @Test
+ public void putAndGetSplit() {
+ ParsedSplit split = getParsedSplit("split_name");
+ _cache.put(split);
+
+ ParsedSplit result = _cache.get("split_name");
+ Assert.assertNotNull(result);
+ Assert.assertEquals(split.changeNumber(), result.changeNumber());
+ Assert.assertEquals(split.trafficTypeName(), result.trafficTypeName());
+ Assert.assertEquals(split.defaultTreatment(), result.defaultTreatment());
+ }
+
+ @Test
+ public void putDuplicateSplit() {
+ ParsedSplit split = getParsedSplit("split_name");
+ ParsedSplit split2 = getParsedSplit("split_name");
+ _cache.put(split);
+ _cache.put(split2);
+
+ int result = _cache.getAll().size();
+
+ Assert.assertEquals(1, result);
+ }
+
+ @Test
+ public void getInExistentSplit() {
+ ParsedSplit split = getParsedSplit("split_name");
+ _cache.put(split);
+
+ ParsedSplit result = _cache.get("split_name_2");
+ Assert.assertNull(result);
+ }
+
+ @Test
+ public void removeSplit() {
+ ParsedSplit split = getParsedSplit("split_name");
+ ParsedSplit split2 = getParsedSplit("split_name_2");
+ _cache.put(split);
+ _cache.put(split2);
+
+ int result = _cache.getAll().size();
+ Assert.assertEquals(2, result);
+
+ _cache.remove("split_name");
+ result = _cache.getAll().size();
+ Assert.assertEquals(1, result);
+
+ Assert.assertNull(_cache.get("split_name"));
+ }
+
+ @Test
+ public void setAndGetChangeNumber() {
+ _cache.setChangeNumber(223);
+
+ long changeNumber = _cache.getChangeNumber();
+ Assert.assertEquals(223, changeNumber);
+
+ _cache.setChangeNumber(539);
+ changeNumber = _cache.getChangeNumber();
+ Assert.assertEquals(539, changeNumber);
+ }
+
+ @Test
+ public void getMany() {
+ _cache.put(getParsedSplit("split_name_1"));
+ _cache.put(getParsedSplit("split_name_2"));
+ _cache.put(getParsedSplit("split_name_3"));
+ _cache.put(getParsedSplit("split_name_4"));
+
+ List<String> names = new ArrayList<>();
+ names.add("split_name_2");
+ names.add("split_name_3");
+
+ Collection<ParsedSplit> result = _cache.getMany(names);
+ Assert.assertEquals(2, result.size());
+ }
+
+ @Test
+ public void trafficTypesExist() {
+ SplitCache cache = new InMemoryCacheImp(-1);
+
+ cache.put(ParsedSplit.createParsedSplitForTests("splitName_1", 0, false, "default_treatment", new ArrayList<>(), "tt", 123, 2));
+ cache.put(ParsedSplit.createParsedSplitForTests("splitName_2", 0, false, "default_treatment", new ArrayList<>(), "tt", 123, 2));
+ cache.put(ParsedSplit.createParsedSplitForTests("splitName_3", 0, false, "default_treatment", new ArrayList<>(), "tt_2", 123, 2));
+ cache.put(ParsedSplit.createParsedSplitForTests("splitName_4", 0, false, "default_treatment", new ArrayList<>(), "tt_3", 123, 2));
+
+ assertTrue(cache.trafficTypeExists("tt_2"));
+ assertTrue(cache.trafficTypeExists("tt"));
+ assertFalse(cache.trafficTypeExists("tt_5"));
+
+ cache.remove("splitName_2");
+ assertTrue(cache.trafficTypeExists("tt"));
+
+ cache.remove("splitName_1");
+ assertFalse(cache.trafficTypeExists("tt"));
+ }
+
+ private ParsedSplit getParsedSplit(String splitName) {
+ return ParsedSplit.createParsedSplitForTests(splitName, 0, false, "default_treatment", new ArrayList<>(), "tt", 123, 2);
+ }
+}
diff --git a/client/src/test/java/io/split/cache/SegmentCacheInMemoryImplTest.java b/client/src/test/java/io/split/cache/SegmentCacheInMemoryImplTest.java
new file mode 100644
index 000000000..95f0b54ee
--- /dev/null
+++ b/client/src/test/java/io/split/cache/SegmentCacheInMemoryImplTest.java
@@ -0,0 +1,61 @@
+package io.split.cache;
+
+import junit.framework.TestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class SegmentCacheInMemoryImplTest extends TestCase {
+ private static final String SEGMENT_NAME = "TestSegment";
+ private static final String FAKE_SEGMENT_NAME = "FakeSegment";
+ private static final long CHANGE_NUMBER = 123L;
+ private static final String KEY = "KEYTEST";
+ private static final long DEFAULT_CHANGE_NUMBER = -1L;
+
+ @Test
+ public void testUpdateSegment(){
+ SegmentCacheInMemoryImpl segmentCacheInMemory = new SegmentCacheInMemoryImpl();
+ segmentCacheInMemory.updateSegment(SEGMENT_NAME,new ArrayList<>(), new ArrayList<>());
+
+ assertEquals(DEFAULT_CHANGE_NUMBER, segmentCacheInMemory.getChangeNumber(SEGMENT_NAME));
+ }
+
+ @Test
+ public void testIsInSegment() {
+ SegmentCacheInMemoryImpl segmentCacheInMemory = new SegmentCacheInMemoryImpl();
+ segmentCacheInMemory.updateSegment(SEGMENT_NAME, Stream.of(KEY).collect(Collectors.toList()), new ArrayList<>());
+ assertTrue(segmentCacheInMemory.isInSegment(SEGMENT_NAME, KEY));
+ }
+ @Test
+ public void testIsInSegmentWithFakeSegment() {
+ SegmentCacheInMemoryImpl segmentCacheInMemory = new SegmentCacheInMemoryImpl();
+ segmentCacheInMemory.updateSegment(SEGMENT_NAME, Stream.of(KEY).collect(Collectors.toList()), new ArrayList<>());
+ assertFalse(segmentCacheInMemory.isInSegment(FAKE_SEGMENT_NAME, KEY));
+ }
+
+ @Test
+ public void testSetChangeNumber() {
+ SegmentCacheInMemoryImpl segmentCacheInMemory = new SegmentCacheInMemoryImpl();
+ segmentCacheInMemory.updateSegment(SEGMENT_NAME,new ArrayList<>(), new ArrayList<>());
+ segmentCacheInMemory.setChangeNumber(SEGMENT_NAME, CHANGE_NUMBER);
+ assertEquals(CHANGE_NUMBER, segmentCacheInMemory.getChangeNumber(SEGMENT_NAME));
+ }
+
+ @Test
+ public void testGetChangeNumberWithFakeSegment() {
+ SegmentCacheInMemoryImpl segmentCacheInMemory = new SegmentCacheInMemoryImpl();
+ segmentCacheInMemory.updateSegment(SEGMENT_NAME,new ArrayList<>(), new ArrayList<>());
+ assertEquals(DEFAULT_CHANGE_NUMBER, segmentCacheInMemory.getChangeNumber(FAKE_SEGMENT_NAME));
+ }
+
+ @Test
+ public void testClear() {
+ SegmentCacheInMemoryImpl segmentCacheInMemory = new SegmentCacheInMemoryImpl();
+ segmentCacheInMemory.updateSegment(SEGMENT_NAME,new ArrayList<>(), new ArrayList<>());
+ segmentCacheInMemory.setChangeNumber(SEGMENT_NAME, CHANGE_NUMBER);
+ segmentCacheInMemory.clear();
+ assertEquals(DEFAULT_CHANGE_NUMBER, segmentCacheInMemory.getChangeNumber(SEGMENT_NAME));
+ }
+}
\ No newline at end of file
diff --git a/client/src/test/java/io/split/client/ApiKeyCounterTest.java b/client/src/test/java/io/split/client/ApiKeyCounterTest.java
new file mode 100644
index 000000000..c017127ce
--- /dev/null
+++ b/client/src/test/java/io/split/client/ApiKeyCounterTest.java
@@ -0,0 +1,50 @@
+package io.split.client;
+
+import junit.framework.TestCase;
+import org.junit.Test;
+
+public class ApiKeyCounterTest extends TestCase {
+
+ private static final String FIRST_KEY = "KEYNUMBER1";
+ private static final String SECOND_KEY = "KEYNUMBER2";
+
+ @Test
+ public void testAddingNewToken() {
+ ApiKeyCounter.getApiKeyCounterInstance().add(FIRST_KEY);
+ assertTrue(ApiKeyCounter.getApiKeyCounterInstance().isApiKeyPresent(FIRST_KEY));
+
+ ApiKeyCounter.getApiKeyCounterInstance().remove(FIRST_KEY);
+ }
+
+ @Test
+ public void testAddingExistingToken() {
+ ApiKeyCounter.getApiKeyCounterInstance().add(FIRST_KEY);
+ ApiKeyCounter.getApiKeyCounterInstance().add(FIRST_KEY);
+
+ assertTrue(ApiKeyCounter.getApiKeyCounterInstance().isApiKeyPresent(FIRST_KEY));
+ assertEquals(2, ApiKeyCounter.getApiKeyCounterInstance().getCount(FIRST_KEY));
+ ApiKeyCounter.getApiKeyCounterInstance().remove(FIRST_KEY);
+ ApiKeyCounter.getApiKeyCounterInstance().remove(FIRST_KEY);
+ }
+
+ @Test
+ public void testRemovingToken() {
+ ApiKeyCounter.getApiKeyCounterInstance().add(FIRST_KEY);
+ ApiKeyCounter.getApiKeyCounterInstance().remove(FIRST_KEY);
+
+ assertFalse(ApiKeyCounter.getApiKeyCounterInstance().isApiKeyPresent(FIRST_KEY));
+ assertEquals(0, ApiKeyCounter.getApiKeyCounterInstance().getCount(FIRST_KEY));
+ }
+
+ @Test
+ public void testAddingNonExistingToken() {
+ ApiKeyCounter.getApiKeyCounterInstance().add(FIRST_KEY);
+ ApiKeyCounter.getApiKeyCounterInstance().add(SECOND_KEY);
+
+ assertTrue(ApiKeyCounter.getApiKeyCounterInstance().isApiKeyPresent(FIRST_KEY));
+ assertEquals(1, ApiKeyCounter.getApiKeyCounterInstance().getCount(FIRST_KEY));
+ assertEquals(1, ApiKeyCounter.getApiKeyCounterInstance().getCount(SECOND_KEY));
+ ApiKeyCounter.getApiKeyCounterInstance().remove(FIRST_KEY);
+ ApiKeyCounter.getApiKeyCounterInstance().remove(SECOND_KEY);
+ }
+}
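
For reference, the pattern these tests exercise: the counter is a process-wide singleton meant to track how many factories share one SDK key. The call sites in the sketch below are hypothetical (it is placed in io.split.client, like the test, so the same accessors are visible):

package io.split.client;

public class ApiKeyCounterExample {
    public static void main(String[] args) {
        String sdkKey = "YOUR_SDK_KEY";

        ApiKeyCounter.getApiKeyCounterInstance().add(sdkKey);               // on factory creation
        if (ApiKeyCounter.getApiKeyCounterInstance().getCount(sdkKey) > 1) {
            System.out.println("another factory is already using this SDK key");
        }

        ApiKeyCounter.getApiKeyCounterInstance().remove(sdkKey);            // on factory destroy
    }
}
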
diff --git a/client/src/test/java/io/split/client/CacheUpdaterServiceTest.java b/client/src/test/java/io/split/client/CacheUpdaterServiceTest.java
new file mode 100644
index 000000000..9d6d30140
--- /dev/null
+++ b/client/src/test/java/io/split/client/CacheUpdaterServiceTest.java
@@ -0,0 +1,45 @@
+package io.split.client;
+
+import io.split.cache.InMemoryCacheImp;
+import io.split.cache.SplitCache;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class CacheUpdaterServiceTest {
+
+ private static final String OFF_TREATMENT = "off";
+ private static final String ON_TREATMENT = "on";
+ private static final String MY_FEATURE = "my_feature";
+ private SplitClientConfig config = SplitClientConfig.builder().setBlockUntilReadyTimeout(100).build();
+
+ @Test
+ public void testCacheUpdate() {
+ SplitCache splitCache = new InMemoryCacheImp();
+ CacheUpdaterService cacheUpdaterService = new CacheUpdaterService(splitCache);
+ cacheUpdaterService.updateCache(getMap());
+ Assert.assertNotNull(splitCache.get(MY_FEATURE));
+ }
+
+ public Map<SplitAndKey, LocalhostSplit> getMap() {
+ Map<SplitAndKey, LocalhostSplit> map = new HashMap<>();
+ SplitAndKey splitAndKey = new SplitAndKey(MY_FEATURE, "only_key");
+ LocalhostSplit split = new LocalhostSplit(OFF_TREATMENT, "{\"desc\" : \"this applies only to OFF and only for only_key. The rest will receive ON\"}");
+ map.put(splitAndKey, split);
+ splitAndKey = new SplitAndKey("other_feature_2", null);
+ split = new LocalhostSplit(ON_TREATMENT, null);
+ map.put(splitAndKey, split);
+ splitAndKey = new SplitAndKey("other_feature_3", null);
+ split = new LocalhostSplit(OFF_TREATMENT, null);
+ map.put(splitAndKey, split);
+ splitAndKey = new SplitAndKey(MY_FEATURE, "key");
+ split = new LocalhostSplit(ON_TREATMENT, "{\"desc\" : \"this applies only to ON treatment\"}");
+ map.put(splitAndKey, split);
+ splitAndKey = new SplitAndKey("other_feature_3", "key_whitelist");
+ split = new LocalhostSplit(ON_TREATMENT, null);
+ map.put(splitAndKey, split);
+ return map;
+ }
+}
\ No newline at end of file
diff --git a/client/src/test/java/io/split/client/HttpSegmentChangeFetcherTest.java b/client/src/test/java/io/split/client/HttpSegmentChangeFetcherTest.java
index 6e0a70b6e..afb238552 100644
--- a/client/src/test/java/io/split/client/HttpSegmentChangeFetcherTest.java
+++ b/client/src/test/java/io/split/client/HttpSegmentChangeFetcherTest.java
@@ -61,7 +61,7 @@ public void testFetcherWithSpecialCharacters() throws URISyntaxException, IOExce
Metrics.NoopMetrics metrics = new Metrics.NoopMetrics();
HttpSegmentChangeFetcher fetcher = HttpSegmentChangeFetcher.create(httpClientMock, rootTarget, metrics);
- SegmentChange change = fetcher.fetch("some_segment", 1234567);
+ SegmentChange change = fetcher.fetch("some_segment", 1234567, true);
Assert.assertNotNull(change);
Assert.assertEquals(1, change.added.size());
diff --git a/client/src/test/java/io/split/client/HttpSplitChangeFetcherTest.java b/client/src/test/java/io/split/client/HttpSplitChangeFetcherTest.java
index 1c7887681..564339db7 100644
--- a/client/src/test/java/io/split/client/HttpSplitChangeFetcherTest.java
+++ b/client/src/test/java/io/split/client/HttpSplitChangeFetcherTest.java
@@ -63,7 +63,7 @@ public void testFetcherWithSpecialCharacters() throws URISyntaxException, Invoca
Metrics.NoopMetrics metrics = new Metrics.NoopMetrics();
HttpSplitChangeFetcher fetcher = HttpSplitChangeFetcher.create(httpClientMock, rootTarget, metrics);
- SplitChange change = fetcher.fetch(1234567);
+ SplitChange change = fetcher.fetch(1234567, true);
Assert.assertNotNull(change);
Assert.assertEquals(1, change.splits.size());
diff --git a/client/src/test/java/io/split/client/LocalhostSplitClientTest.java b/client/src/test/java/io/split/client/LocalhostSplitClientTest.java
deleted file mode 100644
index b34af8000..000000000
--- a/client/src/test/java/io/split/client/LocalhostSplitClientTest.java
+++ /dev/null
@@ -1,109 +0,0 @@
-package io.split.client;
-
-import com.google.common.collect.Maps;
-import io.split.grammar.Treatments;
-import org.junit.Test;
-
-import java.util.Map;
-
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.nullValue;
-import static org.junit.Assert.assertThat;
-
-/**
- * Tests for LocalhostSplitClient
- *
- * @author adil
- */
-public class LocalhostSplitClientTest {
-
- @Test
- public void defaultsWork() {
- Map<SplitAndKey, LocalhostSplit> map = Maps.newHashMap();
- map.put(SplitAndKey.of("onboarding"), LocalhostSplit.of("on"));
- map.put(SplitAndKey.of("test"), LocalhostSplit.of("a"));
- map.put(SplitAndKey.of("onboarding"), LocalhostSplit.of("off")); // overwrite
-
- LocalhostSplitClient client = new LocalhostSplitClient(map);
-
- assertThat(client.getTreatment(null, "foo"), is(equalTo(Treatments.CONTROL)));
- assertThat(client.getTreatment("user1", "foo"), is(equalTo(Treatments.CONTROL)));
- assertThat(client.getTreatment("user1", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user2", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user1", "test"), is(equalTo("a")));
- assertThat(client.getTreatment("user2", "test"), is(equalTo("a")));
- assertThat(client.getTreatmentWithConfig("user2", "test").config(), is(nullValue()));
- assertThat(client.getTreatmentWithConfig("user2", "test").treatment(), is(equalTo("a")));
- }
-
- @Test
- public void overrides_work() {
- Map<SplitAndKey, LocalhostSplit> map = Maps.newHashMap();
- map.put(SplitAndKey.of("onboarding"), LocalhostSplit.of("on"));
- map.put(SplitAndKey.of("onboarding", "user1"), LocalhostSplit.of("off"));
- map.put(SplitAndKey.of("onboarding", "user2"), LocalhostSplit.of("off"));
-
- LocalhostSplitClient client = new LocalhostSplitClient(map);
-
- assertThat(client.getTreatment("user1", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user2", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user3", "onboarding"), is(equalTo("on")));
- assertThat(client.getTreatmentWithConfig("user3", "onboarding").config(), is(nullValue()));
- assertThat(client.getTreatmentWithConfig("user3", "onboarding").treatment(), is(equalTo("on")));
- }
-
- @Test
- public void if_only_overrides_exist() {
- Map<SplitAndKey, LocalhostSplit> map = Maps.newHashMap();
- map.put(SplitAndKey.of("onboarding", "user1"), LocalhostSplit.of("off"));
- map.put(SplitAndKey.of("onboarding", "user2"), LocalhostSplit.of("off"));
-
- LocalhostSplitClient client = new LocalhostSplitClient(map);
-
- assertThat(client.getTreatment("user1", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user2", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user3", "onboarding"), is(equalTo(Treatments.CONTROL)));
- }
-
- @Test
- public void attributes_work() {
- Map<SplitAndKey, LocalhostSplit> map = Maps.newHashMap();
- map.put(SplitAndKey.of("onboarding"), LocalhostSplit.of("on"));
- map.put(SplitAndKey.of("onboarding", "user1"), LocalhostSplit.of("off"));
- map.put(SplitAndKey.of("onboarding", "user2"), LocalhostSplit.of("off"));
-
- LocalhostSplitClient client = new LocalhostSplitClient(map);
-
- Map<String, Object> attributes = Maps.newHashMap();
- attributes.put("age", 24);
-
- assertThat(client.getTreatment("user1", "onboarding", attributes), is(equalTo("off")));
- assertThat(client.getTreatment("user2", "onboarding", attributes), is(equalTo("off")));
- assertThat(client.getTreatment("user3", "onboarding", attributes), is(equalTo("on")));
- }
-
- @Test
- public void update_works() {
- Map<SplitAndKey, LocalhostSplit> map = Maps.newHashMap();
- map.put(SplitAndKey.of("onboarding"), LocalhostSplit.of("on"));
- map.put(SplitAndKey.of("onboarding", "user1"), LocalhostSplit.of("off"));
- map.put(SplitAndKey.of("onboarding", "user2"), LocalhostSplit.of("off"));
-
- LocalhostSplitClient client = new LocalhostSplitClient(map);
-
- assertThat(client.getTreatment("user1", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user2", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user3", "onboarding"), is(equalTo("on")));
-
- map.clear();
- map.put(SplitAndKey.of("onboarding"), LocalhostSplit.of("on"));
- map.put(SplitAndKey.of("onboarding", "user1"), LocalhostSplit.of("off"));
-
- client.updateFeatureToTreatmentMap(map);
-
- assertThat(client.getTreatment("user1", "onboarding"), is(equalTo("off")));
- assertThat(client.getTreatment("user2", "onboarding"), is(equalTo("on")));
- assertThat(client.getTreatment("user3", "onboarding"), is(equalTo("on")));
- }
-}
diff --git a/client/src/test/java/io/split/client/LocalhostSplitFactoryTest.java b/client/src/test/java/io/split/client/LocalhostSplitFactoryTest.java
index 63684e8f3..2728c7366 100644
--- a/client/src/test/java/io/split/client/LocalhostSplitFactoryTest.java
+++ b/client/src/test/java/io/split/client/LocalhostSplitFactoryTest.java
@@ -10,6 +10,7 @@
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
+import java.net.URISyntaxException;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
@@ -27,7 +28,7 @@ public class LocalhostSplitFactoryTest {
public TemporaryFolder folder = new TemporaryFolder();
@Test
- public void works() throws IOException {
+ public void works() throws IOException, URISyntaxException {
File file = folder.newFile(LocalhostSplitFactory.FILENAME);
Map<SplitAndKey, LocalhostSplit> map = Maps.newHashMap();
diff --git a/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlCompactSampleTest.java b/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlCompactSampleTest.java
index 2319594ce..b54e7f489 100644
--- a/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlCompactSampleTest.java
+++ b/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlCompactSampleTest.java
@@ -5,6 +5,7 @@
import org.junit.Test;
import java.io.IOException;
+import java.net.URISyntaxException;
import java.util.Map;
import static org.hamcrest.MatcherAssert.assertThat;
@@ -20,7 +21,7 @@
public class LocalhostSplitFactoryYamlCompactSampleTest {
@Test
- public void works() throws IOException {
+ public void works() throws IOException, URISyntaxException {
String file = getClass().getClassLoader().getResource("split_compact.yaml").getFile();
diff --git a/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlSampleTest.java b/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlSampleTest.java
index 390a0b052..933d19039 100644
--- a/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlSampleTest.java
+++ b/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlSampleTest.java
@@ -5,6 +5,7 @@
import org.junit.Test;
import java.io.IOException;
+import java.net.URISyntaxException;
import java.util.Map;
import static org.hamcrest.MatcherAssert.assertThat;
@@ -20,7 +21,7 @@
public class LocalhostSplitFactoryYamlSampleTest {
@Test
- public void works() throws IOException {
+ public void works() throws IOException, URISyntaxException {
String file = getClass().getClassLoader().getResource(SplitClientConfig.LOCALHOST_DEFAULT_FILE).getFile();
diff --git a/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlTest.java b/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlTest.java
index 4d68cb030..c0be15838 100644
--- a/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlTest.java
+++ b/client/src/test/java/io/split/client/LocalhostSplitFactoryYamlTest.java
@@ -12,6 +12,7 @@
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringWriter;
+import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
@@ -43,7 +44,7 @@ public class LocalhostSplitFactoryYamlTest {
public TemporaryFolder folder = new TemporaryFolder();
@Test
- public void works() throws IOException {
+ public void works() throws IOException, URISyntaxException {
File file = folder.newFile(SplitClientConfig.LOCALHOST_DEFAULT_FILE);
List