Permalink
Browse files

D2 custom partition is a new d2 partition type where the cluster prov…

…ides its own function to decide the partitionId for the given URI. The purpose of this is to provide a flexible way for clusters to customize the partition mapping method to meet their own requirements. To use this partition type, the cluster needs to:

. Implement BasePartitionAccessor interface with the customized logic.
. Register the PartitionAccessor through PartitionAccessorRegistry at the d2client side.
. Update the cluster property in d2.src to enable the customized PartitionAccessor.

The full document can be found https://iwww.corp.linkedin.com/wiki/cf/display/ENGS/Customized+D2+Partitioning.

The pdsc change is backward incompatible so requires synchronized update on the client side.

RB=1087090
G=si-core-reviewers
R=ssheng,xzhu,dhoa
A=ssheng,dhoa
  • Loading branch information...
ChaoLinkedIn committed Sep 14, 2017
1 parent bf0d8a7 commit 04136c18e945e1162d90bbd9d9ebf0efc71171a7
Showing with 933 additions and 74 deletions.
  1. +10 −0 CHANGELOG
  2. +25 −5 d2-schemas/src/main/pegasus/com/linkedin/d2/D2ClusterPartitionConfiguration.pdsc
  3. +14 −8 d2/src/main/java/com/linkedin/d2/balancer/D2ClientBuilder.java
  4. +16 −8 d2/src/main/java/com/linkedin/d2/balancer/D2ClientConfig.java
  5. +3 −3 d2/src/main/java/com/linkedin/d2/balancer/ZKFSLoadBalancerWithFacilitiesFactory.java
  6. +26 −5 d2/src/main/java/com/linkedin/d2/balancer/config/PartitionPropertiesConverter.java
  7. +13 −0 d2/src/main/java/com/linkedin/d2/balancer/properties/ClusterPropertiesJsonSerializer.java
  8. +51 −0 d2/src/main/java/com/linkedin/d2/balancer/properties/CustomizedPartitionProperties.java
  9. +2 −2 d2/src/main/java/com/linkedin/d2/balancer/properties/PartitionProperties.java
  10. +1 −0 d2/src/main/java/com/linkedin/d2/balancer/properties/PropertyKeys.java
  11. +8 −3 d2/src/main/java/com/linkedin/d2/balancer/simple/ClusterLoadBalancerSubscriber.java
  12. +13 −6 d2/src/main/java/com/linkedin/d2/balancer/simple/SimpleLoadBalancerState.java
  13. +37 −0 d2/src/main/java/com/linkedin/d2/balancer/util/partitions/BasePartitionAccessor.java
  14. +50 −0 d2/src/main/java/com/linkedin/d2/balancer/util/partitions/CustomizedPartitionAccessor.java
  15. +4 −2 d2/src/main/java/com/linkedin/d2/balancer/util/partitions/PartitionAccessException.java
  16. +29 −3 d2/src/main/java/com/linkedin/d2/balancer/util/partitions/PartitionAccessor.java
  17. +74 −1 d2/src/main/java/com/linkedin/d2/balancer/util/partitions/PartitionAccessorFactory.java
  18. +38 −0 d2/src/main/java/com/linkedin/d2/balancer/util/partitions/PartitionAccessorRegistry.java
  19. +55 −0 d2/src/main/java/com/linkedin/d2/balancer/util/partitions/PartitionAccessorRegistryImpl.java
  20. +9 −3 d2/src/main/java/com/linkedin/d2/balancer/zkfs/ZKFSTogglingLoadBalancerFactoryImpl.java
  21. +31 −4 d2/src/main/java/com/linkedin/d2/discovery/util/D2Config.java
  22. +4 −4 d2/src/test/java/com/linkedin/d2/balancer/config/PartitionPropertiesConverterTest.java
  23. +419 −16 d2/src/test/java/com/linkedin/d2/discovery/util/TestD2Config.java
  24. +1 −1 gradle.properties
View
@@ -1,3 +1,13 @@
15.0.6
-------
15.0.5
-------
(RB=1087090)
D2 customized partition implementation
15.0.4
-------
(RB=1109051)
@@ -9,10 +9,13 @@
"type": {
"type" : "enum",
"name" : "PartitionTypeEnum",
"symbols" : ["HASH", "RANGE", "NONE"]
"symbols" : ["HASH", "RANGE", "CUSTOM", "NONE"]
},
"doc": "The type of partitioning. We support HASH, RANGE, CUSTOM, NONE.",
"symbolDocs": {"RANGE":"partitioning based on range e.g. Id 1-1000 goes to bucket A, Id 1001-2000 goes to bucket B, etc.", "HASH":"Partitioning based on hash.", "NONE":"No Partitioning."}
"symbolDocs": {"RANGE":"partitioning based on range e.g. Id 1-1000 goes to bucket A, Id 1001-2000 goes to bucket B, etc.",
"HASH":"Partitioning based on hash.",
"CUSTOM":"Partitioning based on the customized function provided by the service",
"NONE":"No Partitioning."}
},
{
"name": "partitionKeyRegex",
@@ -28,11 +31,11 @@
},
{
"name": "partitionTypeSpecificData",
"doc": "If the partition type is RANGE, then we have rangedPartitionProperties. If it's type HASH, we should have a hashAlgorithm. Otherwise we won't have any extra data in this field",
"doc": "If the partition type is RANGE, then we have RangedPartitionProperties. If it's type HASH, we should have a HashAlgorithm. If it is type CUSTOM, we have PartitionAccessorList. Otherwise we won't have any extra data in this field",
"type": [
{
"type" : "record",
"name" : "rangedPartitionProperties",
"name" : "RangedPartitionProperties",
"fields": [
{
"name": "partitionSize",
@@ -47,11 +50,28 @@
]
},
{
"name": "hashAlgorithm",
"name": "HashAlgorithm",
"type" : "enum",
"symbols" : ["MODULO", "MD5"],
"doc": "The hashing algorithm used in HASH based partitioning. Supported algorithms are: MODULO or MD5. Not used for RANGE based partition.",
"symbolDocs": {"MODULO":"Mod the key with partitionCount to get the partitionKey", "MD5":"Hash the key and mod it with partitionCount to get the partitionKey"}
},
{
"name": "PartitionAccessorList",
"type": "record",
"doc": "The list of class names that implement BasePartitionAccessor. D2 goes through the list and uses the first one that is registered to PartitionAccessorRegistry. This list is used when the service needs to provide/deploy multiple versions of implementation.",
"fields":
[
{
"name": "classNames",
"type":
{
"type": "array",
"items": "string"
},
"doc":"Class names for the implemented BasePartitionAccessor"
}
]
}
],
"optional": true
@@ -26,6 +26,7 @@
import com.linkedin.d2.balancer.clients.RetryClient;
import com.linkedin.d2.balancer.event.EventEmitter;
import com.linkedin.d2.balancer.util.healthcheck.HealthCheckOperations;
import com.linkedin.d2.balancer.util.partitions.PartitionAccessorRegistry;
import com.linkedin.d2.balancer.zkfs.ZKFSTogglingLoadBalancerFactoryImpl;
import com.linkedin.r2.message.RequestContext;
import com.linkedin.r2.message.rest.RestRequest;
@@ -35,13 +36,11 @@
import com.linkedin.r2.transport.common.TransportClientFactory;
import com.linkedin.r2.transport.http.client.HttpClientFactory;
import com.linkedin.r2.util.NamedThreadFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLParameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLParameters;
import java.net.URI;
import java.util.Collection;
import java.util.HashMap;
@@ -94,14 +93,15 @@ public D2Client build()
_config.clientServicesConfig,
_config.d2ServicePath,
_config.useNewEphemeralStoreWatcher,
_config._healthCheckOperations,
_config.healthCheckOperations,
_config._executorService,
_config.retry,
_config.retryLimit,
_config.warmUp,
_config.warmUpTimeoutSeconds,
_config.warmUpConcurrentRequests,
_config._eventEmitter);
_config.eventEmitter,
_config.partitionAccessorRegistry);
final LoadBalancerWithFacilities loadBalancer = loadBalancerFactory.create(cfg);
@@ -231,7 +231,7 @@ public D2ClientBuilder setD2ServicePath(String d2ServicePath)
public D2ClientBuilder setHealthCheckOperations(HealthCheckOperations healthCheckOperations)
{
_config._healthCheckOperations = healthCheckOperations;
_config.healthCheckOperations = healthCheckOperations;
return this;
}
@@ -288,7 +288,7 @@ public D2ClientBuilder setRetryLimit(int retryLimit)
public D2ClientBuilder setEventEmitter(EventEmitter eventEmitter)
{
_config._eventEmitter = eventEmitter;
_config.eventEmitter = eventEmitter;
return this;
}
@@ -336,6 +336,12 @@ public D2ClientBuilder setWarmUpConcurrentRequests(int warmUpConcurrentRequests)
return this;
}
public D2ClientBuilder setPartitionAccessorRegistry(PartitionAccessorRegistry registry)
{
_config.partitionAccessorRegistry = registry;
return this;
}
private Map<String, TransportClientFactory> createDefaultTransportClientFactories()
{
final Map<String, TransportClientFactory> clientFactories = new HashMap<String, TransportClientFactory>();
@@ -19,6 +19,7 @@
import com.linkedin.d2.balancer.event.EventEmitter;
import com.linkedin.d2.balancer.util.WarmUpLoadBalancer;
import com.linkedin.d2.balancer.util.healthcheck.HealthCheckOperations;
import com.linkedin.d2.balancer.util.partitions.PartitionAccessorRegistry;
import com.linkedin.d2.balancer.zkfs.ZKFSTogglingLoadBalancerFactoryImpl;
import com.linkedin.d2.balancer.zkfs.ZKFSTogglingLoadBalancerFactoryImpl.ComponentFactory;
import com.linkedin.r2.transport.common.TransportClientFactory;
@@ -51,7 +52,7 @@
boolean isSymlinkAware = false;
Map<String, Map<String, Object>> clientServicesConfig = Collections.<String, Map<String, Object>>emptyMap();
boolean useNewEphemeralStoreWatcher = false;
HealthCheckOperations _healthCheckOperations = null;
HealthCheckOperations healthCheckOperations = null;
/**
* By default is a single threaded executor
*/
@@ -66,7 +67,8 @@
BackupRequestsStrategyStatsConsumer backupRequestsStrategyStatsConsumer = null;
long backupRequestsLatencyNotificationInterval = 1;
TimeUnit backupRequestsLatencyNotificationIntervalUnit = TimeUnit.MINUTES;
EventEmitter _eventEmitter = null;
EventEmitter eventEmitter = null;
PartitionAccessorRegistry partitionAccessorRegistry = null;
private static final int DEAULT_RETRY_LIMIT = 3;
@@ -306,7 +308,9 @@ public D2ClientConfig(String zkHosts,
false,
0,
0,
null);
null,
null
);
}
public D2ClientConfig(String zkHosts,
@@ -335,7 +339,8 @@ public D2ClientConfig(String zkHosts,
boolean warmUp,
int warmUpTimeoutSeconds,
int warmUpConcurrentRequests,
EventEmitter emitter)
EventEmitter emitter,
PartitionAccessorRegistry partitionAccessorRegistry)
{
this(zkHosts,
zkSessionTimeoutInMs,
@@ -368,7 +373,8 @@ public D2ClientConfig(String zkHosts,
1,
TimeUnit.MINUTES,
null,
emitter);
emitter,
partitionAccessorRegistry);
}
public D2ClientConfig(String zkHosts,
@@ -402,7 +408,8 @@ public D2ClientConfig(String zkHosts,
long backupRequestsLatencyNotificationInterval,
TimeUnit backupRequestsLatencyNotificationIntervalUnit,
ScheduledExecutorService backupRequestsExecutorService,
EventEmitter emitter)
EventEmitter emitter,
PartitionAccessorRegistry partitionAccessorRegistry)
{
this.zkHosts = zkHosts;
this.zkSessionTimeoutInMs = zkSessionTimeoutInMs;
@@ -423,7 +430,7 @@ public D2ClientConfig(String zkHosts,
this.clientServicesConfig = clientServicesConfig;
this.d2ServicePath = d2ServicePath;
this.useNewEphemeralStoreWatcher = useNewEphemeralStoreWatcher;
this._healthCheckOperations = healthCheckOperations;
this.healthCheckOperations = healthCheckOperations;
this._executorService = executorService;
this.retry = retry;
this.retryLimit = retryLimit;
@@ -435,6 +442,7 @@ public D2ClientConfig(String zkHosts,
this.backupRequestsLatencyNotificationInterval = backupRequestsLatencyNotificationInterval;
this.backupRequestsLatencyNotificationIntervalUnit = backupRequestsLatencyNotificationIntervalUnit;
this._backupRequestsExecutorService = backupRequestsExecutorService;
this._eventEmitter = emitter;
this.eventEmitter = emitter;
this.partitionAccessorRegistry = partitionAccessorRegistry;
}
}
@@ -88,7 +88,7 @@ public LoadBalancerWithFacilities create(D2ClientConfig config)
}
final Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories =
createDefaultLoadBalancerStrategyFactories(config._healthCheckOperations, config._executorService, config._eventEmitter);
createDefaultLoadBalancerStrategyFactories(config.healthCheckOperations, config._executorService, config.eventEmitter);
return new ZKFSTogglingLoadBalancerFactoryImpl(loadBalancerComponentFactory,
config.lbWaitTimeout,
@@ -102,8 +102,8 @@ public LoadBalancerWithFacilities create(D2ClientConfig config)
config.sslParameters,
config.isSSLEnabled,
config.clientServicesConfig,
config.useNewEphemeralStoreWatcher
);
config.useNewEphemeralStoreWatcher,
config.partitionAccessorRegistry);
}
private Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> createDefaultLoadBalancerStrategyFactories(
@@ -17,13 +17,16 @@
package com.linkedin.d2.balancer.config;
import com.linkedin.d2.D2ClusterPartitionConfiguration;
import com.linkedin.d2.HashAlgorithm;
import com.linkedin.d2.PartitionAccessorList;
import com.linkedin.d2.PartitionTypeEnum;
import com.linkedin.d2.balancer.properties.CustomizedPartitionProperties;
import com.linkedin.d2.balancer.properties.HashBasedPartitionProperties;
import com.linkedin.d2.balancer.properties.NullPartitionProperties;
import com.linkedin.d2.balancer.properties.PartitionProperties;
import com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties;
import com.linkedin.d2.hashAlgorithm;
import com.linkedin.d2.rangedPartitionProperties;
import com.linkedin.d2.RangedPartitionProperties;
import com.linkedin.data.template.StringArray;
/**
@@ -40,7 +43,7 @@ public static PartitionProperties toProperties(D2ClusterPartitionConfiguration c
{
case RANGE:
{
rangedPartitionProperties rangedPartitionProperties =
RangedPartitionProperties rangedPartitionProperties =
config.getPartitionTypeSpecificData().getRangedPartitionProperties();
partitionProperties =
new RangeBasedPartitionProperties(config.getPartitionKeyRegex(),
@@ -68,6 +71,10 @@ public static PartitionProperties toProperties(D2ClusterPartitionConfiguration c
config.getPartitionCount(),
algorithm);
break;
case CUSTOM:
partitionProperties = new CustomizedPartitionProperties(config.getPartitionCount(),
config.getPartitionTypeSpecificData().getPartitionAccessorList().getClassNames());
break;
case NONE:
partitionProperties = NullPartitionProperties.getInstance();
break;
@@ -91,7 +98,7 @@ public static D2ClusterPartitionConfiguration toConfig(PartitionProperties prope
config.setPartitionCount(range.getPartitionCount());
specificData = new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
rangedPartitionProperties rangedPartitionProperties = new rangedPartitionProperties();
RangedPartitionProperties rangedPartitionProperties = new RangedPartitionProperties();
rangedPartitionProperties.setKeyRangeStart(range.getKeyRangeStart());
rangedPartitionProperties.setPartitionSize(range.getPartitionSize());
specificData.setRangedPartitionProperties(rangedPartitionProperties);
@@ -105,9 +112,23 @@ public static D2ClusterPartitionConfiguration toConfig(PartitionProperties prope
config.setPartitionCount(hash.getPartitionCount());
specificData = new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
specificData.setHashAlgorithm(hashAlgorithm.valueOf(hash.getHashAlgorithm().name()));
specificData.setHashAlgorithm(HashAlgorithm.valueOf(hash.getHashAlgorithm().name()));
config.setPartitionTypeSpecificData(specificData);
break;
case CUSTOM:
{
CustomizedPartitionProperties properties = (CustomizedPartitionProperties) property;
config = new D2ClusterPartitionConfiguration();
config.setType(PartitionTypeEnum.CUSTOM);
config.setPartitionCount(properties.getPartitionCount());
specificData = new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
PartitionAccessorList partitionList = new PartitionAccessorList();
partitionList.setClassNames(new StringArray(properties.getPartitionAccessorList()));
specificData.setPartitionAccessorList(partitionList);
config.setPartitionTypeSpecificData(specificData);
break;
}
case NONE:
config = new D2ClusterPartitionConfiguration();
config.setType(PartitionTypeEnum.NONE);
@@ -127,6 +127,19 @@ public ClusterProperties fromMap(Map<String, Object> map)
new HashBasedPartitionProperties(partitionKeyRegex, partitionCount, algorithm);
break;
}
case CUSTOM:
{
int partitionCount = partitionPropertiesMap.containsKey(PropertyKeys.PARTITION_COUNT)
? PropertyUtil.checkAndGetValue(partitionPropertiesMap, PropertyKeys.PARTITION_COUNT, Number.class, scope).intValue()
: 0;
@SuppressWarnings("unchecked")
List<String> partitionAccessorList =partitionPropertiesMap.containsKey(PropertyKeys.PARTITION_ACCESSOR_LIST)
? PropertyUtil.checkAndGetValue(partitionPropertiesMap, PropertyKeys.PARTITION_ACCESSOR_LIST, List.class, scope)
: Collections.emptyList();
partitionProperties = new CustomizedPartitionProperties(partitionCount, partitionAccessorList);
break;
}
case NONE:
partitionProperties = NullPartitionProperties.getInstance();
break;
@@ -0,0 +1,51 @@
/*
Copyright (c) 2017 LinkedIn Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.linkedin.d2.balancer.properties;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Partition properties for the CUSTOM partition type, where the cluster supplies its
 * own {@code BasePartitionAccessor} implementations (identified by class name) to map
 * a request URI to a partitionId. Instances are immutable.
 */
public class CustomizedPartitionProperties implements PartitionProperties
{
  private final int _partitionCount;
  // Ordered class names of BasePartitionAccessor implementations; D2 uses the first
  // one found registered in PartitionAccessorRegistry.
  private final List<String> _partitionAccessorList;

  /**
   * @param partitionCount total number of partitions in the cluster
   * @param partitionAccessorList class names of candidate {@code BasePartitionAccessor}
   *        implementations; defensively copied so later mutation by the caller cannot
   *        affect this object
   */
  public CustomizedPartitionProperties(int partitionCount, List<String> partitionAccessorList)
  {
    _partitionCount = partitionCount;
    // Defensive, unmodifiable copy: keeps this properties object deeply immutable.
    _partitionAccessorList = Collections.unmodifiableList(new ArrayList<>(partitionAccessorList));
  }

  @Override
  public PartitionType getPartitionType()
  {
    return PartitionType.CUSTOM;
  }

  public int getPartitionCount()
  {
    return _partitionCount;
  }

  /**
   * @return an unmodifiable list of the accessor class names
   */
  public List<String> getPartitionAccessorList()
  {
    return _partitionAccessorList;
  }
}
Oops, something went wrong.

0 comments on commit 04136c1

Please sign in to comment.