Add setting for specifying the number of disks for each node group in cluster spec file or in global settings. The total storage of a node splits into the specified number of disks.

Defaults to 0, which means using the default storage split policy defined in PlacementPlanner#placeDisk().
jessehu committed Jul 20, 2015
1 parent d4d8369 commit 116b791
Showing 10 changed files with 158 additions and 21 deletions.
35 changes: 32 additions & 3 deletions common/src/main/java/com/vmware/bdd/apitypes/StorageRead.java
@@ -17,6 +17,7 @@
import java.util.List;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.gson.annotations.Expose;

/**
* Storage get output
@@ -66,18 +67,30 @@ public static DiskType getDiskType(String type) {
}
}

@Expose
private String type;

@Expose
private Priority shares;

@Expose
private int sizeGB;

@Expose
private List<String> dsNames;


@Expose
private List<String> dsNames4System;


@Expose
private List<String> dsNames4Data;


@Expose
private Integer diskNum;

@Expose
private Boolean shareDatastore;

// internal used, data disk store name patterns
private List<String> diskstoreNamePattern;

@@ -184,4 +197,20 @@ public String getAllocType() {
public void setAllocType(String allocType) {
this.allocType = allocType;
}

public Integer getDiskNum() {
return diskNum;
}

public void setDiskNum(Integer diskNum) {
this.diskNum = diskNum;
}

public Boolean isShareDatastore() {
return shareDatastore;
}

public void setShareDatastore(Boolean shareDatastore) {
this.shareDatastore = shareDatastore;
}
}
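For orientation, a minimal usage sketch of the two new StorageRead fields (hypothetical values; setType/setSizeGB are the existing setters also exercised by the tests below):

   // Hypothetical storage settings for a worker node group: 100GB of local
   // storage split across 4 data disks, with datastore sharing allowed.
   StorageRead storage = new StorageRead();
   storage.setType("local");
   storage.setSizeGB(100);
   storage.setDiskNum(4);           // null -> fall back to the global setting
   storage.setShareDatastore(true); // null -> fall back to the global setting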
@@ -52,6 +52,7 @@
import com.vmware.bdd.exception.UniqueConstraintViolationException;
import com.vmware.bdd.exception.VcProviderException;
import com.vmware.bdd.manager.intf.IClusterEntityManager;
import com.vmware.bdd.placement.entity.BaseNode;
import com.vmware.bdd.service.IClusteringService;
import com.vmware.bdd.service.resmgmt.IDatastoreService;
import com.vmware.bdd.service.resmgmt.INetworkService;
@@ -720,6 +721,8 @@ private void convertStorage(NodeGroupCreate group,
NodeGroupEntity groupEntity, Set<String> roles) {
if (group.getStorage() != null) {
groupEntity.setStorageSize(group.getStorage().getSizeGB());
groupEntity.setDiskNum(getDiskNumberForNodeGroup(group));
groupEntity.setShareDatastore(isShareDatastoreForNodeGroup(group));
//currently, ignore input from CLI and hard code here
String storageType = group.getStorage().getType();
if (storageType != null) {
@@ -1031,6 +1034,7 @@ private void expandGroupStorage(NodeGroupEntity ngEntity,
List<String> dataDiskStoreNames = ngEntity.getDdDatastoreNameList();
List<String> systemDiskStoreNames = ngEntity.getSdDatastoreNameList();

logger.debug("expanding group storage for cluster " + ngEntity.getCluster().getName());
if (storageSize <= 0 && storageType == null
&& (storeNames == null || storeNames.isEmpty())) {
logger.debug("no storage specified for node group "
@@ -1041,6 +1045,10 @@ private void expandGroupStorage(NodeGroupEntity ngEntity,
+ ngEntity.getName());
logger.debug("storage type is " + storageType + " for node group "
+ ngEntity.getName());
logger.debug("diskNum is " + ngEntity.getDiskNum() + " for node group "
+ ngEntity.getName());
logger.debug("shareDatastore is " + ngEntity.isShareDatastore() + " for node group "
+ ngEntity.getName());
logger.debug("storage name pattern is " + storeNames + " for node group "
+ ngEntity.getName());
logger.debug("system disk storage name pattern is "
@@ -1050,6 +1058,8 @@ private void expandGroupStorage(NodeGroupEntity ngEntity,
StorageRead storage = new StorageRead();
group.setStorage(storage);
storage.setSizeGB(storageSize);
storage.setDiskNum(ngEntity.getDiskNum());
storage.setShareDatastore(ngEntity.isShareDatastore());
if (storageType != null) {
storage.setType(storageType.toString().toLowerCase());
}
@@ -1344,4 +1354,47 @@ private void setDefaultClusterCloneType(ClusterCreate clusterCreate) {
clusterCreate.setClusterCloneType(Constants.CLUSTER_CLONE_TYPE_FAST_CLONE);
}
}

/*
 * Get the number of disks for each node group, as specified in the cluster spec file or in global settings.
 * The total storage of a node splits into the specified number of disks.
 * Defaults to 0, which means using the default storage split policy defined in PlacementPlanner#placeDisk().
 */
private int getDiskNumberForNodeGroup(NodeGroupCreate group) {
String storageType = null;
Integer diskNumber = null;
if (group.getStorage() != null) {
storageType = group.getStorage().getType();
diskNumber = group.getStorage().getDiskNum();
}
if (storageType == null) {
storageType = DatastoreType.LOCAL.name();
}
if (diskNumber != null) {
return diskNumber;
} else {
return Configuration.getInt(String.format("storage.%1$s.disk_number_per_node", storageType.toLowerCase()), 0);
}
}

/*
 * Get the shareDatastore setting for each node group, as specified in the cluster spec file or in global settings.
 * Defaults to true, which means a disk shares its datastore with other disks rather than having the whole datastore allocated to a single disk.
 */
private boolean isShareDatastoreForNodeGroup(NodeGroupCreate group) {
String storageType = null;
Boolean isShareDatastore = null;
if (group.getStorage() != null) {
storageType = group.getStorage().getType();
isShareDatastore = group.getStorage().isShareDatastore();
}
if (storageType == null) {
storageType = DatastoreType.LOCAL.name();
}
if (isShareDatastore != null) {
return isShareDatastore;
} else {
return Configuration.getBoolean(String.format("storage.%1$s.is_share_datastore", storageType.toLowerCase()), true);
}
}
}
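Taken together, the two helpers above resolve each setting in the same order: the explicit value in the node group's storage section wins, then the per-storage-type global setting, then the built-in default. A sketch with hypothetical global settings:

   // Assuming these hypothetical lines in serengeti.properties:
   //   storage.local.disk_number_per_node = 4
   //   storage.local.is_share_datastore   = false
   //
   // For a node group with LOCAL storage:
   //   spec diskNum = 2                        -> 2     (spec file wins)
   //   spec diskNum = null                     -> 4     (global setting)
   //   spec diskNum = null, no global setting  -> 0     (split policy in PlacementPlanner#placeDisk())
   //   spec shareDatastore = null              -> false (global setting)
   //   spec shareDatastore = null, no global   -> true  (built-in default)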
@@ -1085,10 +1085,13 @@ public void testClusterConfigWithClusterStorage() throws Exception {
// manifest.indexOf("{\"name\":\"my-cluster4\",\"groups\":[{\"name\":\"master\",\"roles\":[\"hadoop_namenode\",\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster4/master\"},{\"name\":\"worker\",\"roles\":[\"hadoop_datanode\",\"hadoop_tasktracker\"],\"instance_num\":3,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster4/worker\"},{\"name\":\"client\",\"roles\":[\"hadoop_client\",\"pig\",\"hive\",\"hive_server\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster4/client\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork\",\"type\":\"dhcp\"}]") != -1);
}

@Test(groups = { "TestClusterConfigManager" })
public void testClusterConfigWithGroupStorage() {
ClusterCreate spec = new ClusterCreate();
spec.setNetworkConfig(createNetConfigs());
spec.setName("my-cluster5");
spec.setDistro("bigtop");
spec.setDistroVendor(Constants.DEFAULT_VENDOR);
List<String> rps = new ArrayList<String>();
rps.add("myRp2");
rps.add("myRp3");
@@ -1106,10 +1109,15 @@ public void testClusterConfigWithGroupStorage() {
group.setName("main_group");
List<String> roles = new ArrayList<String>();
roles.add("hadoop_namenode");
roles.add("hadoop_resourcemanager");
roles.add("hadoop_datanode");
roles.add("hadoop_nodemanager");
group.setRoles(roles);
StorageRead storage = new StorageRead();
storage.setSizeGB(50);
storage.setType(DatastoreType.LOCAL.toString());
storage.setDiskNum(2);
storage.setShareDatastore(false);
group.setStorage(storage);
spec.setNodeGroups(nodegroups);
clusterConfigMgr.createClusterConfig(spec);
@@ -1120,21 +1128,19 @@ public void testClusterConfigWithGroupStorage() {
ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster5");
String manifest = gson.toJson(attrs);
System.out.println(manifest);
Assert.assertTrue(
manifest.indexOf("main_group") != -1
&& manifest.indexOf("expanded_master") != -1
&& manifest.indexOf("expanded_worker") != -1,
"manifest should contains nodegroups");
Assert.assertTrue(
manifest
.indexOf("{\"name\":\"my-cluster5\",\"groups\":[{\"name\":\"main_group\",\"roles\":[\"hadoop_namenode\"],\"instance_num\":1,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":3,\"memory\":15000,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster5/main_group\"},{\"name\":\"expanded_master\",\"roles\":[\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster5/expanded_master\"},{\"name\":\"expanded_worker\",\"roles\":[\"hadoop_datanode\",\"hadoop_tasktracker\"],\"instance_num\":3,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster5/expanded_worker\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp2\"]},{\"name\":\"cluster2\",\"vc_rps\":[\"rp1\",\"rp2\"]},{\"name\":\"cluster4\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork\",\"type\":\"dhcp\"}]") != -1,
.indexOf("{\"name\":\"my-cluster5\",\"groups\":[{\"name\":\"main_group\",\"roles\":[\"hadoop_namenode\",\"hadoop_resourcemanager\",\"hadoop_datanode\",\"hadoop_nodemanager\"],\"instance_num\":1,\"storage\":{\"type\":\"local\",\"shares\":\"NORMAL\",\"sizeGB\":50,\"diskNum\":2,\"shareDatastore\":false},\"cpu\":3,\"memory\":15000,\"swap_ratio\":1.0,\"ha\":\"off\"") == 0,
"manifest is inconsistent");
}

@Test(groups = { "TestClusterConfigManager" })
public void testClusterConfigWithGroupStoragePattern() {
ClusterCreate spec = new ClusterCreate();
spec.setNetworkConfig(createNetConfigs());
spec.setName("my-cluster6");
spec.setDistro("bigtop");
spec.setDistroVendor(Constants.DEFAULT_VENDOR);
List<String> rps = new ArrayList<String>();
rps.add("myRp2");
rps.add("myRp3");
@@ -1152,6 +1158,9 @@ public void testClusterConfigWithGroupStoragePattern() {
group.setName("main_group");
List<String> roles = new ArrayList<String>();
roles.add("hadoop_namenode");
roles.add("hadoop_resourcemanager");
roles.add("hadoop_datanode");
roles.add("hadoop_nodemanager");
group.setRoles(roles);
StorageRead storage = new StorageRead();
storage.setType(DatastoreType.LOCAL.toString());
@@ -1169,15 +1178,8 @@ public void testClusterConfigWithGroupStoragePattern() {
ClusterCreate attrs = clusterConfigMgr.getClusterConfig("my-cluster6");
String manifest = gson.toJson(attrs);
System.out.println(manifest);
- Assert.assertTrue(
- manifest.indexOf("main_group") != -1
- && manifest.indexOf("expanded_master") != -1
- && manifest.indexOf("expanded_worker") != -1,
- "manifest should contains nodegroups");
- Assert.assertTrue(
- manifest
- .indexOf("{\"name\":\"my-cluster6\",\"groups\":[{\"name\":\"main_group\",\"roles\":[\"hadoop_namenode\"],\"instance_num\":1,\"storage\":{\"type\":\"local\",\"size\":100,\"name_pattern\":[\"vmfs*\",\"local1\"]},\"cpu\":3,\"memory\":15000,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster6/main_group\"},{\"name\":\"expanded_master\",\"roles\":[\"hadoop_jobtracker\"],\"instance_num\":1,\"storage\":{\"type\":\"shared\",\"size\":50},\"cpu\":2,\"memory\":7500,\"ha\":\"on\",\"vm_folder_path\":\"SERENGETI-null/my-cluster6/expanded_master\"},{\"name\":\"expanded_worker\",\"roles\":[\"hadoop_datanode\",\"hadoop_tasktracker\"],\"instance_num\":3,\"storage\":{\"type\":\"local\",\"size\":50},\"cpu\":1,\"memory\":3748,\"ha\":\"off\",\"vm_folder_path\":\"SERENGETI-null/my-cluster6/expanded_worker\"}],\"distro\":\"apache\",\"vc_clusters\":[{\"name\":\"cluster1\",\"vc_rps\":[\"rp2\"]},{\"name\":\"cluster2\",\"vc_rps\":[\"rp1\",\"rp2\"]},{\"name\":\"cluster4\",\"vc_rps\":[\"rp1\"]}],\"template_id\":\"vm-001\",\"networking\":[{\"port_group\":\"CFNetwork\",\"type\":\"dhcp\"}]") != -1,
- "manifest is inconsistent");
+ Assert.assertEquals(attrs.getNodeGroup("main_group").getStorage().getDiskstoreNamePattern().toString(),
+ "[share1, share2, local1, vmfs.*]");
}

public void testClusterConfigWithNoSlave() {
@@ -78,6 +78,12 @@ public class NodeGroupEntity extends EntityBase {
@Column(name = "storage_size")
private int storageSize;

@Column(name = "disk_number")
private Integer diskNum = 0;

@Column(name = "is_share_datastore")
private Boolean shareDatastore = true;

@ManyToOne
@JoinColumn(name = "cluster_id")
private ClusterEntity cluster;
@@ -428,6 +434,8 @@ public NodeGroupRead toNodeGroupRead(boolean ignoreObsoleteNode) {
StorageRead storage = new StorageRead();
storage.setType(this.storageType.toString());
storage.setSizeGB(this.storageSize);
storage.setDiskNum(this.diskNum);
storage.setShareDatastore(this.shareDatastore);

// set dsNames/dsNames4Data/dsNames4System
List<String> datastoreNameList = getVcDatastoreNameList();
@@ -490,4 +498,20 @@ public boolean equals(Object nodeGroup) {
}
return this.name.equals(group.getName());
}

public Integer getDiskNum() {
return diskNum;
}

public void setDiskNum(Integer diskNum) {
this.diskNum = diskNum;
}

public Boolean isShareDatastore() {
return shareDatastore;
}

public void setShareDatastore(Boolean shareDatastore) {
this.shareDatastore = shareDatastore;
}
}
@@ -760,6 +760,26 @@ private boolean placeDisk(VirtualNode vNode, AbstractHost host) {
for (DiskSpec disk : node.getDisks()) {
if (DiskType.DATA_DISK == disk.getDiskType()) {
if (disk.isSeparable()) {
String storageType = node.getNodeGroup().getStorage().getType();
int disksNum = node.getNodeGroup().getStorage().getDiskNum();
logger.info(String.format("%1$s disks number per node for node %2$s is %3$d", storageType, node.getVmName(), disksNum));
if (disksNum > 0) {
int subdiskSize = disk.getSize() / disksNum;
logger.info(String.format("%1$dGB storage for node %2$s is splited into %3$d disks and each single disk size is %4$dGB", disk.getSize(), node.getVmName(), disksNum, subdiskSize));
for (int i = 0; i < disksNum; i++) {
if (i == disksNum - 1) {
// in case disk.getSize() cannot be divided evenly by disksNum,
// the last disk will be slightly bigger than the others
subdiskSize = disk.getSize() - subdiskSize * i;
}
String subdiskName = disk.getName().split("\\.")[0] + i + ".vmdk";
logger.info(String.format("Add an unseparable disk %1$s (%2$dGB) for node %3$s", subdiskName, subdiskSize, node.getVmName()));
unseparable.add(new DiskSpec(subdiskName, subdiskSize, node
.getVmName(), false, disk.getDiskType(), disk.getController(), null, disk.getAllocType(),
null, null, null));
}
continue;
}
separable.add(disk);
} else {
unseparable.add(disk);
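The last disk absorbs the remainder whenever the total size does not divide evenly. A worked sketch of the same arithmetic with hypothetical numbers:

   // Splitting 50GB across 3 disks: subdiskSize = 50 / 3 = 16
   int totalGB = 50, disksNum = 3;
   int subdiskSize = totalGB / disksNum;
   for (int i = 0; i < disksNum; i++) {
      if (i == disksNum - 1) {
         subdiskSize = totalGB - subdiskSize * i; // last disk: 50 - 16 * 2 = 18GB
      }
      // resulting sizes: 16GB, 16GB, 18GB -- summing back to 50GB
   }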
2 changes: 2 additions & 0 deletions server/serengeti/src/main/resources/schema.sql
@@ -127,6 +127,8 @@ create table node_group (
ha_flag varchar(10),
storage_type varchar(255),
storage_size integer,
disk_number integer,
is_share_datastore boolean,
vc_datastore_names text,
sd_datastore_names text,
dd_datastore_names text,
7 changes: 7 additions & 0 deletions server/serengeti/src/main/resources/serengeti.properties
@@ -101,6 +101,13 @@ storage.zookeepergroup.small = 20
# The system disk and swap disk will use the same controller type
storage.system_swap.disk.controller.type = VirtualLsiLogicController

# Set the default disk number for each node. This can be overridden by diskNum specified in the cluster spec file.
# The total storage of a node splits into the specified number of disks.
# 0 by default, which means using the default storage split policy.
# Use 'storage.local.disk_number_per_node' for LOCAL storage and 'storage.shared.disk_number_per_node' for SHARED storage.
storage.local.disk_number_per_node = 0
storage.shared.disk_number_per_node = 0

elastic_runtime.automation.enable = false

# cluster.clone.service can be: simple, fast, instant
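ClusterConfigManager reads the storage.<type>.disk_number_per_node keys added above per storage type, and resolves the analogous storage.<type>.is_share_datastore keys (defaulting to true) the same way. A minimal sketch of the lookup, mirroring getDiskNumberForNodeGroup:

   // storageType is LOCAL or SHARED, so the key becomes e.g. "storage.local.disk_number_per_node"
   String key = String.format("storage.%1$s.disk_number_per_node", storageType.toLowerCase());
   int diskNum = Configuration.getInt(key, 0); // 0 -> default split policy in PlacementPlanner#placeDisk()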
@@ -70,7 +70,7 @@ public AmClusterValidator() {
}

public boolean validateBlueprint(ClusterBlueprint blueprint) {
logger.info("Start to validate bludprint for cluster "
logger.info("Start to validate blueprint for cluster "
+ blueprint.getName());
HadoopStack hadoopStack = blueprint.getHadoopStack();
String distro = hadoopStack.getDistro();
@@ -41,7 +41,7 @@ public class ClusterValidator {

public boolean validateBlueprint(ClusterBlueprint blueprint, List<String> distroRoles)
throws ValidationException {
logger.info("Start to validate bludprint for cluster " + blueprint.getName());
logger.info("Start to validate blueprint for cluster " + blueprint.getName());
return validateDistros(blueprint, distroRoles);
}

@@ -74,7 +74,7 @@ private void sortNodeGroupRoles(ClusterBlueprint blueprint) {
throw ClusterConfigException.NO_HADOOP_ROLE_SPECIFIED(nodeGroup.getName());
}
if (!enumRoles.contains(HadoopRole.CUSTOMIZED_ROLE)) {
logger.info("Soring roles based on role dependency and relationship with HDFS");
logger.info("Sorting roles name based on role dependency and relationship with HDFS");
Collections.sort(roles, new RoleComparactor());
nodeGroup.setRoles(roles);
}
