Refactoring: More class renaming and file reorganization
ClusterInstance has been dropped.

ClusterInstanceTest is now PartitionBalanceTest and is in tools.

ClusterTestUtils is a new util class. All of the getFOOClusterBar and getFOOStoreDef(s)BAR methods have been moved into this test helper.
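
In practical terms, call sites migrate roughly as follows (a minimal before/after sketch based on the diffs below; currentCluster stands in for a Cluster and storeDefs for a List&lt;StoreDefinition&gt;, both placeholders):

// Before this commit: partition balance was obtained through the ClusterInstance wrapper.
PartitionBalance balance = new ClusterInstance(currentCluster, storeDefs).getPartitionBalance();

// After this commit: PartitionBalance is constructed directly from the cluster and store
// definitions, and the utility used by the repartitioning heuristics is read the same way as before.
PartitionBalance balance = new PartitionBalance(currentCluster, storeDefs);
double utility = balance.getUtility();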
jayjwylie committed Jun 20, 2013
1 parent 54e48c5 commit 8d5d076
Showing 8 changed files with 220 additions and 294 deletions.
3 changes: 1 addition & 2 deletions src/java/voldemort/tools/PartitionAnalysisCLI.java
@@ -29,7 +29,6 @@

import voldemort.cluster.Cluster;
import voldemort.store.StoreDefinition;
import voldemort.utils.ClusterInstance;
import voldemort.utils.CmdUtils;
import voldemort.utils.Utils;
import voldemort.xml.ClusterMapper;
@@ -109,7 +108,7 @@ public static void main(String[] args) throws Exception {
Cluster currentCluster = new ClusterMapper().readCluster(new File(clusterXML));
List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new File(storesXML));

PartitionBalance partitionBalance = new ClusterInstance(currentCluster, storeDefs).getPartitionBalance();
PartitionBalance partitionBalance = new PartitionBalance(currentCluster, storeDefs);
System.out.println(partitionBalance);
}

25 changes: 9 additions & 16 deletions src/java/voldemort/tools/Repartitioner.java
@@ -28,7 +28,6 @@
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
import voldemort.store.StoreDefinition;
import voldemort.utils.ClusterInstance;
import voldemort.utils.ClusterUtils;
import voldemort.utils.Pair;
import voldemort.utils.RebalanceUtils;
@@ -157,7 +156,7 @@ public static Cluster repartition(final Cluster currentCluster,
final int greedySwapMaxPartitionsPerNode,
final int greedySwapMaxPartitionsPerZone,
final int maxContiguousPartitionsPerZone) {
PartitionBalance partitionBalance = new ClusterInstance(currentCluster, currentStoreDefs).getPartitionBalance();
PartitionBalance partitionBalance = new PartitionBalance(currentCluster, currentStoreDefs);
RebalanceUtils.dumpAnalysisToFile(outputDir,
RebalanceUtils.currentClusterFileName,
partitionBalance);
@@ -195,7 +194,7 @@ public static Cluster repartition(final Cluster currentCluster,
RebalanceUtils.validateCurrentFinalCluster(currentCluster, nextCluster);

System.out.println("-------------------------\n");
partitionBalance = new ClusterInstance(nextCluster, targetStoreDefs).getPartitionBalance();
partitionBalance = new PartitionBalance(nextCluster, targetStoreDefs);
double currentUtility = partitionBalance.getUtility();
System.out.println("Optimization number " + attempt + ": " + currentUtility
+ " max/min ratio");
@@ -219,7 +218,7 @@ public static Cluster repartition(final Cluster currentCluster,

System.out.println("\n==========================");
System.out.println("Final distribution");
partitionBalance = new ClusterInstance(minCluster, targetStoreDefs).getPartitionBalance();
partitionBalance = new PartitionBalance(minCluster, targetStoreDefs);
System.out.println(partitionBalance);

RebalanceUtils.dumpClusterToFile(outputDir, RebalanceUtils.finalClusterFileName, minCluster);
@@ -631,16 +630,14 @@ public static Cluster randomShufflePartitions(final Cluster targetCluster,
List<Integer> zoneIds = new ArrayList<Integer>(targetCluster.getZoneIds());
Cluster returnCluster = ClusterUtils.copyCluster(targetCluster);

double currentUtility = new ClusterInstance(returnCluster, storeDefs).getPartitionBalance()
.getUtility();
double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();

int successes = 0;
for(int i = 0; i < randomSwapAttempts; i++) {
Collections.shuffle(zoneIds, new Random(System.currentTimeMillis()));
for(Integer zoneId: zoneIds) {
Cluster shuffleResults = swapRandomPartitionsWithinZone(returnCluster, zoneId);
double nextUtility = new ClusterInstance(shuffleResults, storeDefs).getPartitionBalance()
.getUtility();
double nextUtility = new PartitionBalance(shuffleResults, storeDefs).getUtility();
if(nextUtility < currentUtility) {
System.out.println("Swap improved max-min ratio: " + currentUtility + " -> "
+ nextUtility + " (improvement " + successes
@@ -681,8 +678,7 @@ public static Cluster swapGreedyRandomPartitions(final Cluster targetCluster,
System.out.println("GreedyRandom : nodeIds:" + nodeIds);

Cluster returnCluster = ClusterUtils.copyCluster(targetCluster);
double currentUtility = new ClusterInstance(returnCluster, storeDefs).getPartitionBalance()
.getUtility();
double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();
int nodeIdA = -1;
int nodeIdB = -1;
int partitionIdA = -1;
@@ -719,8 +715,7 @@ public static Cluster swapGreedyRandomPartitions(final Cluster targetCluster,
partitionIdEh,
nodeIdBee,
partitionIdBee);
double swapUtility = new ClusterInstance(swapResult, storeDefs).getPartitionBalance()
.getUtility();
double swapUtility = new PartitionBalance(swapResult, storeDefs).getUtility();
if(swapUtility < currentUtility) {
currentUtility = swapUtility;
System.out.println(" -> " + currentUtility);
@@ -786,8 +781,7 @@ public static Cluster greedyShufflePartitions(final Cluster targetCluster,
return returnCluster;
}

double currentUtility = new ClusterInstance(returnCluster, storeDefs).getPartitionBalance()
.getUtility();
double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();

for(int i = 0; i < greedyAttempts; i++) {
Collections.shuffle(zoneIds, new Random(System.currentTimeMillis()));
@@ -807,8 +801,7 @@ public static Cluster greedyShufflePartitions(final Cluster targetCluster,
greedySwapMaxPartitionsPerNode,
greedySwapMaxPartitionsPerZone,
storeDefs);
double nextUtility = new ClusterInstance(shuffleResults, storeDefs).getPartitionBalance()
.getUtility();
double nextUtility = new PartitionBalance(shuffleResults, storeDefs).getUtility();

if(nextUtility == currentUtility) {
System.out.println("Not improving for zone: " + zoneId);
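
Every Repartitioner change above follows the same hill-climbing shape: compute the current utility from a PartitionBalance, propose a swap, and keep the proposal only if the utility drops. A simplified sketch of that loop, inferred from the randomShufflePartitions diff above (the acceptance step is elided by the diff and is an assumption here):

// Simplified sketch of randomShufflePartitions' inner loop (not the exact file contents).
Cluster returnCluster = ClusterUtils.copyCluster(targetCluster);
double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();

for(int i = 0; i < randomSwapAttempts; i++) {
    Collections.shuffle(zoneIds, new Random(System.currentTimeMillis()));
    for(Integer zoneId: zoneIds) {
        // Propose swapping two random partitions within one zone.
        Cluster shuffleResults = swapRandomPartitionsWithinZone(returnCluster, zoneId);
        double nextUtility = new PartitionBalance(shuffleResults, storeDefs).getUtility();
        if(nextUtility < currentUtility) {
            // Assumed acceptance step: keep the improved layout and its utility.
            returnCluster = shuffleResults;
            currentUtility = nextUtility;
        }
    }
}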
68 changes: 0 additions & 68 deletions src/java/voldemort/utils/ClusterInstance.java

This file was deleted.

@@ -1,5 +1,5 @@
/*
* Copyright 2013 LinkedIn, Inc
* 2013 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
@@ -13,21 +13,14 @@
* License for the specific language governing permissions and limitations under
* the License.
*/

package voldemort.utils;

import static org.junit.Assert.assertTrue;
package voldemort;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;

import org.junit.Test;

import voldemort.ServerTestUtils;
import voldemort.VoldemortException;
import voldemort.client.RoutingTier;
import voldemort.cluster.Cluster;
import voldemort.cluster.Node;
@@ -41,30 +34,7 @@

import com.google.common.collect.Lists;

/**
* This test focuses on constructing ClusterInstances and then invoking
* analyzeBalanceVerbose(). This method heavily exercises much of the
* partition/replicaType code paths.
*
* The positive test cases cover expected configurations:
* <ul>
* <li>2 or 3 zones
* <li>0 or 1 zones without partitions
* <li>one or more zones with new nodes ('new' meaning no partitions on that
* node)
* </ul>
*
* The "negative test cases" cover:
* <ul>
* <li>Store definition mis-match with cluster in terms of number of zones.
* <li>Insufficient nodes in new zone to reach desired replication level.
* </ul>
*
*/
public class ClusterInstanceTest {

// TODO: Rename class/file to PartitionBalanceTest. Change tests to directly
// use PartitionBalance rather than go through ClusterInstance.
public class ClusterTestUtils {

// TODO: Move these storeDefs and cluster helper test methods into
// ClusterTestUtils.
@@ -143,15 +113,15 @@ public static List<StoreDefinition> getZZ322StoreDefs(String storageType) {
*/
public static List<StoreDefinition> getZZStoreDefsInMemory() {
List<StoreDefinition> storeDefs = new LinkedList<StoreDefinition>();
storeDefs.addAll(getZZ111StoreDefs(InMemoryStorageConfiguration.TYPE_NAME));
storeDefs.addAll(ClusterTestUtils.getZZ111StoreDefs(InMemoryStorageConfiguration.TYPE_NAME));
storeDefs.addAll(getZZ211StoreDefs(InMemoryStorageConfiguration.TYPE_NAME));
storeDefs.addAll(getZZ322StoreDefs(InMemoryStorageConfiguration.TYPE_NAME));
return storeDefs;
}

public static List<StoreDefinition> getZZStoreDefsBDB() {
List<StoreDefinition> storeDefs = new LinkedList<StoreDefinition>();
storeDefs.addAll(getZZ111StoreDefs(BdbStorageConfiguration.TYPE_NAME));
storeDefs.addAll(ClusterTestUtils.getZZ111StoreDefs(BdbStorageConfiguration.TYPE_NAME));
storeDefs.addAll(getZZ211StoreDefs(BdbStorageConfiguration.TYPE_NAME));
storeDefs.addAll(getZZ322StoreDefs(BdbStorageConfiguration.TYPE_NAME));
return storeDefs;
@@ -500,112 +470,4 @@ public static Cluster getZZClusterWithNonContiguousZoneIDsAndNonContiguousNodeID
return new Cluster(cluster.getName(), nodeList, zones);
}

@Test
public void testBasicThingsThatShouldWork() {
ClusterInstance ci;

ci = new ClusterInstance(getZZCluster(), getZZStoreDefsInMemory());
ci.getPartitionBalance();

ci = new ClusterInstance(getZZZCluster(), getZZZStoreDefsInMemory());
ci.getPartitionBalance();
}

@Test
public void testEmptyZoneThingsThatShouldWork() {
ClusterInstance ci;

ci = new ClusterInstance(getZECluster(), getZZStoreDefsInMemory());
ci.getPartitionBalance();

ci = new ClusterInstance(getZEZCluster(), getZZZStoreDefsInMemory());
ci.getPartitionBalance();

ci = new ClusterInstance(getZEZClusterWithOnlyOneNodeInNewZone(), getZZZStoreDefsInMemory());
ci.getPartitionBalance();
}

@Test
public void testNewNodeThingsThatShouldWork() {
ClusterInstance ci;

ci = new ClusterInstance(getZZClusterWithNN(), getZZStoreDefsInMemory());
ci.getPartitionBalance();

ci = new ClusterInstance(getZEZClusterWithXNN(), getZZZStoreDefsInMemory());
ci.getPartitionBalance();
}

@Test
public void testClusterStoreZoneCountMismatch() {
ClusterInstance ci;
boolean veCaught;

veCaught = false;
try {
ci = new ClusterInstance(getZZCluster(), getZZZStoreDefsInMemory());
ci.getPartitionBalance();
} catch(VoldemortException ve) {
veCaught = true;
}
assertTrue(veCaught);

veCaught = false;
try {
ci = new ClusterInstance(getZZZCluster(), getZZStoreDefsInMemory());
ci.getPartitionBalance();
} catch(VoldemortException ve) {
veCaught = true;
}
assertTrue(veCaught);
}

@Test
public void testClusterWithZoneThatCannotFullyReplicate() {
ClusterInstance ci;

boolean veCaught = false;
try {
ci = new ClusterInstance(getZZZClusterWithOnlyOneNodeInNewZone(),
getZZZStoreDefsInMemory());
ci.getPartitionBalance();
} catch(VoldemortException ve) {
veCaught = true;
}
assertTrue(veCaught);
}

/**
* Confirm that zone Ids need not be contiguous. This tests for the ability
* to shrink zones.
*/
@Test
public void testNonContiguousZonesThatShouldWork() {
ClusterInstance ci;

ci = new ClusterInstance(getZZClusterWithNonContiguousZoneIDsButContiguousNodeIDs(),
getZZStoreDefsInMemory());
ci.getPartitionBalance();
}

// TODO: Fix handling of node Ids so that they do not need to be contiguous.
/**
* This should be a positive test. But, for now, is a negative test to
* confirm that we require nodeIds to be contiguous. This may become a
* problem if we ever shrink the number of zones.
*/
@Test
public void testNonContiguousZonesThatShouldWorkButDoNot() {
ClusterInstance ci;

boolean veCaught = false;
try {
ci = new ClusterInstance(getZZClusterWithNonContiguousZoneIDsAndNonContiguousNodeIDs(),
getZZStoreDefsInMemory());
ci.getPartitionBalance();
} catch(VoldemortException ve) {
veCaught = true;
}
assertTrue(veCaught);
}
}
