
Commit

Simplify test code, make it more robust, and improve error messages
davidegrohmann committed Aug 25, 2016
1 parent f38418f commit 42c990c
Showing 1 changed file with 64 additions and 87 deletions.
151 changes: 64 additions & 87 deletions enterprise/ha/src/test/java/org/neo4j/ha/UpdatePullerSwitchIT.java
@@ -19,12 +19,13 @@
  */
 package org.neo4j.ha;
 
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 
+import java.util.Arrays;
 import java.util.Map;
-import java.util.Set;
+import java.util.Optional;
+import java.util.function.Function;
 
 import org.neo4j.cluster.ClusterSettings;
 import org.neo4j.cluster.InstanceId;
@@ -34,142 +35,118 @@
 import org.neo4j.graphdb.Transaction;
 import org.neo4j.helpers.collection.Iterators;
 import org.neo4j.kernel.configuration.Config;
-import org.neo4j.kernel.ha.HaSettings;
 import org.neo4j.kernel.ha.HighlyAvailableGraphDatabase;
-import org.neo4j.kernel.ha.SlaveUpdatePuller;
 import org.neo4j.kernel.ha.UpdatePuller;
 import org.neo4j.kernel.impl.ha.ClusterManager;
 import org.neo4j.test.ha.ClusterRule;
 
+import static java.lang.String.format;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.neo4j.kernel.ha.HaSettings.tx_push_factor;
+import static org.neo4j.kernel.ha.SlaveUpdatePuller.UPDATE_PULLER_THREAD_PREFIX;
+import static org.neo4j.kernel.impl.ha.ClusterManager.allSeesAllAsAvailable;
+import static org.neo4j.kernel.impl.ha.ClusterManager.clusterOfSize;
+import static org.neo4j.kernel.impl.ha.ClusterManager.masterAvailable;
 
 public class UpdatePullerSwitchIT
 {
     @Rule
-    public final ClusterRule clusterRule = new ClusterRule( getClass() );
-    private ClusterManager.ManagedCluster managedCluster;
-
-    @Before
-    public void setup() throws Exception
-    {
-        managedCluster = clusterRule.withCluster( ClusterManager.clusterOfSize( 2 ) )
-                .withSharedSetting( tx_push_factor, "0" )
-                .withSharedSetting( HaSettings.pull_interval, "100s" )
-                .withFirstInstanceId( 6 )
-                .startCluster();
-    }
+    public final ClusterRule clusterRule = new ClusterRule( getClass() ).withCluster( clusterOfSize( 2 ) )
+            .withSharedSetting( tx_push_factor, "0" );
 
     @Test
     public void updatePullerSwitchOnNodeModeSwitch() throws Throwable
     {
-        String masterLabel = "masterLabel";
-        createLabeledNodeOnMaster( masterLabel );
+        ClusterManager.ManagedCluster cluster = clusterRule.startCluster();
+
+        Label firstLabel = Label.label( "firstLabel" );
+        createLabeledNodeOnMaster( cluster, firstLabel );
         // force update puller to work
-        pullUpdatesOnSlave();
+        pullUpdatesOnSlave( cluster );
         // node should exist on slave now
-        checkLabeledNodeExistanceOnSlave( masterLabel );
+        checkLabeledNodeExistanceOnSlave( cluster, firstLabel );
         // verify that puller working on slave and not working on master
-        verifyUpdatePullerThreads();
+        verifyUpdatePullerThreads( cluster );
 
-        // switch roles in cluster - now update puller should be stopped on old slave and start on old master.
-        ClusterManager.RepairKit initialMasterRepairKit = managedCluster.shutdown( managedCluster.getMaster() );
-        managedCluster.await( ClusterManager.masterAvailable() );
-
-        String pretenderMasterLabel = "pretenderMasterLabel";
-        createLabeledNodeOnMaster( pretenderMasterLabel );
-
-        initialMasterRepairKit.repair();
-        managedCluster.await( ClusterManager.masterSeesSlavesAsAvailable( 1 ) );
-
-        // forcing updates pulling
-        pullUpdatesOnSlave();
-        checkLabeledNodeExistanceOnSlave( pretenderMasterLabel );
-        // checking pulling threads
-        verifyUpdatePullerThreads();
-
-        // and finally switching roles back
-        ClusterManager.RepairKit justiceRepairKit = managedCluster.shutdown( managedCluster.getMaster() );
-        managedCluster.await( ClusterManager.masterAvailable() );
-
-        String justicePrevailedLabel = "justice prevailed";
-        createLabeledNodeOnMaster( justicePrevailedLabel );
-
-        justiceRepairKit.repair();
-        managedCluster.await( ClusterManager.masterSeesSlavesAsAvailable( 1 ) );
-
-        // forcing pull updates
-        pullUpdatesOnSlave();
-        checkLabeledNodeExistanceOnSlave( justicePrevailedLabel );
-        // checking pulling threads
-        verifyUpdatePullerThreads();
+        for ( int i = 1; i <= 2; i++ )
+        {
+            // switch roles in cluster - now update puller should be stopped on old slave and start on old master.
+            ClusterManager.RepairKit repairKit = cluster.shutdown( cluster.getMaster() );
+            cluster.await( masterAvailable() );
+
+            Label currentLabel = Label.label( "label_" + i );
+            createLabeledNodeOnMaster( cluster, currentLabel );
+
+            repairKit.repair();
+            cluster.await( allSeesAllAsAvailable(), 120 );
+
+            // forcing updates pulling
+            pullUpdatesOnSlave( cluster );
+            checkLabeledNodeExistanceOnSlave( cluster, currentLabel );
+            // checking pulling threads
+            verifyUpdatePullerThreads( cluster );
+        }
     }
 
-    private void verifyUpdatePullerThreads()
+    private void verifyUpdatePullerThreads( ClusterManager.ManagedCluster cluster )
     {
-        InstanceId masterId = managedCluster.getMaster().getDependencyResolver().resolveDependency( Config.class ).get( ClusterSettings.server_id );
-        InstanceId slaveId = managedCluster.getAnySlave().getDependencyResolver().resolveDependency( Config.class ).get( ClusterSettings.server_id );
-        Map<Thread,StackTraceElement[]> allStackTraces = Thread.getAllStackTraces();
-        Set<Thread> threads = allStackTraces.keySet();
-        assertNull( "Master should not have any puller threads", findThreadWithPrefix( threads,
-                SlaveUpdatePuller.UPDATE_PULLER_THREAD_PREFIX + masterId ) );
-        assertNotNull( "Slave should have active puller thread", findThreadWithPrefix( threads,
-                SlaveUpdatePuller.UPDATE_PULLER_THREAD_PREFIX + slaveId ) );
+        Map<Thread,StackTraceElement[]> threads = Thread.getAllStackTraces();
+        Optional<Map.Entry<Thread,StackTraceElement[]>> masterEntry =
+                findThreadWithPrefix( threads, UPDATE_PULLER_THREAD_PREFIX + serverId( cluster.getMaster() ) );
+        assertFalse( format( "Found an update puller on master.%s", masterEntry.map( this::prettyPrint ).orElse( "" ) ),
+                masterEntry.isPresent() );
+
+        Optional<Map.Entry<Thread,StackTraceElement[]>> slaveEntry =
+                findThreadWithPrefix( threads, UPDATE_PULLER_THREAD_PREFIX + serverId( cluster.getAnySlave() ) );
+        assertTrue( "Found no update puller on slave", slaveEntry.isPresent() );
     }
 
-    /*
-     * Returns the name, as a String, of first thread found that has a name starting with the provided prefix,
-     * null otherwise.
-     */
-    private String findThreadWithPrefix( Set<Thread> threads, String prefix )
+    private String prettyPrint( Map.Entry<Thread,StackTraceElement[]> entry )
     {
-        for ( Thread thread : threads )
-        {
-            if ( thread.getName().startsWith( prefix ) )
-            {
-                return thread.getName();
-            }
-        }
-        return null;
+        return format( "\n\tThread: %s\n\tStackTrace: %s", entry.getKey(), Arrays.toString( entry.getValue() ) );
     }
 
-    private void pullUpdatesOnSlave() throws InterruptedException
+    private InstanceId serverId( HighlyAvailableGraphDatabase db )
+    {
+        return db.getDependencyResolver().resolveDependency( Config.class ).get( ClusterSettings.server_id );
+    }
+
+    private Optional<Map.Entry<Thread,StackTraceElement[]>> findThreadWithPrefix(
+            Map<Thread,StackTraceElement[]> threads, String prefix )
+    {
+        return threads.entrySet().stream()
+                .filter( entry -> entry.getKey().getName().startsWith( prefix ) ).findFirst();
+    }
+
+    private void pullUpdatesOnSlave( ClusterManager.ManagedCluster cluster ) throws InterruptedException
     {
         UpdatePuller updatePuller =
-                managedCluster.getAnySlave().getDependencyResolver().resolveDependency( UpdatePuller.class );
+                cluster.getAnySlave().getDependencyResolver().resolveDependency( UpdatePuller.class );
         assertTrue( "We should always have some updates to pull", updatePuller.tryPullUpdates() );
     }
 
-    private void checkLabeledNodeExistanceOnSlave( String label )
+    private void checkLabeledNodeExistanceOnSlave( ClusterManager.ManagedCluster cluster, Label label )
     {
-        // since we have only 2 nodes in cluster its safe to call get any cluster
-        HighlyAvailableGraphDatabase slave = managedCluster.getAnySlave();
+        HighlyAvailableGraphDatabase slave = cluster.getAnySlave();
         try ( Transaction transaction = slave.beginTx() )
         {
-            checkNodeWithLabelExists( slave, label );
+            ResourceIterator<Node> slaveNodes = slave.findNodes( label );
+            assertEquals( 1, Iterators.asList( slaveNodes ).size() );
+            transaction.success();
         }
-
     }
 
-    private void createLabeledNodeOnMaster( String label )
+    private void createLabeledNodeOnMaster( ClusterManager.ManagedCluster cluster, Label label )
     {
-        HighlyAvailableGraphDatabase master = managedCluster.getMaster();
+        HighlyAvailableGraphDatabase master = cluster.getMaster();
         try ( Transaction transaction = master.beginTx() )
         {
             Node masterNode = master.createNode();
-            masterNode.addLabel( Label.label( label ) );
+            masterNode.addLabel( label );
             transaction.success();
         }
     }
-
-    private void checkNodeWithLabelExists( HighlyAvailableGraphDatabase database, String label )
-    {
-        ResourceIterator<Node> slaveNodes = database.findNodes( Label.label( label ) );
-        assertEquals( 1, Iterators.asList( slaveNodes ).size() );
-    }
 }
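The "improve error messages" part of this commit comes from changing findThreadWithPrefix to return an Optional map entry from Thread.getAllStackTraces() instead of a nullable String, so the assertion can print the offending thread's name and stack trace only when the check actually fails. Below is a minimal standalone sketch of that pattern, not code from the commit: the demo class, the fake puller thread, and the "UpdatePuller@" prefix value are illustrative assumptions standing in for SlaveUpdatePuller.UPDATE_PULLER_THREAD_PREFIX and a real server id.

import java.util.Arrays;
import java.util.Map;
import java.util.Optional;

public class ThreadPrefixLookupDemo
{
    // Assumed placeholder; the test uses SlaveUpdatePuller.UPDATE_PULLER_THREAD_PREFIX.
    private static final String UPDATE_PULLER_PREFIX = "UpdatePuller@";

    public static void main( String[] args ) throws InterruptedException
    {
        // Start a daemon thread that mimics the update puller of server id 2.
        Thread puller = new Thread( ThreadPrefixLookupDemo::sleepQuietly, UPDATE_PULLER_PREFIX + "2" );
        puller.setDaemon( true );
        puller.start();
        Thread.sleep( 100 ); // give the thread a moment to show up in the snapshot

        // Snapshot all live threads together with their current stack traces.
        Map<Thread,StackTraceElement[]> threads = Thread.getAllStackTraces();

        // Same lookup the test uses: stream over the snapshot, keep the first name match.
        Optional<Map.Entry<Thread,StackTraceElement[]>> entry = threads.entrySet().stream()
                .filter( e -> e.getKey().getName().startsWith( UPDATE_PULLER_PREFIX + "2" ) )
                .findFirst();

        // The map entry carries the stack trace, so a failure message can say exactly
        // which thread was found and what it was doing; when there is no match,
        // map(...).orElse( "" ) contributes nothing to the message.
        System.out.println( entry
                .map( e -> String.format( "Found %s at %s", e.getKey().getName(), Arrays.toString( e.getValue() ) ) )
                .orElse( "no matching thread" ) );
    }

    private static void sleepQuietly()
    {
        try
        {
            Thread.sleep( 10_000 );
        }
        catch ( InterruptedException ignored )
        {
            Thread.currentThread().interrupt();
        }
    }
}

Keeping the whole map entry rather than just the thread name is what lets the test build the diagnostic lazily: the stack trace is formatted only inside the assertion message, on the failure path.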
