Permalink
Browse files

Adding the notion of backup replication (but not wiring it up yet)

  • Loading branch information...
1 parent 32a1799 commit cdd5a1923804f9dab58cec386d1635018736b67e ayenderahien committed Jun 5, 2009
Showing 660 additions and 311 deletions.
  1. +2 −2 Rhino.DistributedHashTable.IntegrationTests/MasterOverTheNetwork.cs
  2. +5 −4 Rhino.DistributedHashTable.IntegrationTests/Mini/OnlineRangeReplicationCommandTest.cs
  3. +14 −14 Rhino.DistributedHashTable.Tests/BackCopiesBehavior.cs
  4. +1 −1 Rhino.DistributedHashTable.Tests/MasterCaughtUpBehavior.cs
  5. +4 −3 Rhino.DistributedHashTable.Tests/MasterGaveUpBehavior.cs
  6. +1 −1 Rhino.DistributedHashTable.Tests/MasterJoinBehavior.cs
  7. +99 −7 Rhino.DistributedHashTable.Tests/NodeReplicationBehavior.cs
  8. +5 −0 Rhino.DistributedHashTable/Client/DistributedHashTableMasterClient.cs
  9. +19 −7 Rhino.DistributedHashTable/Commands/OnlineRangeReplicationCommand.cs
  10. +16 −14 Rhino.DistributedHashTable/Commands/RearrangeBackups.cs
  11. +1 −1 Rhino.DistributedHashTable/Commands/UpdateTopologyCommand.cs
  12. +7 −2 Rhino.DistributedHashTable/Hosting/DistributedHashTableMasterHost.cs
  13. +118 −57 Rhino.DistributedHashTable/Internal/DistributedHashTableMaster.cs
  14. +72 −38 Rhino.DistributedHashTable/Internal/DistributedHashTableNode.cs
  15. +4 −8 Rhino.DistributedHashTable/Internal/IDistributedHashTableMaster.cs
  16. +2 −2 Rhino.DistributedHashTable/Internal/IDistributedHashTableNode.cs
  17. +8 −0 Rhino.DistributedHashTable/Internal/ReplicationType.cs
  18. +12 −2 Rhino.DistributedHashTable/Internal/Segment.cs
  19. +245 −138 Rhino.DistributedHashTable/Protocol/ProtocolDef.cs
  20. +9 −1 Rhino.DistributedHashTable/Protocol/ProtocolDef.proto
  21. +14 −8 Rhino.DistributedHashTable/Protocol/ProtocolDef.proto.bin
  22. +1 −0 Rhino.DistributedHashTable/Rhino.DistributedHashTable.csproj
  23. +1 −1 Rhino.DistributedHashTable/Util/PrtoBufConverter.cs
@@ -62,7 +62,7 @@ public void CanCatchUpOnSegment()
});
var segments = masterProxy.Join(endpoint);
- masterProxy.CaughtUp(endpoint, segments[0].Index, segments[1].Index);
+ masterProxy.CaughtUp(endpoint, ReplicationType.Ownership, segments[0].Index, segments[1].Index);
var topology = masterProxy.GetTopology();
Assert.Equal(endpoint, topology.Segments[segments[0].Index].AssignedEndpoint);
@@ -90,7 +90,7 @@ public void CanGiveUpOnSegment()
var segments = masterProxy.Join(newEndpoint);
- masterProxy.GaveUp(newEndpoint, segments[0].Index, segments[1].Index);
+ masterProxy.GaveUp(newEndpoint, ReplicationType.Ownership, segments[0].Index, segments[1].Index);
var topology = masterProxy.GetTopology();
Assert.Equal(existingEndpoint,topology.Segments[segments[0].Index].AssignedEndpoint);
@@ -30,6 +30,7 @@ public OnlineSegmentReplicationCommandTest()
command = new OnlineSegmentReplicationCommand(
endpoint,
new[] { new Segment { Index = 0 }, new Segment { Index = 1 }, },
+ ReplicationType.Ownership,
node,
replication);
}
@@ -67,7 +68,7 @@ public void WillLetNodeKnowAboutAnyEmptySegmentsAssignedToIt()
var success = command.Execute();
Assert.True(success);
- node.AssertWasCalled(x => x.DoneReplicatingSegments(new int[] { 0 }));
+ node.AssertWasCalled(x => x.DoneReplicatingSegments(ReplicationType.Ownership, new int[] { 0 }));
}
[Fact]
@@ -162,8 +163,8 @@ public void WhenSegmentReplicationFailsWillGiveUpTheSegment()
var success = command.Execute();
Assert.False(success);
- node.AssertWasCalled(x=>x.GivingUpOn(0));
- node.AssertWasCalled(x => x.GivingUpOn(1));
+ node.AssertWasCalled(x => x.GivingUpOn(ReplicationType.Ownership, 0));
+ node.AssertWasCalled(x => x.GivingUpOn(ReplicationType.Ownership, 1));
}
[Fact]
@@ -174,7 +175,7 @@ public void WhenEmptySegmentReplicationFailsWillGiveEverythingUp()
var success = command.Execute();
Assert.False(success);
- node.AssertWasCalled(x => x.GivingUpOn(0,1));
+ node.AssertWasCalled(x => x.GivingUpOn(ReplicationType.Ownership, 0, 1));
}
[Fact]
@@ -23,7 +23,7 @@ public void AddingNewNodeResultInAllSegmentsHavingNoBackupCopies()
{
master.Join(endPoint);
- Assert.True(master.Segments.All(x => x.Backups.Count == 0));
+ Assert.True(master.Segments.All(x => x.PendingBackups.Count == 0));
}
}
@@ -39,15 +39,15 @@ public OnMasterWithOneExistingNode()
var existingEndpoint = NodeEndpoint.ForTest(3);
var ranges = master.Join(existingEndpoint);
- master.CaughtUp(existingEndpoint, ranges.Select(x=>x.Index).ToArray());
+ master.CaughtUp(existingEndpoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
}
[Fact]
public void AddingNewNodeResultInAllSegmentsHavingSingleBackupCopy()
{
var ranges = master.Join(endPoint);
- master.CaughtUp(endPoint, ranges.Select(x => x.Index).ToArray());
- Assert.True(master.Segments.All(x => x.Backups.Count == 1));
+ master.CaughtUp(endPoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
+ Assert.True(master.Segments.All(x => x.PendingBackups.Count == 1));
}
[Fact]
@@ -56,7 +56,7 @@ public void AddingNewNodeWillRaiseBackupChangedEvent()
bool wasChanged = false;
master.BackupChanged += (state, point, range) => wasChanged = true;
var ranges = master.Join(endPoint);
- master.CaughtUp(endPoint, ranges.Select(x => x.Index).ToArray());
+ master.CaughtUp(endPoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
Assert.True(wasChanged);
}
@@ -74,18 +74,18 @@ public OnMasterWithTwoNodes()
var existingEndpoint = NodeEndpoint.ForTest(3);
var ranges = master.Join(existingEndpoint);
- master.CaughtUp(existingEndpoint, ranges.Select(x => x.Index).ToArray());
+ master.CaughtUp(existingEndpoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
var anotherPoint = NodeEndpoint.ForTest(10);
ranges = master.Join(anotherPoint);
- master.CaughtUp(anotherPoint, ranges.Select(x => x.Index).ToArray());
+ master.CaughtUp(anotherPoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
}
[Fact]
public void AddingNewNodeResultInAllSegmentsHavingTwoBackupCopy()
{
var ranges = master.Join(endPoint);
- master.CaughtUp(endPoint, ranges.Select(x => x.Index).ToArray());
- Assert.True(master.Segments.All(x => x.Backups.Count == 2));
+ master.CaughtUp(endPoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
+ Assert.True(master.Segments.All(x => x.PendingBackups.Count == 2));
}
}
@@ -101,21 +101,21 @@ public OnMasterWithThreeNodes()
var existingEndpoint = NodeEndpoint.ForTest(3);
var ranges = master.Join(existingEndpoint);
- master.CaughtUp(existingEndpoint, ranges.Select(x => x.Index).ToArray());
+ master.CaughtUp(existingEndpoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
var anotherPoint = NodeEndpoint.ForTest(10);
ranges = master.Join(anotherPoint);
- master.CaughtUp(anotherPoint, ranges.Select(x => x.Index).ToArray());
+ master.CaughtUp(anotherPoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
ranges = master.Join(endPoint);
- master.CaughtUp(endPoint, ranges.Select(x => x.Index).ToArray());
+ master.CaughtUp(endPoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
}
[Fact]
public void AddingNewNodeResultInAllSegmentsHavingAtLeastTwoBackupCopy()
{
var yetAnotherEndPoint = NodeEndpoint.ForTest(7);
var ranges = master.Join(yetAnotherEndPoint);
- master.CaughtUp(yetAnotherEndPoint, ranges.Select(x => x.Index).ToArray());
- Assert.True(master.Segments.All(x => x.Backups.Count >= 2));
+ master.CaughtUp(yetAnotherEndPoint, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
+ Assert.True(master.Segments.All(x => x.PendingBackups.Count >= 2));
}
}
}
@@ -25,7 +25,7 @@ public void WillRaiseTopologyChangedEvent()
bool wasCalled = false;
master.TopologyChanged += () => wasCalled = true;
- master.CaughtUp(endPoint, ranges.First().Index);
+ master.CaughtUp(endPoint, ReplicationType.Ownership, ranges.First().Index);
Assert.True(wasCalled);
}
}
@@ -15,7 +15,8 @@ public OnGaveUp()
{
master = new DistributedHashTableMaster();
master.CaughtUp(NodeEndpoint.ForTest(9),
- master.Join(NodeEndpoint.ForTest(9)).Select(x=>x.Index).ToArray());
+ ReplicationType.Ownership,
+ master.Join(NodeEndpoint.ForTest(9)).Select(x => x.Index).ToArray());
endPoint = NodeEndpoint.ForTest(5);
}
@@ -26,8 +27,8 @@ public void WillRemoveThePendingMoveFromTheSegment()
var range = ranges.First();
Assert.NotNull(range.InProcessOfMovingToEndpoint);
-
- master.GaveUp(endPoint, range.Index);
+
+ master.GaveUp(endPoint, ReplicationType.Ownership, range.Index);
Assert.Null(range.InProcessOfMovingToEndpoint);
}
@@ -112,7 +112,7 @@ public NewEndpointJoiningMasterWithTwoNodes()
{
master.Join(endPoint);
var ranges = master.Join(anotherNodeInTheMaster);
- master.CaughtUp(anotherNodeInTheMaster, ranges.Select(x => x.Index).ToArray());
+ master.CaughtUp(anotherNodeInTheMaster, ReplicationType.Ownership, ranges.Select(x => x.Index).ToArray());
master.Join(newEndpoint);
}
@@ -1,4 +1,5 @@
using System.Net;
+using Rhino.DistributedHashTable.Commands;
using Rhino.DistributedHashTable.Internal;
using Rhino.DistributedHashTable.Parameters;
using Rhino.DistributedHashTable.Remote;
@@ -31,16 +32,15 @@ public WhenFinishedReplicatingSegment()
[Fact]
public void StateWillBeStarted()
{
- node.DoneReplicatingSegments(new[] { 0 });
+ node.DoneReplicatingSegments(ReplicationType.Ownership, new[] { 0 });
Assert.Equal(NodeState.Started, node.State);
}
[Fact]
public void WillLetMasterKnowItCaughtUp()
{
- var range = new Segment();
- node.DoneReplicatingSegments(new[] { 0 });
- master.AssertWasCalled(x => x.CaughtUp(node.Endpoint, 0));
+ node.DoneReplicatingSegments(ReplicationType.Ownership, new[] { 0 });
+ master.AssertWasCalled(x => x.CaughtUp(node.Endpoint, ReplicationType.Ownership, 0));
}
}
@@ -51,7 +51,7 @@ public class WhenReplicatingRequestToOwner
private readonly IExecuter executer;
private readonly NodeEndpoint endPoint;
private readonly IQueueManager queueManager;
- private Topology topology;
+ private readonly Topology topology;
private static NodeEndpoint backup1;
private static NodeEndpoint backup2;
@@ -67,7 +67,7 @@ public WhenReplicatingRequestToOwner()
{
Index = 0,
AssignedEndpoint = endPoint,
- Backups =
+ PendingBackups =
{
backup1,
backup2,
@@ -77,7 +77,7 @@ public WhenReplicatingRequestToOwner()
{
Index = 1,
AssignedEndpoint = backup1,
- Backups =
+ PendingBackups =
{
endPoint,
backup2,
@@ -121,5 +121,97 @@ public void WhenSendingToOtherBackupsFromBackupNode()
queueManager.Send(backup2.Async, Arg<MessagePayload>.Is.TypeOf);
}
}
+
+ public class WhenTopologyIsUpdated
+ {
+ private readonly DistributedHashTableNode node;
+ private readonly IDistributedHashTableMaster master;
+ private readonly IExecuter executer;
+ private readonly NodeEndpoint endPoint;
+
+ public WhenTopologyIsUpdated()
+ {
+ master = MockRepository.GenerateStub<IDistributedHashTableMaster>();
+ executer = MockRepository.GenerateStub<IExecuter>();
+ endPoint = NodeEndpoint.ForTest(1);
+ master.Stub(x => x.Join(Arg.Is(endPoint)))
+ .Return(new Segment[0]);
+ node = new DistributedHashTableNode(master, executer, new BinaryMessageSerializer(), endPoint, MockRepository.GenerateStub<IQueueManager>(),
+ MockRepository.GenerateStub<IDistributedHashTableNodeReplicationFactory>());
+ }
+
+ [Fact]
+ public void TopologyContainsPendingBackupsForCurrentNodeWillStartsBackupReplication()
+ {
+ node.SetTopology(new Topology(new[]
+ {
+ new Segment
+ {
+ AssignedEndpoint = NodeEndpoint.ForTest(91),
+ PendingBackups = {endPoint}
+ },
+ }));
+
+ executer.AssertWasCalled(x=>x.RegisterForExecution(Arg<OnlineSegmentReplicationCommand>.Is.TypeOf));
+ }
+
+ [Fact]
+ public void WillNotStartReplicationIfCurrentlyReplicatingBackups()
+ {
+ node.SetTopology(new Topology(new[]
+ {
+ new Segment
+ {
+ AssignedEndpoint = NodeEndpoint.ForTest(91),
+ PendingBackups = {endPoint}
+ },
+ }));
+
+ node.SetTopology(new Topology(new[]
+ {
+ new Segment
+ {
+ AssignedEndpoint = NodeEndpoint.ForTest(91),
+ PendingBackups = {endPoint}
+ },
+ }));
+
+ executer.AssertWasCalled(
+ x => x.RegisterForExecution(Arg<OnlineSegmentReplicationCommand>.Is.TypeOf),
+ o=>o.Repeat.Once());
+ }
+
+ [Fact]
+ public void AfterBackupsCompleteWillStartReplicationAgain()
+ {
+ OnlineSegmentReplicationCommand command = null;
+ executer.Stub(x => x.RegisterForExecution(Arg<OnlineSegmentReplicationCommand>.Is.TypeOf))
+ .WhenCalled(invocation => command = (OnlineSegmentReplicationCommand) invocation.Arguments[0]);
+
+ node.SetTopology(new Topology(new[]
+ {
+ new Segment
+ {
+ AssignedEndpoint = NodeEndpoint.ForTest(91),
+ PendingBackups = {endPoint}
+ },
+ }));
+
+ command.RaiseCompleted();
+
+ node.SetTopology(new Topology(new[]
+ {
+ new Segment
+ {
+ AssignedEndpoint = NodeEndpoint.ForTest(91),
+ PendingBackups = {endPoint}
+ },
+ }));
+
+ executer.AssertWasCalled(
+ x => x.RegisterForExecution(Arg<OnlineSegmentReplicationCommand>.Is.TypeOf),
+ o => o.Repeat.Twice());
+ }
+ }
}
}
@@ -10,6 +10,7 @@
using NodeEndpoint = Rhino.DistributedHashTable.Internal.NodeEndpoint;
using Segment = Rhino.DistributedHashTable.Internal.Segment;
using Rhino.DistributedHashTable.Util;
+using ReplicationType=Rhino.DistributedHashTable.Internal.ReplicationType;
namespace Rhino.DistributedHashTable.Client
{
@@ -83,6 +84,7 @@ private static MasterMessageUnion ReadReply(MasterMessageType responses, Stream
}
public void CaughtUp(NodeEndpoint endpoint,
+ ReplicationType type,
params int[] caughtUpSegments)
{
Execute((writer,
@@ -94,6 +96,7 @@ private static MasterMessageUnion ReadReply(MasterMessageType responses, Stream
CaughtUp = new CaughtUpRequestMessage.Builder
{
CaughtUpSegmentsList = { caughtUpSegments },
+ Type = type == ReplicationType.Backup ? Protocol.ReplicationType.Backup : Protocol.ReplicationType.Ownership,
Endpoint = new Protocol.NodeEndpoint.Builder
{
Async = endpoint.Async.ToString(),
@@ -126,6 +129,7 @@ public Topology GetTopology()
}
public void GaveUp(NodeEndpoint endpoint,
+ ReplicationType type,
params int[] rangesGivingUpOn)
{
Execute((writer,
@@ -137,6 +141,7 @@ public Topology GetTopology()
GaveUp = new GaveUpRequestMessage.Builder
{
GaveUpSegmentsList = { rangesGivingUpOn },
+ Type = type == ReplicationType.Backup ? Protocol.ReplicationType.Backup : Protocol.ReplicationType.Ownership,
Endpoint = new Protocol.NodeEndpoint.Builder
{
Async = endpoint.Async.ToString(),
Oops, something went wrong.

0 comments on commit cdd5a19

Please sign in to comment.