Permalink
Browse files

Adding start of client project - note that it is not tested or even run!

Fixed a bunch of issues relating to server availability in the face of failure.
All tests are passing, including the cluster tests

git-svn-id: https://rhino-tools.svn.sourceforge.net/svnroot/rhino-tools/experiments/dht/dht@2195 079b0acf-d9fa-0310-9935-e5ade295c882
  • Loading branch information...
1 parent a803da4 commit c950dd9838c122b73c9506b5abbd35644056428a ayenderahien committed Jun 6, 2009
Showing with 806 additions and 78 deletions.
  1. +288 −0 Rhino.DistributedHashTable.Client/DistributedHashTable.cs
  2. +35 −0 Rhino.DistributedHashTable.Client/Exceptions/NoMoreBackupsException.cs
  3. +2 −3 {Rhino.DistributedHashTable → Rhino.DistributedHashTable.Client}/IDistributedHashTable.cs
  4. +101 −0 Rhino.DistributedHashTable.Client/Pooling/DefaultConnectionPool.cs
  5. +9 −0 Rhino.DistributedHashTable.Client/Pooling/IConnectionPool.cs
  6. +36 −0 Rhino.DistributedHashTable.Client/Properties/AssemblyInfo.cs
  7. +82 −0 Rhino.DistributedHashTable.Client/Rhino.DistributedHashTable.Client.csproj
  8. +126 −0 Rhino.DistributedHashTable.Client/Util/Crc32.cs
  9. +74 −41 Rhino.DistributedHashTable.ClusterTests/ClusterTests.cs
  10. +2 −2 Rhino.DistributedHashTable.ClusterTests/MasterOverTheNetwork.cs
  11. +3 −1 Rhino.DistributedHashTable.IntegrationTests/OnlineRangeReplicationCommandTest.cs
  12. +2 −2 Rhino.DistributedHashTable.Tests/MasterJoinBehavior.cs
  13. +6 −0 Rhino.DistributedHashTable.sln
  14. +2 −2 Rhino.DistributedHashTable/Client/DistributedHashTableStorageClient.cs
  15. +15 −12 Rhino.DistributedHashTable/Commands/OnlineRangeReplicationCommand.cs
  16. +6 −3 Rhino.DistributedHashTable/Hosting/DistributedHashTableStorageHost.cs
  17. +1 −0 Rhino.DistributedHashTable/Internal/Constants.cs
  18. +1 −1 Rhino.DistributedHashTable/Internal/DistributedHashTableMaster.cs
  19. +3 −3 Rhino.DistributedHashTable/Internal/DistributedHashTableNode.cs
  20. +12 −4 Rhino.DistributedHashTable/Internal/DistributedHashTableStorage.cs
  21. +0 −3 Rhino.DistributedHashTable/Internal/IDistributedHashTableStorage.cs
  22. +0 −1 Rhino.DistributedHashTable/Rhino.DistributedHashTable.csproj
@@ -0,0 +1,288 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using Rhino.DistributedHashTable.Client.Exceptions;
+using Rhino.DistributedHashTable.Client.Pooling;
+using Rhino.DistributedHashTable.Client.Util;
+using Rhino.DistributedHashTable.Exceptions;
+using Rhino.DistributedHashTable.Internal;
+using Rhino.DistributedHashTable.Parameters;
+using Rhino.PersistentHashTable;
+
+namespace Rhino.DistributedHashTable.Client
+{
+ /// <summary>
+ /// Client-side facade over the distributed hash table cluster.
+ /// Each key is mapped to a segment (CRC32 of the key modulo the segment count),
+ /// requests are batched per owning node endpoint, and on failure the whole
+ /// batch is retried against the segment's backup endpoints in order.
+ /// </summary>
+ public class DistributedHashTable : IDistributedHashTable
+ {
+ private readonly IDistributedHashTableMaster master;
+ private readonly IConnectionPool pool;
+ // Cached cluster layout; refreshed when a node reports a version mismatch.
+ private Topology topology;
+
+ // NOTE(review): the constructor calls master.GetTopology(), so constructing
+ // this client performs a remote call and can throw on network failure.
+ public DistributedHashTable(IDistributedHashTableMaster master, IConnectionPool pool)
+ {
+ this.master = master;
+ this.pool = pool;
+ topology = master.GetTopology();
+ }
+
+ /// <summary>
+ /// Stores the given values. Results are returned in the same order as
+ /// <paramref name="valuesToAdd"/>, regardless of which node served each one.
+ /// </summary>
+ public PutResult[] Put(params PutRequest[] valuesToAdd)
+ {
+ return PutInternal(valuesToAdd, 0);
+ }
+
+ // Groups the requests by the endpoint that owns each key's segment at the
+ // given backup depth (0 = primary), sends one batch per endpoint, and
+ // scatters the per-batch results back to the caller's original ordering.
+ private PutResult[] PutInternal(PutRequest[] valuesToAdd, int backupIndex)
+ {
+ var results = new PutResult[valuesToAdd.Length];
+
+ // NOTE(review): Array.IndexOf finds the FIRST occurrence, so two reference-
+ // equal requests in the batch would map to the same result slot — confirm
+ // callers never pass duplicate request instances.
+ var groupedByEndpoint = from req in valuesToAdd
+ let er = new
+ {
+ OriginalIndex = Array.IndexOf(valuesToAdd, req),
+ Put = new ExtendedPutRequest
+ {
+ Bytes = req.Bytes,
+ ExpiresAt = req.ExpiresAt,
+ IsReadOnly = req.IsReadOnly,
+ Key = req.Key,
+ OptimisticConcurrency = req.OptimisticConcurrency,
+ ParentVersions = req.ParentVersions,
+ Segment = GetSegmentFromKey(req.Key),
+ }
+ }
+ group er by GetEndpointByBackupIndex(topology.Segments[er.Put.Segment], backupIndex) into g
+ select g;
+
+ foreach (var endpoint in groupedByEndpoint)
+ {
+ // A null group key means the segment has no backup at this depth:
+ // every candidate endpoint has been exhausted.
+ if (endpoint.Key == null)
+ throw new NoMoreBackupsException();
+
+ var requests = endpoint.ToArray();
+ var putRequests = requests.Select(x => x.Put).ToArray();
+
+ // Results come back in request order within the batch, so index i of
+ // putsResults corresponds to requests[i].
+ var putsResults = GetPutsResults(endpoint.Key, putRequests, backupIndex);
+ for (var i = 0; i < putsResults.Length; i++)
+ {
+ results[requests[i].OriginalIndex] = putsResults[i];
+ }
+ }
+ return results;
+ }
+
+ // backupIndex 0 selects the segment's primary; index i selects Backups[i-1].
+ // Returns null once the index runs past the available backups — the caller
+ // translates that null into NoMoreBackupsException.
+ private static NodeEndpoint GetEndpointByBackupIndex(Segment segment, int backupIndex)
+ {
+ if (backupIndex == 0)
+ return segment.AssignedEndpoint;
+ return segment.Backups.ElementAtOrDefault(backupIndex - 1);
+ }
+
+ // Executes one put batch against a single endpoint, handling the three
+ // failure modes:
+ //  - SeeOtherException: follow the redirect to the indicated endpoint.
+ //  - TopologyVersionDoesNotMatchException: refresh topology, re-route the batch.
+ //  - anything else: retry the batch at the next backup depth; if backups are
+ //    exhausted, rethrow the ORIGINAL failure rather than NoMoreBackupsException.
+ // NOTE(review): there is no retry cap — a redirect loop or a persistently
+ // stale topology would recurse indefinitely; confirm servers guarantee progress.
+ private PutResult[] GetPutsResults(NodeEndpoint endpoint,
+ ExtendedPutRequest[] putRequests,
+ int backupIndex)
+ {
+ try
+ {
+ using (var client = pool.Create(endpoint))
+ {
+ return client.Put(topology.Version, putRequests);
+ }
+ }
+ catch (SeeOtherException soe)
+ {
+ return GetPutsResults(soe.Endpoint, putRequests, backupIndex);
+ }
+ catch (TopologyVersionDoesNotMatchException)
+ {
+ RefreshTopology();
+ return PutInternal(putRequests, backupIndex);
+ }
+ catch (Exception)
+ {
+ try
+ {
+ return PutInternal(putRequests, backupIndex + 1);
+ }
+ catch (NoMoreBackupsException)
+ {
+ }
+ // Reached only when backups ran out: surface the underlying failure.
+ throw;
+ }
+ }
+
+ // Re-fetches the cluster layout from the master after a version mismatch.
+ private void RefreshTopology()
+ {
+ topology = master.GetTopology();
+ }
+
+ // Maps a key to its segment: CRC32 over the UTF-16 bytes of the key,
+ // modulo the segment count. Math.Abs because the unsigned CRC reinterpreted
+ // as int may be negative; the remainder's magnitude is < NumberOfSegments,
+ // so Math.Abs cannot overflow here.
+ private static int GetSegmentFromKey(string key)
+ {
+ var crc32 = (int)Crc32.Compute(Encoding.Unicode.GetBytes(key));
+ return Math.Abs(crc32 % Constants.NumberOfSegments);
+ }
+
+ /// <summary>
+ /// Reads the given keys. Results are returned in the same order as
+ /// <paramref name="valuesToGet"/>; each entry may hold multiple versions.
+ /// </summary>
+ public Value[][] Get(params GetRequest[] valuesToGet)
+ {
+ return GetInternal(valuesToGet, 0);
+
+ }
+
+ // Same routing scheme as PutInternal: group by owning endpoint at the given
+ // backup depth, batch per endpoint, scatter results back to input order.
+ private Value[][] GetInternal(GetRequest[] valuesToGet,
+ int backupIndex)
+ {
+ var results = new Value[valuesToGet.Length][];
+
+ var groupedByEndpoint = from req in valuesToGet
+ let er = new
+ {
+ OriginalIndex = Array.IndexOf(valuesToGet, req),
+ Get = new ExtendedGetRequest
+ {
+ Key = req.Key,
+ SpecifiedVersion = req.SpecifiedVersion,
+ Segment = GetSegmentFromKey(req.Key),
+ }
+ }
+ group er by GetEndpointByBackupIndex(topology.Segments[er.Get.Segment], backupIndex) into g
+ select g;
+ foreach (var endpoint in groupedByEndpoint)
+ {
+ if (endpoint.Key == null)
+ throw new NoMoreBackupsException();
+
+ var requests = endpoint.ToArray();
+ var getRequests = requests.Select(x => x.Get).ToArray();
+
+ // (named "putsResults" by copy/paste from PutInternal; these are Get results)
+ var putsResults = GetGetsResults(endpoint.Key, getRequests, backupIndex);
+ for (var i = 0; i < putsResults.Length; i++)
+ {
+ results[requests[i].OriginalIndex] = putsResults[i];
+ }
+
+ }
+
+ return results;
+ }
+
+ // Single-endpoint get batch; identical failure handling to GetPutsResults
+ // (redirect / topology refresh / fall through to next backup, rethrowing the
+ // original failure once backups are exhausted).
+ private Value[][] GetGetsResults(NodeEndpoint endpoint,
+ ExtendedGetRequest[] getRequests,
+ int backupIndex)
+ {
+ try
+ {
+ using (var client = pool.Create(endpoint))
+ {
+ return client.Get(topology.Version, getRequests);
+ }
+ }
+ catch (SeeOtherException soe)
+ {
+ return GetGetsResults(soe.Endpoint, getRequests, backupIndex);
+ }
+ catch (TopologyVersionDoesNotMatchException)
+ {
+ RefreshTopology();
+ return GetInternal(getRequests, backupIndex);
+ }
+ catch (Exception)
+ {
+ try
+ {
+ return GetInternal(getRequests, backupIndex + 1);
+ }
+ catch (NoMoreBackupsException)
+ {
+ }
+ throw;
+ }
+ }
+
+ /// <summary>
+ /// Removes the given keys. Result order matches <paramref name="valuesToRemove"/>;
+ /// each flag reports whether the corresponding key was removed.
+ /// </summary>
+ public bool[] Remove(params RemoveRequest[] valuesToRemove)
+ {
+ return RemoveInternal(valuesToRemove, 0);
+ }
+
+ // Same routing scheme as PutInternal/GetInternal, for removals.
+ private bool[] RemoveInternal(RemoveRequest[] valuesToRemove,
+ int backupIndex)
+ {
+ var results = new bool[valuesToRemove.Length];
+
+ var groupedByEndpoint = from req in valuesToRemove
+ let er = new
+ {
+ OriginalIndex = Array.IndexOf(valuesToRemove, req),
+ Remove = new ExtendedRemoveRequest
+ {
+ Key = req.Key,
+ SpecificVersion = req.SpecificVersion,
+ Segment = GetSegmentFromKey(req.Key),
+ }
+ }
+ group er by GetEndpointByBackupIndex(topology.Segments[er.Remove.Segment], backupIndex) into g
+ select g;
+
+ foreach (var endpoint in groupedByEndpoint)
+ {
+ if (endpoint.Key == null)
+ throw new NoMoreBackupsException();
+
+ var requests = endpoint.ToArray();
+ var removeRequests = requests.Select(x => x.Remove).ToArray();
+
+ var removesResults = GetRemovesResults(endpoint.Key, removeRequests, backupIndex);
+ for (var i = 0; i < removesResults.Length; i++)
+ {
+ results[requests[i].OriginalIndex] = removesResults[i];
+ }
+ }
+ return results;
+ }
+
+ // Single-endpoint remove batch; identical failure handling to GetPutsResults.
+ private bool[] GetRemovesResults(NodeEndpoint endpoint,
+ ExtendedRemoveRequest[] removeRequests,
+ int backupIndex)
+ {
+ try
+ {
+ using (var client = pool.Create(endpoint))
+ {
+ return client.Remove(topology.Version, removeRequests);
+ }
+ }
+ catch (SeeOtherException soe)
+ {
+ return GetRemovesResults(soe.Endpoint, removeRequests, backupIndex);
+ }
+ catch (TopologyVersionDoesNotMatchException)
+ {
+ RefreshTopology();
+ return RemoveInternal(removeRequests, backupIndex);
+ }
+ catch (Exception)
+ {
+ try
+ {
+ return RemoveInternal(removeRequests, backupIndex + 1);
+ }
+ catch (NoMoreBackupsException)
+ {
+ }
+ throw;
+ }
+ }
+
+ // Not yet implemented (commit message notes this client is untested/unrun).
+ public int[] AddItems(params AddItemRequest[] itemsToAdd)
+ {
+ throw new NotImplementedException();
+ }
+
+ // NOTE(review): "RemoteItems" is presumably a typo for RemoveItems — the name
+ // comes from IDistributedHashTable, so fixing it requires changing the
+ // interface and all implementations together.
+ public void RemoteItems(params RemoveItemRequest[] itemsToRemove)
+ {
+ throw new NotImplementedException();
+ }
+
+ // Not yet implemented.
+ public KeyValuePair<int, byte[]>[] GetItems(GetItemsRequest request)
+ {
+ throw new NotImplementedException();
+ }
+ }
+}
@@ -0,0 +1,35 @@
+using System;
+using System.Runtime.Serialization;
+
+namespace Rhino.DistributedHashTable.Client.Exceptions
+{
+ /// <summary>
+ /// Thrown by the DHT client when a request has failed against a segment's
+ /// assigned endpoint and every one of its backup endpoints — there is no
+ /// further node to fail over to.
+ /// </summary>
+ [Serializable]
+ public class NoMoreBackupsException : Exception
+ {
+ // Standard set of constructors for a serializable custom exception
+ // (default / message / message+inner / serialization).
+
+ public NoMoreBackupsException()
+ {
+ }
+
+ public NoMoreBackupsException(string message) : base(message)
+ {
+ }
+
+ public NoMoreBackupsException(string message,
+ Exception inner) : base(message, inner)
+ {
+ }
+
+ // Deserialization constructor required by the [Serializable] pattern.
+ protected NoMoreBackupsException(
+ SerializationInfo info,
+ StreamingContext context) : base(info, context)
+ {
+ }
+ }
+}
@@ -1,9 +1,8 @@
using System.Collections.Generic;
+using Rhino.PersistentHashTable;
-namespace Rhino.DistributedHashTable
+namespace Rhino.DistributedHashTable.Client
{
- using PersistentHashTable;
-
public interface IDistributedHashTable
{
PutResult[] Put(params PutRequest[] valuesToAdd);
Oops, something went wrong.

0 comments on commit c950dd9

Please sign in to comment.