fix id overwrite bug when indexing from many nodes at once

commit 02f35ed3a42f1f6599a63cc72ac55ddb160d353a (1 parent: 9dc5269)
Jake Luciani authored July 06, 2011
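
Context for the diff below (an editor's sketch, not part of the commit): the id scheme touched here packs a per-shard counter into a global document id, so each shard owns a contiguous block of maxDocsPerShard ids and a document's global id is maxDocsPerShard * shard plus the next free offset inside the shard. The minimal Java example that follows mirrors the arithmetic visible in the getNextId() hunk; only maxDocsPerShard, shard, currentId and endId come from the diff, and every other name and value is hypothetical.

import java.util.concurrent.atomic.AtomicInteger;

class ShardIdExample
{
    // Hypothetical capacity; the real constant lives in CassandraIndexManager.
    static final int maxDocsPerShard = 100000;

    final int shard;                                        // which shard owns this id range
    final AtomicInteger currentId = new AtomicInteger(-1);  // last offset handed out locally
    final int endId;                                        // last offset reserved for this node

    ShardIdExample(int shard, int endId)
    {
        this.shard = shard;
        this.endId = endId;
    }

    // Same shape as the getNextId() hunk: take the next offset in this node's
    // reservation, or return null once the reservation is used up.
    Long nextGlobalId()
    {
        int nextId = currentId.incrementAndGet();
        if (nextId <= endId)
            return (long) (maxDocsPerShard * shard) + nextId;
        return null;
    }

    public static void main(String[] args)
    {
        ShardIdExample info = new ShardIdExample(2, 9);
        System.out.println(info.nextGlobalId()); // prints 200000: first id in shard 2's block
    }
}
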
src/lucandra/cluster/CassandraIndexManager.java  (20 changed lines)

@@ -49,8 +49,7 @@
 {
 
     // To increase throughput we distribute docs across a number of shards at
-    // once
-    // The idea being different shards live on different boxes
+    // once. The idea being different shards live on different boxes
     protected final int                               shardsAtOnce;
 
     private int[]                                     randomSeq;
@@ -180,7 +179,7 @@ public Long getNextId()
                     int nextId = info.currentId.incrementAndGet();
 
                     if (nextId <= info.endId)
-                    {
+                    {                        
                         return (long) (maxDocsPerShard * info.shard) + nextId;
                     }
                     else
@@ -805,7 +804,6 @@ private Long nextReservedId(String indexName, NodeInfo[] shards, String myToken)
                 else
                 {
                     //Mark this offset as taken.
-                    CassandraUtils.robustInsert(ConsistencyLevel.QUORUM, updateNodeOffset(indexName, myToken, node.shard, nextOffset));
                     usedNodeInfo.nodes.put(""+nextOffset, new AtomicInteger(1));
 
                     // we lost, try try again...
@@ -902,9 +900,8 @@ private int getRandomSequenceOffset(int offset)
             // initialize shards we didn't know about
             if (offset == null)
             {
-                RowMutation rm = updateNodeOffset(shards.indexName, myToken, shard.getKey(), -1);
+                updateNodeOffset(shards.indexName, myToken, shard.getKey(), -1);
                 offset = nodes.nodes.get(myToken);
-                CassandraUtils.robustInsert(ConsistencyLevel.QUORUM, rm);
             }
 
             int randomSeqOffset = getRandomSequenceOffset(offset.get());
@@ -927,16 +924,15 @@ private int getRandomSequenceOffset(int offset)
         // new shards
         for (int i = pickedShard; i < shardsAtOnce; i++)
         {
-            picked[i] = addNewShard(shards.indexName);
+            picked[i] = addNewShard(shards);
         }
 
         return picked;
 
     }
 
-    private NodeInfo addNewShard(String indexName) throws IOException
+    private NodeInfo addNewShard(ShardInfo shards) throws IOException
     {
-        ShardInfo shards = getShardInfo(indexName, true);
 
         // get max shard
         Integer maxShard = -1;
@@ -963,11 +959,9 @@ private NodeInfo addNewShard(String indexName) throws IOException
         NodeInfo dupNodes = null;
         if ((dupNodes = shards.shards.putIfAbsent(nodes.shard, nodes)) == null)
        {
-            logger.info("added new shard for " + indexName + "("+getToken()+") " + nodes.shard);
+            logger.info("added new shard for " + shards.indexName + "("+getToken()+") " + nodes.shard);
 
-            RowMutation rm = updateNodeOffset(indexName, getToken(), nodes.shard, -1);
-
-            CassandraUtils.robustInsert(ConsistencyLevel.QUORUM, rm);
+            updateNodeOffset(shards.indexName, getToken(), nodes.shard, -1);
         }
 
 
