diff --git a/core/pom.xml b/core/pom.xml
index b21b6314f..d356d1b94 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -104,70 +104,70 @@
     <dependency>
       <groupId>org.apache.cassandra</groupId>
-      <artifactId>apache-cassandra</artifactId>
-      <version>0.8.0-20110415</version>
+      <artifactId>cassandra-all</artifactId>
+      <version>0.8.0-beta1</version>
     </dependency>
     <dependency>
       <groupId>org.apache.cassandra</groupId>
-      <artifactId>apache-cassandra-thrift</artifactId>
-      <version>0.8.0-20110415</version>
+      <artifactId>cassandra-thrift</artifactId>
+      <version>0.8.0-beta1</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.cassandra.deps</groupId>
+      <groupId>org.apache.thrift</groupId>
       <artifactId>libthrift</artifactId>
-      <version>0.6.0</version>
+      <version>0.6.1</version>
     </dependency>
-    <dependency>
-      <groupId>org.yaml</groupId>
-      <artifactId>snakeyaml</artifactId>
-      <version>1.6</version>
+    <dependency>
+      <groupId>org.yaml</groupId>
+      <artifactId>snakeyaml</artifactId>
+      <version>1.6</version>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <version>r08</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-collections</groupId>
-      <artifactId>commons-collections</artifactId>
-      <version>3.2.1</version>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>r08</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+      <version>3.2.1</version>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.cassandra.deps</groupId>
-      <artifactId>avro</artifactId>
-      <version>1.4.0-cassandra-1</version>
+    <dependency>
+      <groupId>org.apache.cassandra.deps</groupId>
+      <artifactId>avro</artifactId>
+      <version>1.4.0-cassandra-1</version>
       <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <artifactId>netty</artifactId>
-          <groupId>org.jboss.netty</groupId>
-        </exclusion>
-        <exclusion>
-          <artifactId>paranamer</artifactId>
-          <groupId>com.thoughtworks.paranamer</groupId>
-        </exclusion>
-        <exclusion>
-          <artifactId>paranamer-ant</artifactId>
-          <groupId>com.thoughtworks.paranamer</groupId>
-        </exclusion>
-        <exclusion>
-          <artifactId>velocity</artifactId>
-          <groupId>org.apache.velocity</groupId>
-        </exclusion>
-      </exclusions>
-    </dependency>
+      <exclusions>
+        <exclusion>
+          <artifactId>netty</artifactId>
+          <groupId>org.jboss.netty</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>paranamer</artifactId>
+          <groupId>com.thoughtworks.paranamer</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>paranamer-ant</artifactId>
+          <groupId>com.thoughtworks.paranamer</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>velocity</artifactId>
+          <groupId>org.apache.velocity</groupId>
+        </exclusion>
+      </exclusions>
+    </dependency>
-    <dependency>
-      <groupId>org.antlr</groupId>
-      <artifactId>antlr</artifactId>
+    <dependency>
+      <groupId>org.antlr</groupId>
+      <artifactId>antlr</artifactId>
       <version>3.1.3</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.googlecode.concurrentlinkedhashmap</groupId>
-      <artifactId>concurrentlinkedhashmap-lru</artifactId>
-      <version>1.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.googlecode.concurrentlinkedhashmap</groupId>
+      <artifactId>concurrentlinkedhashmap-lru</artifactId>
+      <version>1.1</version>
       <scope>test</scope>
     </dependency>
@@ -232,7 +232,7 @@
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
-    </dependency>
+    </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
diff --git a/core/src/main/java/me/prettyprint/cassandra/model/HColumnImpl.java b/core/src/main/java/me/prettyprint/cassandra/model/HColumnImpl.java
index 1cedf751c..d3a2535b7 100644
--- a/core/src/main/java/me/prettyprint/cassandra/model/HColumnImpl.java
+++ b/core/src/main/java/me/prettyprint/cassandra/model/HColumnImpl.java
@@ -33,8 +33,9 @@ public HColumnImpl(N name, V value, long clock, Serializer<N> nameSerializer,
     notNull(name, "name is null");
     notNull(value, "value is null");
-    this.column = new Column(nameSerializer.toByteBuffer(name),
-        valueSerializer.toByteBuffer(value), clock);
+    this.column = new Column(nameSerializer.toByteBuffer(name));
+    this.column.setValue(valueSerializer.toByteBuffer(value));
+    this.column.setTimestamp(clock);
   }

   public HColumnImpl(Column thriftColumn, Serializer<N> nameSerializer,
@@ -78,7 +79,7 @@ public HColumn<N, V> setClock(long clock) {
   }

   /**
-   * Set the time-to-live value for this column in seconds. 
+   * Set the time-to-live value for this column in seconds.
    * The server will mark this column as deleted once the number of seconds has elapsed.
    */
   @Override
@@ -93,15 +94,15 @@ public int getTtl() {
   }

   @Override
-  public N getName() { 
+  public N getName() {
     return column.isSetName() ? nameSerializer.fromByteBuffer(column.name.duplicate()) : null;
   }

   @Override
-  public V getValue() { 
+  public V getValue() {
     return column.isSetValue() ? valueSerializer.fromByteBuffer(column.value.duplicate()) : null;
-  } 
-  
+  }
+
   @Override
   public long getClock() {
@@ -126,15 +127,15 @@ public Serializer<N> getNameSerializer() {
   @Override
   public Serializer<V> getValueSerializer() {
     return valueSerializer;
-  } 
-  
+  }
+
   @Override
-  public ByteBuffer getNameBytes() { 
+  public ByteBuffer getNameBytes() {
     return column.isSetName() ? column.name.duplicate() : null;
   }

   @Override
-  public ByteBuffer getValueBytes() { 
+  public ByteBuffer getValueBytes() {
     return column.isSetValue() ? column.value.duplicate() : null;
   }
@@ -142,7 +143,7 @@ public ByteBuffer getValueBytes() {
    * Clear value, timestamp and ttl (the latter two set to '0') leaving only the column name
    */
   @Override
-  public HColumn<N,V> clear() { 
+  public HColumn<N,V> clear() {
     column.value = null;
     column.timestamp = 0;
     column.ttl = 0;
@@ -151,8 +152,8 @@
     column.setValueIsSet(false);
     return this;
   }
-  
-  
+
+
   @Override
   public HColumn<N,V> apply(V value, long clock, int ttl) {
@@ -166,7 +167,7 @@ public HColumn<N,V> apply(Column c) {
     this.column = c;
     return this;
   }
-  
+
   @Override
   public String toString() {
     return String.format("HColumn(%s=%s)",getName(), getValue());
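Migration note: the Thrift-generated Column class in Cassandra 0.8 (libthrift 0.6.1) drops the three-argument constructor that 0.7-era code used; value and timestamp are now optional fields set through mutators, which is exactly the rewrite applied throughout this patch. A minimal sketch of the before/after (the names and values below are illustrative only):

    import java.nio.ByteBuffer;
    import org.apache.cassandra.thrift.Column;

    public class ColumnMigrationSketch {
      public static void main(String[] args) {
        ByteBuffer name = ByteBuffer.wrap("c_name".getBytes());
        ByteBuffer value = ByteBuffer.wrap("c_val".getBytes());

        // 0.7-era form, no longer generated by Thrift 0.6.x:
        //   Column column = new Column(name, value, System.currentTimeMillis());

        // 0.8 form: required name in the constructor, optional fields via setters.
        Column column = new Column(name);
        column.setValue(value);
        column.setTimestamp(System.currentTimeMillis());
      }
    }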
diff --git a/core/src/main/java/me/prettyprint/cassandra/service/KeyspaceServiceImpl.java b/core/src/main/java/me/prettyprint/cassandra/service/KeyspaceServiceImpl.java
index bf55b5e34..72cb9e158 100644
--- a/core/src/main/java/me/prettyprint/cassandra/service/KeyspaceServiceImpl.java
+++ b/core/src/main/java/me/prettyprint/cassandra/service/KeyspaceServiceImpl.java
@@ -57,7 +57,7 @@ public class KeyspaceServiceImpl implements KeyspaceService {
   private CassandraHost cassandraHost;

   private final FailoverPolicy failoverPolicy;
-  
+
   private final Map<String, String> credentials;

   public KeyspaceServiceImpl(String keyspaceName,
@@ -228,17 +228,17 @@ public List<Column> execute(Cassandra.Client cassandra) throws HectorException {
     operateWithFailover(op);
     return op.getResult();
   }
-  
+
   @Override
   public List<Column> getSlice(String key, ColumnParent columnParent, SlicePredicate predicate) throws HectorException {
     return getSlice(StringSerializer.get().toByteBuffer(key), columnParent, predicate);
   }
-  
+
   @Override
   public List<CounterColumn> getCounterSlice(final ByteBuffer key, final ColumnParent columnParent, final SlicePredicate predicate) throws HectorException {
-    Operation<List<CounterColumn>> op = 
+    Operation<List<CounterColumn>> op =
         new Operation<List<CounterColumn>>(OperationType.READ, failoverPolicy, keyspaceName, credentials) {

       @Override
@@ -258,7 +258,7 @@ public List<CounterColumn> execute(Cassandra.Client cassandra) throws HectorExce
             // Inconsistency
             throw new HectorException("Regular Column is part of the set of Counter Column");
           }
-          
+
         }
         return result;
       } catch (Exception e) {
@@ -394,9 +394,9 @@ public Void execute(Cassandra.Client cassandra) throws HectorException {
     };
     operateWithFailover(op);
   }
-  
+
   @Override
-  public void addCounter(final ByteBuffer key, final ColumnParent columnParent, final CounterColumn counterColumn) 
+  public void addCounter(final ByteBuffer key, final ColumnParent columnParent, final CounterColumn counterColumn)
       throws HectorException {
     Operation<Void> op = new Operation<Void>(OperationType.WRITE, failoverPolicy, keyspaceName, credentials) {
@@ -412,7 +412,7 @@ public Void execute(Cassandra.Client cassandra) throws HectorException {
     };
     operateWithFailover(op);
   }
-  
+
   @Override
   public void addCounter(String key, ColumnParent columnParent, CounterColumn counterColumn) throws HectorException {
     addCounter(StringSerializer.get().toByteBuffer(key), columnParent, counterColumn);
@@ -425,7 +425,9 @@ public void insert(String key, ColumnPath columnPath, ByteBuffer value) throws H
     if (columnPath.isSetSuper_column()) {
       columnParent.setSuper_column(columnPath.getSuper_column());
     }
-    Column column = new Column(ByteBuffer.wrap(columnPath.getColumn()), value, connectionManager.createClock());
+    Column column = new Column(ByteBuffer.wrap(columnPath.getColumn()));
+    column.setValue(value);
+    column.setTimestamp(connectionManager.createClock());
     insert(StringSerializer.get().toByteBuffer(key), columnParent, column);
   }

@@ -436,7 +438,9 @@ public void insert(String key, ColumnPath columnPath, ByteBuffer value, long tim
     if (columnPath.isSetSuper_column()) {
       columnParent.setSuper_column(columnPath.getSuper_column());
     }
-    Column column = new Column(ByteBuffer.wrap(columnPath.getColumn()), value, timestamp);
+    Column column = new Column(ByteBuffer.wrap(columnPath.getColumn()));
+    column.setValue(value);
+    column.setTimestamp(timestamp);
     insert(StringSerializer.get().toByteBuffer(key), columnParent, column);
   }

@@ -624,7 +628,7 @@ public Void execute(Cassandra.Client cassandra) throws HectorException {
     };
     operateWithFailover(op);
   }
-  
+
   @Override
   public void removeCounter(final ByteBuffer key, final ColumnPath columnPath) throws HectorException {
     Operation<Void> op = new Operation<Void>(OperationType.WRITE, failoverPolicy, keyspaceName, credentials) {
@@ -641,7 +645,7 @@ public Void execute(Cassandra.Client cassandra) throws HectorException {
     };
     operateWithFailover(op);
   }
-  
+
   @Override
   public void removeCounter(String key, ColumnPath columnPath) throws HectorException {
     removeCounter(StringSerializer.get().toByteBuffer(key), columnPath);
@@ -694,7 +698,7 @@ public Column execute(Cassandra.Client cassandra) throws HectorException {
     }
     return op.getResult();
   }
-  
+
   @Override
   public CounterColumn getCounter(final ByteBuffer key, final ColumnPath columnPath) throws HectorException {
     Operation<CounterColumn> op = new Operation<CounterColumn>(OperationType.READ, failoverPolicy, keyspaceName, credentials) {
@@ -703,7 +707,7 @@ public CounterColumn getCounter(final ByteBuffer key, final ColumnPath columnPat
       public CounterColumn execute(Cassandra.Client cassandra) throws HectorException {
         ColumnOrSuperColumn cosc;
         try {
-          cosc = cassandra.get(key, columnPath, getThriftCl(OperationType.READ)); 
+          cosc = cassandra.get(key, columnPath, getThriftCl(OperationType.READ));
         } catch (NotFoundException e) {
           setException(xtrans.translate(e));
           return null;
@@ -720,7 +724,7 @@ public CounterColumn execute(Cassandra.Client cassandra) throws HectorException
     }
     return op.getResult();
   }
-  
+
   @Override
   public CounterColumn getCounter(String key, ColumnPath columnPath) throws HectorException {
     return getCounter(StringSerializer.get().toByteBuffer(key), columnPath);
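The counter operations added above (addCounter, getCounter, removeCounter, getCounterSlice) wrap the new 0.8 counter calls in the same Operation/failover machinery as the existing reads and writes. A rough usage sketch against the KeyspaceService API as it appears in this diff; the column family "Counter1", the counter name, and the helper method are illustrative, and keyspace wiring is assumed to already exist:

    import me.prettyprint.cassandra.serializers.StringSerializer;
    import me.prettyprint.cassandra.service.KeyspaceService;
    import org.apache.cassandra.thrift.ColumnParent;
    import org.apache.cassandra.thrift.ColumnPath;
    import org.apache.cassandra.thrift.CounterColumn;

    public class CounterUsageSketch {
      // Increments a named counter by 1 and returns the running total.
      // Assumes the counter exists after the increment; a missing counter
      // surfaces as HNotFoundException, as the tests below exercise.
      static long bumpAndRead(KeyspaceService keyspace, String key) {
        ColumnParent parent = new ColumnParent("Counter1");
        keyspace.addCounter(key, parent,
            new CounterColumn(StringSerializer.get().toByteBuffer("visits"), 1L));

        ColumnPath path = new ColumnPath("Counter1");
        path.setColumn(StringSerializer.get().toByteBuffer("visits"));
        return keyspace.getCounter(key, path).value;
      }
    }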
diff --git a/core/src/test/java/me/prettyprint/cassandra/model/ColumnSliceTest.java b/core/src/test/java/me/prettyprint/cassandra/model/ColumnSliceTest.java
index 5576695e6..d81b1a525 100644
--- a/core/src/test/java/me/prettyprint/cassandra/model/ColumnSliceTest.java
+++ b/core/src/test/java/me/prettyprint/cassandra/model/ColumnSliceTest.java
@@ -22,7 +22,7 @@ public class ColumnSliceTest {

   StringSerializer se = StringSerializer.get();
   LongSerializer le = LongSerializer.get();
-  
+
   @Test
   public void testConstruction() {
@@ -30,22 +30,31 @@ public void testConstruction() {
     ColumnSlice<String, Long> slice = new ColumnSliceImpl<String, Long>(tColumns, se, le);
     Assert.assertTrue(slice.getColumns().isEmpty());

-    tColumns.add(new Column(ByteBuffer.wrap(new byte[]{}), ByteBuffer.wrap(new byte[]{}), 0L));
+    Column column = new Column(ByteBuffer.wrap(new byte[]{}));
+    column.setValue(ByteBuffer.wrap(new byte[]{}));
+    column.setTimestamp(0L);
+    tColumns.add(column);
     slice = new ColumnSliceImpl<String, Long>(tColumns, se, le);
     Assert.assertEquals(1, slice.getColumns().size());

     tColumns = new ArrayList<Column>();
-    tColumns.add(new Column(se.toByteBuffer("1"), le.toByteBuffer(1L), 0L));
+    column = new Column(se.toByteBuffer("1"));
+    column.setValue(le.toByteBuffer(1L));
+    column.setTimestamp(0L);
+    tColumns.add(column);
     slice = new ColumnSliceImpl<String, Long>(tColumns, se, le);
     Assert.assertEquals((Long) 1L, slice.getColumnByName("1").getValue());
   }
-  
+
   @Test
   public void testMultiCallOnByteBuffer() {
     List<Column> tColumns = new ArrayList<Column>();
-    tColumns.add(new Column(se.toByteBuffer("1"), ByteBuffer.wrap("colvalue".getBytes()), 0L));
+    Column column = new Column(se.toByteBuffer("1"));
+    column.setValue(ByteBuffer.wrap("colvalue".getBytes()));
+    column.setTimestamp(0L);
+    tColumns.add(column);
     ColumnSlice<String, ByteBuffer> slice = new ColumnSliceImpl<String, ByteBuffer>(tColumns, se, ByteBufferSerializer.get());
-    
+
     ByteBuffer value = slice.getColumnByName("1").getValue();
     Assert.assertEquals("colvalue", se.fromByteBuffer(value));
     value.rewind();
diff --git a/core/src/test/java/me/prettyprint/cassandra/model/SuperColumnSliceTest.java b/core/src/test/java/me/prettyprint/cassandra/model/SuperColumnSliceTest.java
index 0455c3d77..0b9980f35 100644
--- a/core/src/test/java/me/prettyprint/cassandra/model/SuperColumnSliceTest.java
+++ b/core/src/test/java/me/prettyprint/cassandra/model/SuperColumnSliceTest.java
@@ -34,7 +34,9 @@ public void testConstruction() {
     Assert.assertTrue(slice.getSuperColumns().isEmpty());

     // non-empty one
-    Column c = new Column(le.toByteBuffer(5L), be.toByteBuffer(ByteBuffer.wrap(new byte[] { 1 })), 2L);
+    Column c = new Column(le.toByteBuffer(5L));
+    c.setValue(be.toByteBuffer(ByteBuffer.wrap(new byte[] { 1 })));
+    c.setTimestamp(2L);
     tColumns.add(new SuperColumn(se.toByteBuffer("super"), Arrays.asList(c)));
     slice = new SuperSliceImpl<String, Long, ByteBuffer>(tColumns, se, le, be);
     Assert.assertEquals(1, slice.getSuperColumns().size());
diff --git a/core/src/test/java/me/prettyprint/cassandra/service/BatchMutationTest.java b/core/src/test/java/me/prettyprint/cassandra/service/BatchMutationTest.java
index af527df5f..fa2667ae7 100644
--- a/core/src/test/java/me/prettyprint/cassandra/service/BatchMutationTest.java
+++ b/core/src/test/java/me/prettyprint/cassandra/service/BatchMutationTest.java
@@ -36,7 +36,9 @@ public void setup() {

   @Test
   public void testAddInsertion() {
-    Column column = new Column(StringSerializer.get().toByteBuffer("c_name"), StringSerializer.get().toByteBuffer("c_val"), System.currentTimeMillis());
+    Column column = new Column(StringSerializer.get().toByteBuffer("c_name"));
+    column.setValue(StringSerializer.get().toByteBuffer("c_val"));
+    column.setTimestamp(System.currentTimeMillis());
     batchMutate.addInsertion("key1", columnFamilies, column);
     // assert there is one outter map row with 'key' as the key
     Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = batchMutate.getMutationMap();
@@ -45,23 +47,29 @@ public void testAddInsertion() {

     // add again with a different column and verify there is one key and two mutations underneath
     // for "standard1"
-    Column column2 = new Column(StringSerializer.get().toByteBuffer("c_name2"), StringSerializer.get().toByteBuffer("c_val2"), System.currentTimeMillis());
+    Column column2 = new Column(StringSerializer.get().toByteBuffer("c_name2"));
+    column2.setValue(StringSerializer.get().toByteBuffer("c_val2"));
+    column2.setTimestamp(System.currentTimeMillis());
     batchMutate.addInsertion("key1",columnFamilies, column2);
     assertEquals(2, mutationMap.get(StringSerializer.get().toByteBuffer("key1")).get("Standard1").size());
   }

   @Test
   public void testAddSuperInsertion() {
-    SuperColumn sc = new SuperColumn(StringSerializer.get().toByteBuffer("c_name"),
-        Arrays.asList(new Column(StringSerializer.get().toByteBuffer("c_name"), StringSerializer.get().toByteBuffer("c_val"), System.currentTimeMillis())));
+    Column column = new Column(StringSerializer.get().toByteBuffer("c_name"));
+    column.setValue(StringSerializer.get().toByteBuffer("c_val"));
+    column.setTimestamp(System.currentTimeMillis());
+    SuperColumn sc = new SuperColumn(StringSerializer.get().toByteBuffer("c_name"), Arrays.asList(column));
     batchMutate.addSuperInsertion("key1", columnFamilies, sc);
     // assert there is one outter map row with 'key' as the key
     assertEquals(1, batchMutate.getMutationMap().get(StringSerializer.get().toByteBuffer("key1")).size());

     // add again with a different column and verify there is one key and two mutations underneath
     // for "standard1"
-    SuperColumn sc2 = new SuperColumn(StringSerializer.get().toByteBuffer("c_name2"),
-        Arrays.asList(new Column(StringSerializer.get().toByteBuffer("c_name"), StringSerializer.get().toByteBuffer("c_val"), System.currentTimeMillis())));
+    column = new Column(StringSerializer.get().toByteBuffer("c_name"));
+    column.setValue(StringSerializer.get().toByteBuffer("c_val"));
+    column.setTimestamp(System.currentTimeMillis());
+    SuperColumn sc2 = new SuperColumn(StringSerializer.get().toByteBuffer("c_name2"), Arrays.asList(column));
     batchMutate.addSuperInsertion("key1", columnFamilies, sc2);
     assertEquals(2, batchMutate.getMutationMap().get(StringSerializer.get().toByteBuffer("key1")).get("Standard1").size());
   }
@@ -84,16 +92,15 @@ public void testAddDeletion() {
     batchMutate.addDeletion("key1", columnFamilies, deletion);
     assertEquals(2,batchMutate.getMutationMap().get(StringSerializer.get().toByteBuffer("key1")).get("Standard1").size());
   }
-  
+
   @Test
   public void testIsEmpty() {
     assertTrue(batchMutate.isEmpty());
-    
+
     // Insert a column
-    Column c1 = new Column(
-        StringSerializer.get().toByteBuffer("c_name"),
-        StringSerializer.get().toByteBuffer("c_val"),
-        System.currentTimeMillis());
+    Column c1 = new Column(StringSerializer.get().toByteBuffer("c_name"));
+    c1.setValue(StringSerializer.get().toByteBuffer("c_val"));
+    c1.setTimestamp(System.currentTimeMillis());
     batchMutate.addInsertion("key1", columnFamilies, c1);
     assertFalse(batchMutate.isEmpty());
@@ -101,21 +108,21 @@ public void testIsEmpty() {
     CounterColumn cc1 = new CounterColumn(StringSerializer.get().toByteBuffer("c_name"), 13);
     batchMutate.addCounterInsertion("key1", columnFamilies, cc1);
     assertFalse(batchMutate.isEmpty());
-    
-    
-    
+
+
+
   }
-  
+
   // ********** Test Counters related operations ******************
-  
+
   @Test
   public void testAddCounterInsertion() {
     // Insert a Counter.
     CounterColumn cc1 = new CounterColumn(StringSerializer.get().toByteBuffer("c_name"), 222);
-    
+
     batchMutate.addCounterInsertion("key1", columnFamilies, cc1);
-    
+
     // assert there is one outter map row with 'key' as the key
     Map<ByteBuffer, Map<String, List<Mutation>>> mutationMap = batchMutate.getMutationMap();
     assertEquals(1, mutationMap.get(StringSerializer.get().toByteBuffer("key1")).size());
@@ -126,16 +133,16 @@ public void testAddCounterInsertion() {
     batchMutate.addCounterInsertion("key1", columnFamilies, cc2);
     assertEquals(2, mutationMap.get(StringSerializer.get().toByteBuffer("key1")).get("Standard1").size());
   }
-  
+
   @Test
   public void testAddCounterDeletion() {
-    
+
     Deletion counterDeletion = new Deletion();
-    
+
     SlicePredicate slicePredicate = new SlicePredicate();
     slicePredicate.addToColumn_names(StringSerializer.get().toByteBuffer("c_name"));
     counterDeletion.setPredicate(slicePredicate);
-    
+
     batchMutate.addDeletion("key1", columnFamilies, counterDeletion);

     assertEquals(1, batchMutate.getMutationMap().get(StringSerializer.get().toByteBuffer("key1")).size());
@@ -147,11 +154,11 @@ public void testAddCounterDeletion() {
     batchMutate.addDeletion("key1", columnFamilies, counterDeletion);
     assertEquals(2,batchMutate.getMutationMap().get(StringSerializer.get().toByteBuffer("key1")).get("Standard1").size());
   }
-  
+
   @Test
   public void testAddSuperCounterInsertion() {
     // Create 1 super counter.
-    CounterSuperColumn csc1 = new CounterSuperColumn(StringSerializer.get().toByteBuffer("c_name"), 
+    CounterSuperColumn csc1 = new CounterSuperColumn(StringSerializer.get().toByteBuffer("c_name"),
        Arrays.asList(new CounterColumn(StringSerializer.get().toByteBuffer("c_name"), 123)));

     batchMutate.addSuperCounterInsertion("key1", columnFamilies, csc1);
@@ -160,7 +167,7 @@ public void testAddSuperCounterInsertion() {

     // add again with a different column and verify there is one key and two mutations underneath
     // for "standard1"
-    CounterSuperColumn csc2 = new CounterSuperColumn(StringSerializer.get().toByteBuffer("c_name2"), 
+    CounterSuperColumn csc2 = new CounterSuperColumn(StringSerializer.get().toByteBuffer("c_name2"),
        Arrays.asList(new CounterColumn(StringSerializer.get().toByteBuffer("c_name"), 456)));
     batchMutate.addSuperCounterInsertion("key1", columnFamilies, csc2);
     assertEquals(2, batchMutate.getMutationMap().get(StringSerializer.get().toByteBuffer("key1")).get("Standard1").size());
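Side note: the name/value/timestamp triplet now takes three statements everywhere, and these tests repeat it verbatim. A small factory (hypothetical, not part of this change) would keep the test bodies closer to their pre-0.8 length:

    import me.prettyprint.cassandra.serializers.StringSerializer;
    import org.apache.cassandra.thrift.Column;

    final class ThriftColumnFactory {
      private ThriftColumnFactory() {}

      // Builds a string-named, string-valued Column the 0.8 way.
      static Column column(String name, String value, long timestamp) {
        Column c = new Column(StringSerializer.get().toByteBuffer(name));
        c.setValue(StringSerializer.get().toByteBuffer(value));
        c.setTimestamp(timestamp);
        return c;
      }
    }

with call sites like ThriftColumnFactory.column("c_name", "c_val", System.currentTimeMillis()).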
diff --git a/core/src/test/java/me/prettyprint/cassandra/service/KeyspaceTest.java b/core/src/test/java/me/prettyprint/cassandra/service/KeyspaceTest.java
index 54d23169f..9531e01d0 100644
--- a/core/src/test/java/me/prettyprint/cassandra/service/KeyspaceTest.java
+++ b/core/src/test/java/me/prettyprint/cassandra/service/KeyspaceTest.java
@@ -60,8 +60,8 @@ public class KeyspaceTest extends BaseEmbededServerSetupTest {
   @Before
   public void setupCase() throws IllegalStateException, PoolExhaustedException, Exception {
     super.setupClient();
-    
-    keyspace = new KeyspaceServiceImpl("Keyspace1", new QuorumAllConsistencyLevelPolicy(), 
+
+    keyspace = new KeyspaceServiceImpl("Keyspace1", new QuorumAllConsistencyLevelPolicy(),
         connectionManager, FailoverPolicy.ON_FAIL_TRY_ALL_AVAILABLE);
   }

@@ -100,7 +100,7 @@ public void testInsertAndGetAndRemove() throws IllegalArgumentException, NoSuchE
       }
     }
   }
-  
+
   @Test
   public void testInsertAndGetAndRemoveCounter() throws IllegalArgumentException,
       NoSuchElementException, IllegalStateException, HNotFoundException, Exception {
@@ -109,11 +109,11 @@ public void testInsertAndGetAndRemoveCounter() throws IllegalArgumentException,
     // insert value
     ColumnParent cp = new ColumnParent("Counter1");
     //cp.setColumn(bytes("testInsertAndGetAndRemoveCounter"));
-    
+
     // Insert 3 counters for the same key
-    keyspace.addCounter("testInsertAndGetAndRemoveCounter_key1", cp, createCounterColumn("A", 5L)); 
-    keyspace.addCounter("testInsertAndGetAndRemoveCounter_key1", cp, createCounterColumn("A", -1L)); 
-    keyspace.addCounter("testInsertAndGetAndRemoveCounter_key1", cp, createCounterColumn("B", 10L)); 
+    keyspace.addCounter("testInsertAndGetAndRemoveCounter_key1", cp, createCounterColumn("A", 5L));
+    keyspace.addCounter("testInsertAndGetAndRemoveCounter_key1", cp, createCounterColumn("A", -1L));
+    keyspace.addCounter("testInsertAndGetAndRemoveCounter_key1", cp, createCounterColumn("B", 10L));

     // Total for counter A is (5 - 1) = 4.
     // Total for counter B is 10.
@@ -122,27 +122,27 @@ public void testInsertAndGetAndRemoveCounter() throws IllegalArgumentException,
     CounterColumn counter = keyspace.getCounter("testInsertAndGetAndRemoveCounter_key1", cph);
     assertNotNull(counter);
     assertEquals(4, counter.value);
-    
+
     cph.setColumn(ss.toByteBuffer("B"));
     counter = keyspace.getCounter("testInsertAndGetAndRemoveCounter_key1", cph);
     assertNotNull(counter);
     assertEquals(10, counter.value);
-    
+
     // Reuse the ColumnPath associated to Conter B and remove only B.
     keyspace.removeCounter("testInsertAndGetAndRemoveCounter_key1", cph);
-    
+
     // Fetch it and it should not exist
     try {
       keyspace.getCounter("testInsertAndGetAndRemoveCounter_key1", cph);
     } catch (HNotFoundException e) {
       // good
     }
-    
+
     // Fetch Counter A again to verify I did not delete it
     cph.setColumn(ss.toByteBuffer("A"));
     counter = keyspace.getCounter("testInsertAndGetAndRemoveCounter_key1", cph);
     assertNotNull(counter);
-    
+
     // Delete the whole row
     cph.column = null;
     keyspace.removeCounter("testInsertAndGetAndRemoveCounter_key1", cph);
@@ -155,7 +155,7 @@ public void testInsertAndGetAndRemoveCounter() throws IllegalArgumentException,
       // good
     }
   }
-  
+
   private CounterColumn createCounterColumn(String name, long value) {
     CounterColumn cc = new CounterColumn();
     cc.setName(StringSerializer.get().toByteBuffer(name));
@@ -173,10 +173,9 @@ public void testInsertSuper() throws IllegalArgumentException, NoSuchElementExce
     // insert value
     ColumnParent columnParent = new ColumnParent("Super1");
     columnParent.setSuper_column(StringSerializer.get().toByteBuffer("testInsertSuper_super"));
-    Column column = new Column(StringSerializer.get().toByteBuffer("testInsertSuper_column"),
-        StringSerializer.get().toByteBuffer("testInsertSuper_value"), connectionManager.createClock());
-
-
+    Column column = new Column(StringSerializer.get().toByteBuffer("testInsertSuper_column"));
+    column.setValue(StringSerializer.get().toByteBuffer("testInsertSuper_value"));
+    column.setTimestamp(connectionManager.createClock());
     keyspace.insert(StringSerializer.get().toByteBuffer("testInsertSuper_key"), columnParent, column);

     column.setName(StringSerializer.get().toByteBuffer("testInsertSuper_column2"));
@@ -247,8 +246,9 @@ public void testBatchMutate() throws HectorException {
       ArrayList<Mutation> mutations = new ArrayList<Mutation>(10);
       for (int j = 0; j < 10; j++) {
-        Column col = new Column(StringSerializer.get().toByteBuffer("testBatchMutateColumn_" + j),
-            StringSerializer.get().toByteBuffer("testBatchMutateColumn_value_" + j), connectionManager.createClock());
+        Column col = new Column(StringSerializer.get().toByteBuffer("testBatchMutateColumn_" + j));
+        col.setValue(StringSerializer.get().toByteBuffer("testBatchMutateColumn_value_" + j));
+        col.setTimestamp(connectionManager.createClock());
         //list.add(col);
         ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
         cosc.setColumn(col);
@@ -317,8 +317,9 @@ public void testBatchMutateBatchMutation() throws HectorException {

     for (int i = 0; i < 10; i++) {
       for (int j = 0; j < 10; j++) {
-        Column col = new Column(StringSerializer.get().toByteBuffer("testBatchMutateColumn_" + j),
-            StringSerializer.get().toByteBuffer("testBatchMutateColumn_value_" + j), connectionManager.createClock());
+        Column col = new Column(StringSerializer.get().toByteBuffer("testBatchMutateColumn_" + j));
+        col.setValue(StringSerializer.get().toByteBuffer("testBatchMutateColumn_value_" + j));
+        col.setTimestamp(connectionManager.createClock());
         batchMutation.addInsertion("testBatchMutateColumn_" + i, columnFamilies, col);
       }
     }
@@ -381,8 +382,9 @@ public void testBatchUpdateInsertAndDelOnSame() throws HectorException {

     for (int i = 0; i < 10; i++) {
       for (int j = 0; j < 10; j++) {
-        Column col = new Column(StringSerializer.get().toByteBuffer("testBatchMutateColumn_" + j),
-            StringSerializer.get().toByteBuffer("testBatchMutateColumn_value_" + j), connectionManager.createClock());
+        Column col = new Column(StringSerializer.get().toByteBuffer("testBatchMutateColumn_" + j));
+        col.setValue(StringSerializer.get().toByteBuffer("testBatchMutateColumn_value_" + j));
+        col.setTimestamp(connectionManager.createClock());
         batchMutation.addInsertion("testBatchMutateColumn_" + i, columnFamilies, col);
       }
     }
@@ -478,7 +480,7 @@ public void testGetSlice() throws HectorException {
     keyspace.remove("testGetSlice_", cp);
     keyspace.remove("testGetSlice", cp);
   }
-  
+
   @Test
   public void testGetCounterSlice() throws HectorException {
     // insert value
@@ -487,8 +489,8 @@ public void testGetCounterSlice() throws HectorException {

     for (int i = 0; i < 100; i++) {
       ColumnParent cp = new ColumnParent("Counter1");
-      keyspace.addCounter("testGetCounterSlice", cp, createCounterColumn("testGetCounterSlice_" + i, i)); 
-      
+      keyspace.addCounter("testGetCounterSlice", cp, createCounterColumn("testGetCounterSlice_" + i, i));
+
       if (i < 50) {
         // I want to query only 50.
         columnnames.add(ss.toByteBuffer("testGetCounterSlice_" + i));
@@ -497,7 +499,7 @@ public void testGetCounterSlice() throws HectorException {

     // Query 50 counters. From testGetCounterSlice_0 to testGetCounterSlice_49.
     ColumnParent clp = new ColumnParent("Counter1");
-    
+
     // TODO (patricioe) Slice by range will be in the next snapshot.
     //SliceRange sr = new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[49]), false, 150);
     SlicePredicate sp = new SlicePredicate();
@@ -509,7 +511,7 @@ public void testGetCounterSlice() throws HectorException {
     assertEquals(50, cols.size());

     Collections.sort(columnnames);
-    
+
     ArrayList<ByteBuffer> gotlist = new ArrayList<ByteBuffer>(50);
     for (int i = 0; i < 50; i++) {
       CounterColumn cc = cols.get(i);
@@ -931,7 +933,7 @@ public void testMultigetCount() {
     assertEquals(5,counts.size());
     assertEquals(new Integer(25),counts.entrySet().iterator().next().getValue());

-    slicePredicate.setSlice_range(new SliceRange(StringSerializer.get().toByteBuffer(""), 
+    slicePredicate.setSlice_range(new SliceRange(StringSerializer.get().toByteBuffer(""),
         StringSerializer.get().toByteBuffer(""), false, 5));
     counts = keyspace.multigetCount(keys, clp, slicePredicate);
diff --git a/object-mapper/pom.xml b/object-mapper/pom.xml
index 6b7f768a1..71557edfe 100644
--- a/object-mapper/pom.xml
+++ b/object-mapper/pom.xml
@@ -29,7 +29,7 @@
-    
+
     <dependency>
       <groupId>org.slf4j</groupId>
@@ -65,6 +65,12 @@
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.cassandra</groupId>
+      <artifactId>cassandra-thrift</artifactId>
+      <version>0.8.0-beta1</version>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
@@ -82,7 +88,7 @@
       <artifactId>mockito-all</artifactId>
       <version>1.8.2</version>
       <scope>test</scope>
-    </dependency>
+    </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
@@ -94,8 +100,7 @@
     </dependency>
-    
-    <dependency>
+    <dependency>
       <groupId>org.apache.cassandra</groupId>
       <artifactId>cassandra-javautils</artifactId>
       <scope>test</scope>
@@ -139,7 +144,7 @@
       <version>2.0.1</version>
       <type>test-jar</type>
       <scope>test</scope>
-    </dependency>
+    </dependency>
diff --git a/object-mapper/src/test/java/me/prettyprint/hom/CassandraTestBase.java b/object-mapper/src/test/java/me/prettyprint/hom/CassandraTestBase.java
index 4e1c4a3d3..9e99b2e00 100644
--- a/object-mapper/src/test/java/me/prettyprint/hom/CassandraTestBase.java
+++ b/object-mapper/src/test/java/me/prettyprint/hom/CassandraTestBase.java
@@ -47,28 +47,23 @@ public static void startCassandraInstance(String pathToDataDir) throws TTranspor
     cleaner.prepare();
     EmbeddedCassandraService cassandra = new EmbeddedCassandraService();
     try {
-      cassandra.init();
+      cassandraStarted = true;
+      cassandra.start();
     }
-    catch (TTransportException e) {
+    catch (IOException e) {
       System.out.println( "Could not initialize Cassandra");
       e.printStackTrace();
       throw e;
     }
-    cassandraStarted = true;
-
-    Thread t = new Thread(cassandra);
-    t.setName(cassandra.getClass().getSimpleName());
-    t.setDaemon(true);
-    t.start();
-
     System.out.println( "Successfully started Cassandra");
   }

   public static void createKeyspace(Cluster cluster, String name, String strategy,
       int replicationFactor, List<CfDef> cfDefList) {
     try {
-      KsDef ksDef = new KsDef(name, strategy, replicationFactor, cfDefList);
+      KsDef ksDef = new KsDef(name, strategy, cfDefList);
+      ksDef.setReplication_factor(replicationFactor);
       cluster.addKeyspace(new ThriftKsDef(ksDef));
       return;
     }
@@ -94,16 +89,16 @@ public static void setupKeyspace() throws TTransportException,
       InterruptedException, NoSuchMethodException, IllegalAccessException,
       InvocationTargetException {
     startCassandraInstance("tmp/var/lib/cassandra");
-    
+
     ArrayList<CfDef> cfDefList = new ArrayList<CfDef>(2);
     cfDefList.add(new CfDef("TestKeyspace", "TestBeanColumnFamily").setComparator_type(BytesType.class.getSimpleName())
         .setKey_cache_size(0).setRow_cache_size(0).setGc_grace_seconds(86400));
     cfDefList.add(new CfDef("TestKeyspace", "CustomIdColumnFamily").setComparator_type(BytesType.class.getSimpleName())
         .setKey_cache_size(0).setRow_cache_size(0).setGc_grace_seconds(86400));
     cfDefList.add(new CfDef("TestKeyspace", "SimpleTestBeanColumnFamily").setComparator_type(BytesType.class.getSimpleName())
-        .setKey_cache_size(0).setRow_cache_size(0).setGc_grace_seconds(86400)); 
+        .setKey_cache_size(0).setRow_cache_size(0).setGc_grace_seconds(86400));
     cfDefList.add(new CfDef("TestKeyspace", "SimpleRelationshipBeanColumnFamily").setComparator_type(BytesType.class.getSimpleName())
-        .setKey_cache_size(0).setRow_cache_size(0).setGc_grace_seconds(86400)); 
+        .setKey_cache_size(0).setRow_cache_size(0).setGc_grace_seconds(86400));
     cfDefList.add(new CfDef("TestKeyspace", "NoAnonymousColumnFamily").setComparator_type(BytesType.class.getSimpleName())
         .setKey_cache_size(0).setRow_cache_size(0).setGc_grace_seconds(86400));
     cfDefList.add(new CfDef("TestKeyspace", "ComplexColumnFamily").setComparator_type(BytesType.class.getSimpleName())
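Two 0.8 API shifts drive the CassandraTestBase changes: EmbeddedCassandraService now exposes a start() method that throws IOException (replacing 0.7's init() plus a hand-rolled daemon thread), and the KsDef constructor no longer takes a replication factor. A condensed sketch of the new path as used above; the keyspace name and strategy value here are illustrative:

    import java.io.IOException;
    import java.util.List;
    import org.apache.cassandra.service.EmbeddedCassandraService;
    import org.apache.cassandra.thrift.CfDef;
    import org.apache.cassandra.thrift.KsDef;

    public class EmbeddedStartupSketch {
      static void startAndDefine(List<CfDef> cfDefs) throws IOException {
        EmbeddedCassandraService cassandra = new EmbeddedCassandraService();
        cassandra.start();  // starts the embedded server; no manual thread management

        KsDef ksDef = new KsDef("TestKeyspace",
            "org.apache.cassandra.locator.SimpleStrategy", cfDefs);
        ksDef.setReplication_factor(1);  // moved out of the constructor in 0.8
      }
    }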
diff --git a/object-mapper/src/test/resources/cassandra.yaml b/object-mapper/src/test/resources/cassandra.yaml
index 1bf3adc8f..da813423c 100644
--- a/object-mapper/src/test/resources/cassandra.yaml
+++ b/object-mapper/src/test/resources/cassandra.yaml
@@ -1,9 +1,9 @@
-# Cassandra storage config YAML 
+# Cassandra storage config YAML

-#NOTE !!!!!!!! NOTE 
+#NOTE !!!!!!!! NOTE
 # See http://wiki.apache.org/cassandra/StorageConfiguration for
 # full explanations of configuration directives
-#NOTE !!!!!!!! NOTE 
+#NOTE !!!!!!!! NOTE

 # The name of the cluster. This is mainly used to prevent machines in
 # one logical cluster from joining another.
@@ -58,10 +58,10 @@ commitlog_directory: target/cassandra-data/commitlog
 # saved caches
 saved_caches_directory: target/cassandra-data/saved_caches

-# Size to allow commitlog to grow to before creating a new segment 
+# Size to allow commitlog to grow to before creating a new segment
 commitlog_rotation_threshold_in_mb: 128

-# commitlog_sync may be either "periodic" or "batch." 
+# commitlog_sync may be either "periodic" or "batch."
 # When in batch mode, Cassandra won't ack writes until the commit log
 # has been fsynced to disk. It will wait up to
 # CommitLogSyncBatchWindowInMS milliseconds for other writes, before
@@ -73,12 +73,17 @@ commitlog_sync: periodic
 # milliseconds.
 commitlog_sync_period_in_ms: 10000

-# Addresses of hosts that are deemed contact points.
-# Cassandra nodes use this list of hosts to find each other and learn
-# the topology of the ring. You must change this if you are running
-# multiple nodes!
-seeds:
-    - 127.0.0.1
+# any class that implements the SeedProvider interface and has a constructor that takes a Map of
+# parameters will do.
+seed_provider:
+    # Addresses of hosts that are deemed contact points.
+    # Cassandra nodes use this list of hosts to find each other and learn
+    # the topology of the ring. You must change this if you are running
+    # multiple nodes!
+    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+      parameters:
+          # seeds is actually a comma-delimited list of addresses.
+          - seeds: "127.0.0.1"

 # Access mode. mmapped i/o is substantially faster, but only practical on
 # a 64bit machine (which notably does not include EC2 "small" instances)
@@ -105,7 +110,7 @@ concurrent_writes: 8
 # By default this will be set to the amount of data directories defined.
 #memtable_flush_writers: 1

-# Buffer size to use when performing contiguous column slices. 
+# Buffer size to use when performing contiguous column slices.
 # Increase this to the size of the column slices you typically perform
 sliced_buffer_size_in_kb: 64

@@ -115,7 +120,7 @@ storage_port: 7001
 # Address to bind to and tell other Cassandra nodes to connect to. You
 # _must_ change this if you want multiple nodes to be able to
 # communicate!
-# 
+#
 # Leaving it blank leaves it up to InetAddress.getLocalHost(). This
 # will always do the Right Thing *if* the node is properly configured
 # (hostname, name resolution, etc), and the Right Thing is to use the
@@ -127,7 +132,7 @@ listen_address: localhost
 # The address to bind the Thrift RPC service to -- clients connect
 # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
 # you want Thrift to listen on all interfaces.
-# 
+#
 # Leaving this blank has the same effect it does for ListenAddress,
 # (i.e. it will be based on the configured hostname of the node).
 rpc_address: localhost
@@ -178,7 +183,7 @@ column_index_size_in_kb: 64
 # will be logged specifying the row key.
 in_memory_compaction_limit_in_mb: 64

-# Time to wait for a reply from other nodes before failing the command 
+# Time to wait for a reply from other nodes before failing the command
 rpc_timeout_in_ms: 10000

 # phi value that must be reached for a host to be marked down.
@@ -208,7 +213,7 @@ endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
 dynamic_snitch: true
 # controls how often to perform the more expensive part of host score
 # calculation
-dynamic_snitch_update_interval_in_ms: 100 
+dynamic_snitch_update_interval_in_ms: 100
 # controls how often to reset all host scores, allowing a bad host to
 # possibly recover
 dynamic_snitch_reset_interval_in_ms: 600000
@@ -238,7 +243,7 @@ request_scheduler: org.apache.cassandra.scheduler.NoScheduler
 # NoScheduler - Has no options
 # RoundRobin
 #  - throttle_limit -- The throttle_limit is the number of in-flight
-#                      requests per client. Requests beyond 
+#                      requests per client. Requests beyond
 #                      that limit are queued up until
 #                      running requests can complete.
 #                      The value of 80 here is twice the number of
@@ -266,7 +271,7 @@ request_scheduler: org.apache.cassandra.scheduler.NoScheduler
 # the index is at the cost of space.
 index_interval: 128

-# A ColumnFamily is the Cassandra concept closest to a relational table. 
+# A ColumnFamily is the Cassandra concept closest to a relational table.
 #
 # Keyspaces are separate groups of ColumnFamilies. Except in very
 # unusual circumstances you will have one Keyspace per application.
@@ -277,8 +282,8 @@ index_interval: 128
 # - replica_placement_strategy: the class that determines how replicas
 #   are distributed among nodes. Contains both the class as well as
 #   configuration information. Must extend AbstractReplicationStrategy.
-#   Out of the box, Cassandra provides 
-#     * org.apache.cassandra.locator.SimpleStrategy 
+#   Out of the box, Cassandra provides
+#     * org.apache.cassandra.locator.SimpleStrategy
 #     * org.apache.cassandra.locator.NetworkTopologyStrategy
 #     * org.apache.cassandra.locator.OldNetworkTopologyStrategy
 #
@@ -286,7 +291,7 @@ index_interval: 128
 #   replica at the node whose token is closest to the key (as determined
 #   by the Partitioner), and additional replicas on subsequent nodes
 #   along the ring in increasing Token order.
-# 
+#
 #   With NetworkTopologyStrategy,
 #   for each datacenter, you can specify how many replicas you want
 #   on a per-keyspace basis. Replicas are placed on different racks
@@ -299,8 +304,8 @@ index_interval: 128
 #     DC1 : 3
 #     DC2 : 2
 #     DC3 : 1
-# 
-#   OldNetworkToplogyStrategy [formerly RackAwareStrategy] 
+#
+#   OldNetworkToplogyStrategy [formerly RackAwareStrategy]
 #   places one replica in each of two datacenters, and the third on a
 #   different rack in in the first. Additional datacenters are not
 #   guaranteed to get a replica. Additional replicas after three are placed
@@ -318,7 +323,7 @@ index_interval: 128
 #   and IntegerType (a generic variable-length integer type).
 #   You can also specify the fully-qualified class name to a class of
 #   your choice extending org.apache.cassandra.db.marshal.AbstractType.
-# 
+#
 # ColumnFamily optional parameters:
 # - keys_cached: specifies the number of keys per sstable whose
 #   locations we keep in memory in "mostly LRU" order. (JUST the key
@@ -330,7 +335,7 @@ index_interval: 128
 #   or ColumnFamilies with high write:read ratios. Specify a fraction
 #   (value less than 1) or an absolute number of rows to cache.
 #   Defaults to 0. (i.e. row caching is off by default)
-# - comment: used to attach additional human-readable information about 
+# - comment: used to attach additional human-readable information about
 #   the column family to its definition.
 # - read_repair_chance: specifies the probability with which read
 #   repairs should be invoked on non-quorum reads. must be between 0
@@ -352,7 +357,7 @@ index_interval: 128
 #   row caches. The row caches can be saved periodically and if one
 #   exists on startup it will be loaded.
 # - key_cache_save_period_in_seconds: number of seconds between saving
-#   key caches. The key caches can be saved periodically and if one 
+#   key caches. The key caches can be saved periodically and if one
 #   exists on startup it will be loaded.
 # - memtable_flush_after_mins: The maximum time to leave a dirty table
 #   unflushed. This should be large enough that it won't cause a flush
diff --git a/pom.xml b/pom.xml
index cb8b50d72..05a618bda 100644
--- a/pom.xml
+++ b/pom.xml
@@ -88,7 +88,7 @@
       <dependency>
         <groupId>org.apache.cassandra</groupId>
         <artifactId>cassandra-all</artifactId>
-        <version>0.7.0</version>
+        <version>0.8.0-beta1</version>
       </dependency>
       <dependency>
         <groupId>org.apache.cassandra</groupId>
@@ -96,9 +96,9 @@
         <version>0.7.0</version>
       </dependency>
       <dependency>
-        <groupId>org.apache.cassandra.deps</groupId>
+        <groupId>org.apache.thrift</groupId>
         <artifactId>libthrift</artifactId>
-        <version>0.5.0</version>
+        <version>0.6.1</version>
       </dependency>
       <dependency>
         <groupId>com.github.stephenc.high-scale-lib</groupId>
@@ -137,8 +137,8 @@
         <artifactId>mockito-all</artifactId>
         <version>1.8.2</version>
         <scope>test</scope>
-      </dependency>
-      
+      </dependency>
+