Permalink
Browse files

Migrated to cassandra-1.2.0

  • Loading branch information...
1 parent cceb669 commit 4ee09de5e2f6ae57d4adad45a36e1d043079914c @kkmishra kkmishra committed Jan 16, 2013
Showing with 905 additions and 279 deletions.
  1. +18 −5 kundera-cassandra/pom.xml
  2. +20 −9 kundera-cassandra/src/main/java/com/impetus/client/cassandra/CassandraClientBase.java
  3. +1 −1 kundera-cassandra/src/main/java/com/impetus/client/cassandra/config/CassandraPropertyReader.java
  4. +8 −5 ...ra-cassandra/src/main/java/com/impetus/client/cassandra/datahandler/CassandraDataHandlerBase.java
  5. +61 −15 kundera-cassandra/src/main/java/com/impetus/client/cassandra/query/CassQuery.java
  6. +1 −1 kundera-cassandra/src/main/java/com/impetus/client/cassandra/query/CassandraEntityReader.java
  7. +3 −2 ...ra-cassandra/src/main/java/com/impetus/client/cassandra/schemamanager/CassandraSchemaManager.java
  8. +8 −0 kundera-cassandra/src/main/java/com/impetus/client/cassandra/thrift/CQLTranslator.java
  9. +59 −10 kundera-cassandra/src/main/java/com/impetus/client/cassandra/thrift/ThriftClient.java
  10. +1 −1 kundera-cassandra/src/main/resources/log4j-server.properties
  11. +2 −1 ...era-cassandra/src/test/java/com/impetus/client/crud/compositeType/CassandraCompositeTypeTest.java
  12. +3 −2 ...era-cassandra/src/test/java/com/impetus/client/crud/datatypes/StudentCassandraBigDecimalTest.java
  13. +8 −5 kundera-cassandra/src/test/java/com/impetus/client/crud/datatypes/StudentCassandraTest.java
  14. +1 −2 kundera-cassandra/src/test/java/com/impetus/client/crud/datatypes/StudentCassandraUUIDTest.java
  15. +10 −8 kundera-cassandra/src/test/java/com/impetus/client/persistence/CassandraCli.java
  16. +9 −5 kundera-cassandra/src/test/java/com/impetus/client/persistence/NativeQueryCQLV3Test.java
  17. +4 −4 kundera-cassandra/src/test/java/com/impetus/client/persistence/NativeQueryTest.java
  18. +66 −14 kundera-cassandra/src/test/java/com/impetus/client/persistence/UpdateDeleteNamedQueryTest.java
  19. +85 −4 kundera-cassandra/src/test/java/org/apache/cassandra/auth/SimpleAuthenticator.java
  20. +120 −5 kundera-cassandra/src/test/java/org/apache/cassandra/auth/SimpleAuthority.java
  21. +132 −50 kundera-cassandra/src/test/resources/cassandra.yaml
  22. +0 −1 kundera-core/src/main/java/com/impetus/kundera/property/PropertyAccessorHelper.java
  23. +1 −1 kundera-rest/src/main/resources/log4j.properties
  24. +1 −1 kundera-rest/src/test/java/com/impetus/kundera/rest/common/Book.java
  25. +14 −15 kundera-rest/src/test/java/com/impetus/kundera/rest/resources/CRUDResourceTest.java
  26. +132 −50 kundera-rest/src/test/resources/cassandra.yaml
  27. +5 −12 kundera-tests/pom.xml
  28. +132 −50 kundera-tests/src/test/resources/cassandra.yaml
View
23 kundera-cassandra/pom.xml
@@ -50,24 +50,37 @@
<dependency>
<groupId>org.apache.cassandra</groupId>
<artifactId>cassandra-thrift</artifactId>
- <version>1.1.6</version>
+ <version>1.2.0</version>
</dependency>
<dependency>
<groupId>org.apache.cassandra</groupId>
<artifactId>cassandra-all</artifactId>
- <version>1.1.6</version>
+ <version>1.2.0</version>
</dependency>
<dependency>
<groupId>org.apache.cassandra</groupId>
<artifactId>cassandra-clientutil</artifactId>
- <version>1.1.6</version>
+ <version>1.2.0</version>
</dependency>
+
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>1.7.2</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.jboss.netty</groupId>
+ <artifactId>netty</artifactId>
+ <version>3.2.8.Final</version>
+ </dependency>
+
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
- <version>1.6.3</version>
- <scope>test</scope>
+ <version>1.7.2</version>
</dependency>
+
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
View
29 kundera-cassandra/src/main/java/com/impetus/client/cassandra/CassandraClientBase.java
@@ -107,7 +107,7 @@
private static Log log = LogFactory.getLog(CassandraClientBase.class);
/** The cql version. */
- private String cqlVersion = CassandraConstants.CQL_VERSION_2_0;
+ private String cqlVersion = CassandraConstants.CQL_VERSION_3_0;
/** The consistency level. */
private ConsistencyLevel consistencyLevel = ConsistencyLevel.ONE;
@@ -135,7 +135,7 @@ protected CassandraClientBase(String persistenceUnit, Map<String, Object> extern
this.externalProperties = externalProperties;
setBatchSize(persistenceUnit, this.externalProperties);
cqlVersion = CassandraPropertyReader.csmd != null ? CassandraPropertyReader.csmd.getCqlVersion()
- : CassandraConstants.CQL_VERSION_2_0;
+ : CassandraConstants.CQL_VERSION_3_0;
}
/**
@@ -682,8 +682,18 @@ public List executeQuery(String cqlQuery, Class clazz, List<String> relationalFi
entityMetadata.getPersistenceUnit());
log.info("executing query " + cqlQuery);
- result = conn.execute_cql_query(ByteBufferUtil.bytes(cqlQuery),
- org.apache.cassandra.thrift.Compression.NONE);
+
+ if (getCqlVersion().equals(CassandraConstants.CQL_VERSION_2_0)
+ && !metaModel.isEmbeddable(entityMetadata.getIdAttribute().getBindableJavaType()))
+ {
+ result = conn.execute_cql_query(ByteBufferUtil.bytes(cqlQuery),
+ org.apache.cassandra.thrift.Compression.NONE);
+ }
+ else
+ {
+ result = conn.execute_cql3_query(ByteBufferUtil.bytes(cqlQuery),
+ org.apache.cassandra.thrift.Compression.NONE, consistencyLevel);
+ }
if (result != null && (result.getRows() != null || result.getRowsSize() > 0))
{
returnedEntities = new ArrayList<Object>(result.getRowsSize());
@@ -695,8 +705,9 @@ public List executeQuery(String cqlQuery, Class clazz, List<String> relationalFi
if (!metaModel.isEmbeddable(entityMetadata.getIdAttribute().getBindableJavaType()))
{
- rowKey = PropertyAccessorHelper.getObject(
- entityMetadata.getIdAttribute().getBindableJavaType(), row.getKey());
+ // rowKey = PropertyAccessorHelper.getObject(
+ // entityMetadata.getIdAttribute().getBindableJavaType(),
+ // row.getKey());
}
// String rowKeys = Bytes.toUTF8(row.getKey());
ThriftRow thriftRow = null;
@@ -897,8 +908,8 @@ protected void onpersistOverCompositeKey(EntityMetadata entityMetadata, Object e
translation.get(TranslationType.VALUE));
insert_Query = StringUtils
.replace(insert_Query, CQLTranslator.COLUMNS, translation.get(TranslationType.COLUMN));
- cassandra_client.execute_cql_query(ByteBuffer.wrap(insert_Query.getBytes(Constants.CHARSET_UTF8)),
- Compression.NONE);
+ cassandra_client.execute_cql3_query(ByteBuffer.wrap(insert_Query.getBytes(Constants.CHARSET_UTF8)),
+ Compression.NONE, consistencyLevel);
}
/**
@@ -1424,7 +1435,7 @@ else if (metaModel.isEmbeddable(metadata.getIdAttribute().getBindableJavaType())
// Create Mutation Map
Map<String, List<Mutation>> columnFamilyValues = new HashMap<String, List<Mutation>>();
columnFamilyValues.put(columnFamily, insertion_list);
- Bytes b = CassandraUtilities.toBytes(tf.getId(), tf.getId().getClass());
+ Bytes b = CassandraUtilities.toBytes(tf.getId(), entityMetadata.getIdAttribute().getBindableJavaType());
mutationMap.put(b.getBytes(), columnFamilyValues);
return mutationMap;
View
2 ...-cassandra/src/main/java/com/impetus/client/cassandra/config/CassandraPropertyReader.java
@@ -185,7 +185,7 @@ public String getCqlVersion()
}
}
}
- return CassandraConstants.CQL_VERSION_2_0;
+ return CassandraConstants.CQL_VERSION_3_0;
}
public Schema getSchema(String schemaName)
View
13 ...ndra/src/main/java/com/impetus/client/cassandra/datahandler/CassandraDataHandlerBase.java
@@ -964,9 +964,6 @@ private Object onColumn(Column column, EntityMetadata m, Object entity, EntityTy
List<String> relationNames, boolean isWrapReq, Map<String, Object> relations)
throws InstantiationException, IllegalAccessException
{
- MetamodelImpl metaModel = (MetamodelImpl) KunderaMetadata.INSTANCE.getApplicationMetadata().getMetamodel(
- m.getPersistenceUnit());
-
String thriftColumnName = PropertyAccessorFactory.STRING.fromBytes(String.class, column.getName());
byte[] thriftColumnValue = column.getValue();
return populateViaThrift(m, entity, entityType, relationNames, relations, thriftColumnName, thriftColumnValue);
@@ -1029,17 +1026,23 @@ private Object populateViaThrift(EntityMetadata m, Object entity, EntityType ent
{
if (thriftColumnValue != null)
{
+ MetamodelImpl metaModel = (MetamodelImpl) KunderaMetadata.INSTANCE.getApplicationMetadata()
+ .getMetamodel(m.getPersistenceUnit());
String fieldName = m.getFieldName(thriftColumnName);
Attribute attribute = fieldName != null ? entityType.getAttribute(fieldName) : null;
if (attribute != null)
{
entity = initialize(m, entity, null);
+ String idColumnName = ((AbstractAttribute) m.getIdAttribute()).getJPAColumnName();
+ if (!metaModel.isEmbeddable(m.getIdAttribute().getBindableJavaType())
+ && thriftColumnName.equals(idColumnName))
+ {
+ PropertyAccessorHelper.setId(entity, m, (byte[]) thriftColumnValue);
+ }
setFieldValue(entity, thriftColumnValue, attribute);
}
else
{
- MetamodelImpl metaModel = (MetamodelImpl) KunderaMetadata.INSTANCE.getApplicationMetadata()
- .getMetamodel(m.getPersistenceUnit());
if (metaModel.isEmbeddable(m.getIdAttribute().getBindableJavaType()))
{
entity = initialize(m, entity, null);
View
76 kundera-cassandra/src/main/java/com/impetus/client/cassandra/query/CassQuery.java
@@ -118,7 +118,8 @@ public CassQuery(String query, KunderaQuery kunderaQuery, PersistenceDelegator p
if (appMetadata.isNative(getJPAQuery()))
{
- result = ((CassandraClientBase) client).executeQuery(appMetadata.getQuery(getJPAQuery()), m.getEntityClazz(), null);
+ result = ((CassandraClientBase) client).executeQuery(appMetadata.getQuery(getJPAQuery()),
+ m.getEntityClazz(), null);
}
else
{
@@ -171,8 +172,8 @@ public CassQuery(String query, KunderaQuery kunderaQuery, PersistenceDelegator p
ApplicationMetadata appMetadata = KunderaMetadata.INSTANCE.getApplicationMetadata();
if (appMetadata.isNative(getJPAQuery()))
{
- ls = (List<EnhanceEntity>) ((CassandraClientBase) client).executeQuery(appMetadata.getQuery(getJPAQuery()), m.getEntityClazz(),
- null);
+ ls = (List<EnhanceEntity>) ((CassandraClientBase) client).executeQuery(appMetadata.getQuery(getJPAQuery()),
+ m.getEntityClazz(), null);
}
else
{
@@ -206,8 +207,8 @@ protected int onExecuteUpdate()
EntityMetadata m = getEntityMetadata();
if (KunderaMetadata.INSTANCE.getApplicationMetadata().isNative(getJPAQuery()))
{
- ((CassandraClientBase) persistenceDelegeator.getClient(m)).executeQuery(KunderaMetadata.INSTANCE.getApplicationMetadata().getQuery(getJPAQuery()), m.getEntityClazz(),
- null);
+ ((CassandraClientBase) persistenceDelegeator.getClient(m)).executeQuery(KunderaMetadata.INSTANCE
+ .getApplicationMetadata().getQuery(getJPAQuery()), m.getEntityClazz(), null);
}
else if (kunderaQuery.isDeleteUpdate())
{
@@ -525,13 +526,8 @@ private Bytes getBytesValue(String jpaFieldName, EntityMetadata m, Object value)
addWhereClause(builder);
- isPresent = onCondition(m, metaModel, compoundKey, idColumn, builder, isPresent, translator);
+ onCondition(m, metaModel, compoundKey, idColumn, builder, isPresent, translator);
- // String last AND clause.
- if (isPresent)
- {
- builder.delete(builder.lastIndexOf(CQLTranslator.AND_CLAUSE), builder.length());
- }
result = ((CassandraClientBase) client).executeQuery(builder.toString(), m.getEntityClazz(), null);
return result;
@@ -559,6 +555,8 @@ private Bytes getBytesValue(String jpaFieldName, EntityMetadata m, Object value)
private boolean onCondition(EntityMetadata m, MetamodelImpl metaModel, EmbeddableType compoundKey, String idColumn,
StringBuilder builder, boolean isPresent, CQLTranslator translator)
{
+ String partitionKey = null;
+ boolean allowFiltering=false;
for (Object o : getKunderaQuery().getFilterClauseQueue())
{
if (o instanceof FilterClause)
@@ -576,9 +574,19 @@ private boolean onCondition(EntityMetadata m, MetamodelImpl metaModel, Embeddabl
Field[] fields = m.getIdAttribute().getBindableJavaType().getDeclaredFields();
for (Field field : fields)
{
+
Attribute compositeColumn = compoundKey.getAttribute(field.getName());
translator.buildWhereClause(builder, ((AbstractAttribute) compositeColumn).getJPAColumnName(),
field, value);
+ if (partitionKey == null)
+ {
+ partitionKey = compositeColumn.getName();
+ }
+
+ if(!allowFiltering)
+ {
+ allowFiltering= fieldName.equals(partitionKey);
+ }
}
}
@@ -587,16 +595,53 @@ else if (metaModel.isEmbeddable(m.getIdAttribute().getBindableJavaType())
{
// Means it is a case of composite column.
fieldName = fieldName.substring(fieldName.indexOf(".") + 1);
- ((AbstractAttribute)compoundKey.getAttribute(fieldName)).getJPAColumnName();
+ ((AbstractAttribute) compoundKey.getAttribute(fieldName)).getJPAColumnName();
// compositeColumns.add(new
// BasicDBObject(compositeColumn,value));
- translator.buildWhereClause(builder,((AbstractAttribute)compoundKey.getAttribute(fieldName)).getJPAColumnName(), value, condition);
- } else
+ translator.buildWhereClause(builder,
+ ((AbstractAttribute) compoundKey.getAttribute(fieldName)).getJPAColumnName(), value,
+ condition);
+ if (partitionKey == null)
+ {
+ partitionKey = compoundKey.getAttribute(fieldName).getName();
+ }
+ if(!allowFiltering)
+ {
+ allowFiltering= fieldName.equals(partitionKey);
+ }
+ }
+ else
{
+ Field[] fields = m.getIdAttribute().getBindableJavaType().getDeclaredFields();
+ Attribute compositeColumn = compoundKey.getAttribute(fields[0].getName());
+
+ if (partitionKey == null)
+ {
+ partitionKey = compositeColumn.getName();
+ }
+ if(!allowFiltering)
+ {
+ allowFiltering= fieldName.equals(partitionKey);
+ }
+
translator.buildWhereClause(builder, fieldName, value, condition);
}
}
}
+
+
+ // String last AND clause.
+ if (isPresent)
+ {
+ builder.delete(builder.lastIndexOf(CQLTranslator.AND_CLAUSE), builder.length());
+ }
+
+ if (allowFiltering)
+ {
+ translator.buildFilteringClause(builder);
+ }
+
+
return isPresent;
}
@@ -626,7 +671,8 @@ private void addWhereClause(StringBuilder builder)
* @param translator
* the translator
*/
- private StringBuilder appendColumns(StringBuilder builder, List<String> columns, String selectQuery, CQLTranslator translator)
+ private StringBuilder appendColumns(StringBuilder builder, List<String> columns, String selectQuery,
+ CQLTranslator translator)
{
if (columns != null)
{
View
2 ...era-cassandra/src/main/java/com/impetus/client/cassandra/query/CassandraEntityReader.java
@@ -238,7 +238,7 @@ public List handleFindByRange(EntityMetadata m, Client client, List result,
}
catch (Exception e)
{
- log.error("Error while executing find by range. Details: " + e.getMessage());
+ log.error("Error while executing find by range. Details: " , e);
throw new QueryHandlerException(e);
}
return result;
View
5 ...ndra/src/main/java/com/impetus/client/cassandra/schemamanager/CassandraSchemaManager.java
@@ -511,8 +511,9 @@ private void onCompoundKey(TableInfo tableInfo) throws InvalidRequestException,
cassandra_client.set_keyspace(databaseName);
try
{
- cassandra_client.execute_cql_query(
- ByteBuffer.wrap(queryBuilder.toString().getBytes(Constants.CHARSET_UTF8)), Compression.NONE);
+ cassandra_client.execute_cql3_query(
+ ByteBuffer.wrap(queryBuilder.toString().getBytes(Constants.CHARSET_UTF8)), Compression.NONE,
+ ConsistencyLevel.ONE);
}
catch (UnsupportedEncodingException e)
{
View
8 kundera-cassandra/src/main/java/com/impetus/client/cassandra/thrift/CQLTranslator.java
@@ -499,4 +499,12 @@ private static final String getType(final String propertyName)
return mapper.get(propertyName);
}
}
+
+ /**
+ * @param builder
+ */
+ public void buildFilteringClause(StringBuilder builder)
+ {
+ builder.append(" ALLOW FILTERING");
+ }
}
View
69 kundera-cassandra/src/main/java/com/impetus/client/cassandra/thrift/ThriftClient.java
@@ -54,7 +54,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.thrift.TException;
import org.scale7.cassandra.pelops.Bytes;
-import org.scale7.cassandra.pelops.Selector;
import com.impetus.client.cassandra.CassandraClientBase;
import com.impetus.client.cassandra.common.CassandraUtilities;
@@ -154,6 +153,7 @@ protected void onPersist(EntityMetadata entityMetadata, Object entity, Object id
{
prepareMutation(entityMetadata, entity, id, rlHolders, mutationMap);
// Write Mutation map to database
+// conn.set_cql_version("3.0.0");
conn.batch_mutate(mutationMap, getConsistencyLevel());
}
mutationMap.clear();
@@ -478,13 +478,23 @@ public final List find(Class entityClass, List<String> relationNames, boolean is
public Object[] findIdsByColumn(String schemaName, String tableName, String pKeyName, String columnName,
Object columnValue, Class entityClazz)
{
- SlicePredicate slicePredicate = Selector.newColumnsPredicateAll(false, 10000);
+ SlicePredicate slicePredicate = new SlicePredicate();
+
+ slicePredicate.setSlice_range(new SliceRange(Bytes.EMPTY.getBytes(), Bytes.EMPTY.getBytes(), false, 1000));
+
EntityMetadata metadata = KunderaMetadataManager.getEntityMetadata(entityClazz);
String childIdStr = (String) columnValue;
IndexExpression ie = new IndexExpression(Bytes.fromUTF8(
columnName + Constants.JOIN_COLUMN_NAME_SEPARATOR + childIdStr).getBytes(), IndexOperator.EQ, Bytes
.fromUTF8(childIdStr).getBytes());
- IndexClause ix = Selector.newIndexClause(Bytes.EMPTY, 10000, ie);
+
+ List<IndexExpression> expressions = new ArrayList<IndexExpression>();
+ expressions.add(ie);
+
+ IndexClause ix = new IndexClause();
+ ix.setStart_key(Bytes.EMPTY.toByteArray());
+ ix.setCount(1000);
+ ix.setExpressions(expressions);
List<Object> rowKeys = new ArrayList<Object>();
ColumnParent columnParent = new ColumnParent(tableName);
@@ -536,12 +546,19 @@ public final List find(Class entityClass, List<String> relationNames, boolean is
{
EntityMetadata m = KunderaMetadataManager.getEntityMetadata(entityClazz);
- SlicePredicate slicePredicate = Selector.newColumnsPredicateAll(false, 10000);
+ SlicePredicate slicePredicate = new SlicePredicate();
+ slicePredicate.setSlice_range(new SliceRange(Bytes.EMPTY.getBytes(), Bytes.EMPTY.getBytes(), false, 1000));
List<Object> entities = null;
IndexExpression ie = new IndexExpression(Bytes.fromUTF8(colName).getBytes(), IndexOperator.EQ,
ByteBuffer.wrap(PropertyAccessorHelper.getBytes(colValue)));
- IndexClause ix = Selector.newIndexClause(Bytes.EMPTY, 10000, ie);
+ List<IndexExpression> expressions = new ArrayList<IndexExpression>();
+ expressions.add(ie);
+
+ IndexClause ix = new IndexClause();
+ ix.setStart_key(Bytes.EMPTY.toByteArray());
+ ix.setCount(1000);
+ ix.setExpressions(expressions);
ColumnParent columnParent = new ColumnParent(m.getTableName());
List<KeySlice> keySlices;
@@ -811,12 +828,27 @@ public List find(List<IndexClause> ixClause, EntityMetadata m, boolean isRelatio
try
{
// ixClause can be 0,1 or more!
- SlicePredicate slicePredicate = Selector.newColumnsPredicateAll(false, Integer.MAX_VALUE);
+ SlicePredicate slicePredicate = new SlicePredicate();
+
if (columns != null && !columns.isEmpty())
{
- slicePredicate = Selector.newColumnsPredicate(columns.toArray(new String[] {}));
+ List asList = new ArrayList(32);
+ for (String colName : columns)
+ {
+ if (colName != null)
+ {
+ asList.add(Bytes.fromUTF8(colName).getBytes());
+ }
+ }
+ slicePredicate.setColumn_names(asList);
+ }
+ else
+ {
+ SliceRange sliceRange = new SliceRange();
+ sliceRange.setStart(Bytes.EMPTY.getBytes());
+ sliceRange.setFinish(Bytes.EMPTY.getBytes());
+ slicePredicate.setSlice_range(sliceRange);
}
-
conn = PelopsUtils.getCassandraConnection(pool);
if (ixClause.isEmpty())
@@ -911,11 +943,28 @@ public List find(List<IndexClause> ixClause, EntityMetadata m, boolean isRelatio
public List findByRange(byte[] minVal, byte[] maxVal, EntityMetadata m, boolean isWrapReq, List<String> relations,
List<String> columns, List<IndexExpression> conditions) throws Exception
{
- SlicePredicate slicePredicate = Selector.newColumnsPredicateAll(false, Integer.MAX_VALUE);
+ SlicePredicate slicePredicate = new SlicePredicate();
+
if (columns != null && !columns.isEmpty())
{
- slicePredicate = Selector.newColumnsPredicate(columns.toArray(new String[] {}));
+ List asList = new ArrayList(32);
+ for (String colName : columns)
+ {
+ if (colName != null)
+ {
+ asList.add(Bytes.fromUTF8(colName).getBytes());
+ }
+ }
+ slicePredicate.setColumn_names(asList);
+ }
+ else
+ {
+ SliceRange sliceRange = new SliceRange();
+ sliceRange.setStart(Bytes.EMPTY.getBytes());
+ sliceRange.setFinish(Bytes.EMPTY.getBytes());
+ slicePredicate.setSlice_range(sliceRange);
}
+
KeyRange keyRange = new KeyRange(10000);
keyRange.setStart_key(minVal == null ? "".getBytes() : minVal);
keyRange.setEnd_key(maxVal == null ? "".getBytes() : maxVal);
View
2 kundera-cassandra/src/main/resources/log4j-server.properties
@@ -15,7 +15,7 @@
# limitations under the License.
-log4j.rootLogger=WARN, DRFA, CONSOLE
+log4j.rootLogger=DEBUG, DRFA, CONSOLE
# rolling log file ("system.log
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
View
3 ...andra/src/test/java/com/impetus/client/crud/compositeType/CassandraCompositeTypeTest.java
@@ -80,7 +80,8 @@ public void setUp() throws Exception
{
CassandraCli.cassandraSetUp();
CassandraCli.initClient();
-// loadData();
+ CassandraCli.dropKeySpace("CompositeCassandra");
+ // loadData();
emf = Persistence.createEntityManagerFactory(PERSISTENCE_UNIT);
}
View
5 ...andra/src/test/java/com/impetus/client/crud/datatypes/StudentCassandraBigDecimalTest.java
@@ -182,7 +182,7 @@ public void testFindByQuery(boolean useSameEm)
findByNameAndAgeWithOrClause();
findByAgeAndNameGTAndLT();
findByNameAndAGEBetween();
- findByRange();
+// findByRange();
}
private void findByAgeAndNameGTAndLT()
@@ -616,6 +616,7 @@ public void createSchema()
cfDef.name = "StudentCassandraBigDecimal";
cfDef.keyspace = keyspace;
cfDef.setKey_validation_class("DecimalType");
+ cfDef.setComparator_type("UTF8Type");
ColumnDef name = new ColumnDef(ByteBuffer.wrap("NAME".getBytes()), "UTF8Type");
name.index_type = IndexType.KEYS;
@@ -679,7 +680,7 @@ public void createSchema()
public void dropSchema()
{
- CassandraCli.dropKeySpace(keyspace);
+ CassandraCli.dropKeySpace(keyspace);
}
}
View
13 kundera-cassandra/src/test/java/com/impetus/client/crud/datatypes/StudentCassandraTest.java
@@ -16,6 +16,7 @@
package com.impetus.client.crud.datatypes;
import java.io.IOException;
+import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -414,6 +415,7 @@ else if (studentCassandra.getStudentId() == studentId2)
Assert.assertEquals((short) 8, results.get(0).getCgpa());
Assert.assertEquals((byte) 5, results.get(0).getDigitalSignature());
em.clear();
+
// query on percentage and height.
query = "Select s from StudentCassandra s where s.percentage >= ?2 and s.percentage <= ?3 and s.height =?1";
q = em.createQuery(query);
@@ -623,22 +625,22 @@ else if (studentCassandra.getStudentId() == studentId2)
private void updateQueryTest()
{
- Query q = em.createQuery("update StudentCassandra s set s.studentName = :newName where s.studentName = :oldName");
+ Query q = em
+ .createQuery("update StudentCassandra s set s.studentName = :newName where s.studentName = :oldName");
q.setParameter("newName", "NewAmresh");
q.setParameter("oldName", "Amresh");
q.executeUpdate();
-
+
em.clear();
// // find by id.
StudentEntityDef s = em.find(StudentCassandra.class, studentId1);
Assert.assertEquals("NewAmresh", s.getStudentName());
-
-
+
q = em.createQuery("update StudentCassandra s set s.studentName = :newName where s.studentName = :oldName");
q.setParameter("newName", "Amresh");
q.setParameter("oldName", "NewAmresh");
q.executeUpdate();
-
+
em.clear();
// // find by id.
s = em.find(StudentCassandra.class, studentId1);
@@ -696,6 +698,7 @@ private void loadData() throws TException, InvalidRequestException, UnavailableE
cfDef.name = "STUDENT";
cfDef.keyspace = "KunderaExamples";
cfDef.setKey_validation_class("LongType");
+ cfDef.setComparator_type("UTF8Type");
ColumnDef columnDef2 = new ColumnDef(ByteBuffer.wrap("UNIQUE_ID".getBytes()), "LongType");
columnDef2.index_type = IndexType.KEYS;
View
3 ...a-cassandra/src/test/java/com/impetus/client/crud/datatypes/StudentCassandraUUIDTest.java
@@ -83,7 +83,7 @@ public void testExecuteUseSameEm()
*/
private void testNativeQuery(boolean b)
{
- String s = "Select * From StudentCassandraUUID";
+ String s = "Select * From " + "\"StudentCassandraUUID\"";
EntityManager em = emf.createEntityManager();
Query q = em.createNativeQuery(s, StudentCassandraUUID.class);
List<StudentCassandraUUID> results = q.getResultList();
@@ -694,5 +694,4 @@ public void dropSchema()
CassandraCli.dropKeySpace(keyspace);
}
-
}
View
18 kundera-cassandra/src/test/java/com/impetus/client/persistence/CassandraCli.java
@@ -30,6 +30,7 @@
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.CfDef;
import org.apache.cassandra.thrift.Compression;
+import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.thrift.KsDef;
import org.apache.cassandra.thrift.NotFoundException;
@@ -62,6 +63,7 @@
/** the log used by this class. */
private static Log log = LogFactory.getLog(CassandraCli.class);
+
/**
* Cassandra set up.
*
@@ -81,7 +83,6 @@
public static void cassandraSetUp() throws IOException, TException, InvalidRequestException, UnavailableException,
TimedOutException, SchemaDisagreementException
{
-
if (!checkIfServerRunning())
{
cassandra = new EmbeddedCassandraService();
@@ -194,11 +195,11 @@ public static void dropKeySpace(String keyspaceName)
try
{
client.system_drop_keyspace(keyspaceName);
-// deleteCassandraFolders("/var/lib/cassandra/data/");
-// deleteCassandraFolders("/var/lib/cassandra/data/system/");
-// deleteCassandraFolders("/var/lib/cassandra/commitlog/");
-// deleteCassandraFolders("/var/lib/cassandra/saved_caches/");
-// deleteCassandraFolders("/var/log/cassandra/");
+ // deleteCassandraFolders("/var/lib/cassandra/data/");
+ // deleteCassandraFolders("/var/lib/cassandra/data/system/");
+ // deleteCassandraFolders("/var/lib/cassandra/commitlog/");
+ // deleteCassandraFolders("/var/lib/cassandra/saved_caches/");
+ // deleteCassandraFolders("/var/log/cassandra/");
}
catch (InvalidRequestException e)
{
@@ -211,7 +212,7 @@ public static void dropKeySpace(String keyspaceName)
catch (TException e)
{
log.error(e);
- }
+ }
}
private static void deleteCassandraFolders(String dir)
@@ -326,7 +327,8 @@ public static void executeCqlQuery(String cqlQuery)
try
{
getClient().set_cql_version("3.0.0");
- getClient().execute_cql_query(ByteBuffer.wrap(cqlQuery.getBytes("UTF-8")), Compression.NONE);
+ getClient().execute_cql3_query(ByteBuffer.wrap(cqlQuery.getBytes("UTF-8")), Compression.NONE,
+ ConsistencyLevel.ONE);
}
catch (InvalidRequestException e)
{
View
14 kundera-cassandra/src/test/java/com/impetus/client/persistence/NativeQueryCQLV3Test.java
@@ -29,6 +29,7 @@
import junit.framework.Assert;
+import org.apache.cassandra.thrift.ConsistencyLevel;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -69,17 +70,18 @@ public void setUp() throws Exception
{
CassandraCli.cassandraSetUp();
// CassandraCli.initClient();
- CassandraCli.dropKeySpace(schema);
+// CassandraCli.dropKeySpace(schema);
// CassandraCli.createKeySpace(schema);
String nativeSql = "CREATE KEYSPACE " + schema
- + " with strategy_class = 'SimpleStrategy' and strategy_options:replication_factor=1";
+ + " with replication = {'class':'SimpleStrategy', 'replication_factor':1}";
+// strategy_class = 'SimpleStrategy' and strategy_options:replication_factor=1"
CassandraCli.executeCqlQuery(nativeSql);
}
/**
* Test create insert column family query.
*/
- @Test
+// @Test
public void testCreateInsertColumnFamilyQueryVersion3()
{
// CassandraCli.dropKeySpace("KunderaExamples");
@@ -217,7 +219,7 @@ public void testCQLBatch()
String useNativeSql = "USE " + schema;
EntityManagerFactory emf = getEntityManagerFactory();
String createColumnFamily = "CREATE TABLE CassandraBatchEntity ( user_name varchar PRIMARY KEY, password varchar, name varchar)";
- String batchOps = "BEGIN BATCH USING CONSISTENCY QUORUM INSERT INTO CassandraBatchEntity (user_name, password, name) VALUES ('user2', 'ch@ngem3b', 'second user') UPDATE CassandraBatchEntity SET password = 'ps22dhds' WHERE user_name = 'user2' INSERT INTO CassandraBatchEntity (user_name, password) VALUES ('user3', 'ch@ngem3c') DELETE name FROM CassandraBatchEntity WHERE user_name = 'user2' INSERT INTO CassandraBatchEntity (user_name, password, name) VALUES ('user4', 'ch@ngem3c', 'Andrew') APPLY BATCH";
+ String batchOps = "BEGIN BATCH INSERT INTO CassandraBatchEntity (user_name, password, name) VALUES ('user2', 'ch@ngem3b', 'second user') UPDATE CassandraBatchEntity SET password = 'ps22dhds' WHERE user_name = 'user2' INSERT INTO CassandraBatchEntity (user_name, password) VALUES ('user3', 'ch@ngem3c') DELETE name FROM CassandraBatchEntity WHERE user_name = 'user2' INSERT INTO CassandraBatchEntity (user_name, password, name) VALUES ('user4', 'ch@ngem3c', 'Andrew') APPLY BATCH";
EntityManager em = new EntityManagerImpl(emf, PersistenceUnitTransactionType.RESOURCE_LOCAL,
PersistenceContextType.EXTENDED);
@@ -230,10 +232,12 @@ public void testCQLBatch()
// q.getResultList();
q.executeUpdate();
+ pc.setConsistencyLevel(ConsistencyLevel.QUORUM);
q = em.createNativeQuery(createColumnFamily, CassandraBatchEntity.class);
// q.getResultList();
q.executeUpdate();
+ pc.setConsistencyLevel(ConsistencyLevel.QUORUM);
q = em.createNativeQuery(batchOps, CassandraBatchEntity.class);
// q.getResultList();
q.executeUpdate();
@@ -249,7 +253,7 @@ public void testCQLBatch()
em.createNativeQuery(createColumnFamily, CassandraBatchEntity.class).executeUpdate();
createColumnFamily = "create table test2 (key text primary key, count int)";
em.createNativeQuery(createColumnFamily, CassandraBatchEntity.class).executeUpdate();
- batchOps = "BEGIN BATCH USING CONSISTENCY QUORUM INSERT INTO test1(id, url) VALUES ('64907b40-29a1-11e2-93fa-90b11c71b811','w') INSERT INTO test2(key, count) VALUES ('key1',12) APPLY BATCH";
+ batchOps = "BEGIN BATCH INSERT INTO test1(id, url) VALUES ('64907b40-29a1-11e2-93fa-90b11c71b811','w') INSERT INTO test2(key, count) VALUES ('key1',12) APPLY BATCH";
em.createNativeQuery(batchOps, CassandraBatchEntity.class).executeUpdate();
}
View
8 kundera-cassandra/src/test/java/com/impetus/client/persistence/NativeQueryTest.java
@@ -96,7 +96,7 @@ public void testExecutNativeQuery()
// String nativeSql = "CREATE KEYSPACE " + schema
// +
// " with strategy_class = 'SimpleStrategy' and strategy_options:replication_factor=1";
- String useNativeSql = "USE " + schema;
+ String useNativeSql = "USE " + "\"KunderaExamples\"";
EntityManager em = new EntityManagerImpl(emf, PersistenceUnitTransactionType.RESOURCE_LOCAL,
PersistenceContextType.EXTENDED);
@@ -124,7 +124,7 @@ public void testReleasesNativeQueryConnection()
// +
// " with strategy_class = 'SimpleStrategy' and strategy_options:replication_factor=1";
// String useNativeSql = "USE test";
- String useNativeSql = "USE " + schema;
+ String useNativeSql = "USE " + "\"KunderaExamples\"";
EntityManager em = new EntityManagerImpl(emf, PersistenceUnitTransactionType.RESOURCE_LOCAL,
PersistenceContextType.EXTENDED);
@@ -152,7 +152,7 @@ public void testCreateInsertColumnFamilyQuery()
// +
// " with strategy_class = 'SimpleStrategy' and strategy_options:replication_factor=1";
// String useNativeSql = "USE test";
- String useNativeSql = "USE " + schema;
+ String useNativeSql = "USE " + "\"KunderaExamples\"";
EntityManagerFactoryImpl emf = getEntityManagerFactory();
EntityManager em = new EntityManagerImpl(emf, PersistenceUnitTransactionType.RESOURCE_LOCAL,
PersistenceContextType.EXTENDED);
@@ -205,7 +205,7 @@ public void testCreateInsertColumnFamilyQuery()
q.getResultList();
// select all
- String selectAll = "SELECT * FROM users WHERE state='UT' AND birth_date > 1970";
+ String selectAll = "SELECT * FROM users WHERE state='UT' AND birth_date > 1970 ALLOW FILTERING";
q = em.createNativeQuery(selectAll, CassandraEntitySample.class);
results = q.getResultList();
Assert.assertNotNull(results);
View
80 ...ra-cassandra/src/test/java/com/impetus/client/persistence/UpdateDeleteNamedQueryTest.java
@@ -15,8 +15,10 @@
******************************************************************************/
package com.impetus.client.persistence;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
@@ -26,6 +28,14 @@
import junit.framework.Assert;
+import org.apache.cassandra.thrift.CfDef;
+import org.apache.cassandra.thrift.ColumnDef;
+import org.apache.cassandra.thrift.IndexType;
+import org.apache.cassandra.thrift.InvalidRequestException;
+import org.apache.cassandra.thrift.KsDef;
+import org.apache.cassandra.thrift.NotFoundException;
+import org.apache.cassandra.thrift.SchemaDisagreementException;
+import org.apache.thrift.TException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -58,28 +68,70 @@
public void setUp() throws Exception
{
CassandraCli.cassandraSetUp();
- CassandraCli.createKeySpace("KunderaExamples");
+// CassandraCli.createKeySpace("KunderaExamples");
+
+ loadData();
+ }
+
+ /**
+ * @throws TException
+ * @throws InvalidRequestException
+ * @throws SchemaDisagreementException
+ *
+ */
+ private void loadData() throws InvalidRequestException, TException, SchemaDisagreementException
+ {
+ KsDef ksDef = null;
+ CfDef user_Def = new CfDef();
+ user_Def.name = "users";
+ user_Def.keyspace = "KunderaExamples";
+ user_Def.setComparator_type("UTF8Type");
+ user_Def.setDefault_validation_class("UTF8Type");
+ user_Def.setKey_validation_class("UTF8Type");
+ ColumnDef columnDef = new ColumnDef(ByteBuffer.wrap("birth_date".getBytes()), "IntegerType");
+ columnDef.index_type = IndexType.KEYS;
+ user_Def.addToColumn_metadata(columnDef);
+ ColumnDef columnDef1 = new ColumnDef(ByteBuffer.wrap("state".getBytes()), "UTF8Type");
+ columnDef1.index_type = IndexType.KEYS;
+ user_Def.addToColumn_metadata(columnDef1);
+
+ ColumnDef columnDef2 = new ColumnDef(ByteBuffer.wrap("full_name".getBytes()), "UTF8Type");
+ columnDef2.index_type = IndexType.KEYS;
+ user_Def.addToColumn_metadata(columnDef2);
+
+ List<CfDef> cfDefs = new ArrayList<CfDef>();
+ cfDefs.add(user_Def);
+
+ ksDef = new KsDef("KunderaExamples", "org.apache.cassandra.locator.SimpleStrategy", cfDefs);
+ // Set replication factor
+ if (ksDef.strategy_options == null)
+ {
+ ksDef.strategy_options = new LinkedHashMap<String, String>();
+ }
+ // Set replication factor, the value MUST be an integer
+ ksDef.strategy_options.put("replication_factor", "1");
+ CassandraCli.client.system_add_keyspace(ksDef);
}
@Test
public void testUpdate()
{
EntityManager em = getEntityManagerFactory().createEntityManager();
- String colFamilySql = "CREATE COLUMNFAMILY users (key varchar PRIMARY KEY,full_name varchar, birth_date int,state varchar)";
- Query q1 = em.createNativeQuery(colFamilySql, CassandraEntitySample.class);
- q1.executeUpdate();
-
- String idxSql = "CREATE INDEX ON users (birth_date)";
- q1 = em.createNativeQuery(idxSql, CassandraEntitySample.class);
- q1.executeUpdate();
-
- idxSql = "CREATE INDEX ON users (state)";
- q1 = em.createNativeQuery(idxSql, CassandraEntitySample.class);
- q1.executeUpdate();
+// String colFamilySql = "CREATE COLUMNFAMILY users (key varchar PRIMARY KEY,full_name varchar, birth_date int,state varchar)";
+// Query q1 = em.createNativeQuery(colFamilySql, CassandraEntitySample.class);
+// q1.executeUpdate();
+//
+// String idxSql = "CREATE INDEX ON users (birthDate)";
+// q1 = em.createNativeQuery(idxSql, CassandraEntitySample.class);
+// q1.executeUpdate();
+//
+// idxSql = "CREATE INDEX ON users (state)";
+// q1 = em.createNativeQuery(idxSql, CassandraEntitySample.class);
+// q1.executeUpdate();
CassandraEntitySample entity = new CassandraEntitySample();
- entity.setBirth_date(100112);
+ entity.setBirth_date(new Integer(100112));
entity.setFull_name("impetus_emp");
entity.setKey("k");
entity.setState("UP");
@@ -130,7 +182,7 @@ private EntityManagerFactoryImpl getEntityManagerFactory()
String persistenceUnit = "cassandra";
props.put(Constants.PERSISTENCE_UNIT_NAME, persistenceUnit);
props.put(PersistenceProperties.KUNDERA_CLIENT_FACTORY,
- "com.impetus.client.cassandra.pelops.PelopsClientFactory");
+ "com.impetus.client.cassandra.thrift.ThriftClientFactory");
props.put(PersistenceProperties.KUNDERA_NODES, "localhost");
props.put(PersistenceProperties.KUNDERA_PORT, "9160");
props.put(PersistenceProperties.KUNDERA_KEYSPACE, "KunderaExamples");
View
89 kundera-cassandra/src/test/java/org/apache/cassandra/auth/SimpleAuthenticator.java
@@ -41,10 +41,12 @@
import java.security.MessageDigest;
import java.util.Map;
import java.util.Properties;
+import java.util.Set;
-import org.apache.cassandra.config.ConfigurationException;
+import org.apache.cassandra.exceptions.AuthenticationException;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.thrift.AuthenticationException;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.Hex;
@@ -69,8 +71,7 @@ public AuthenticatedUser defaultUser()
return null;
}
- public AuthenticatedUser authenticate(Map<? extends CharSequence, ? extends CharSequence> credentials)
- throws AuthenticationException
+ public AuthenticatedUser authenticate(Map<String, String> credentials) throws AuthenticationException
{
String pmode_plain = System.getProperty(PMODE_PROPERTY);
PasswordMode mode = PasswordMode.PLAIN;
@@ -173,4 +174,84 @@ static String authenticationErrorMessage(PasswordMode mode, String username)
{
return String.format("Given password in password mode %s could not be validated for user %s", mode, username);
}
+
+ /* (non-Javadoc)
+ * @see org.apache.cassandra.auth.IAuthenticator#alter(java.lang.String, java.util.Map)
+ */
+ @Override
+ public void alter(String arg0, Map<Option, Object> arg1) throws InvalidRequestException
+ {
+ // TODO Auto-generated method stub
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.cassandra.auth.IAuthenticator#alterableOptions()
+ */
+ @Override
+ public Set<Option> alterableOptions()
+ {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.cassandra.auth.IAuthenticator#create(java.lang.String, java.util.Map)
+ */
+ @Override
+ public void create(String arg0, Map<Option, Object> arg1) throws InvalidRequestException
+ {
+ // TODO Auto-generated method stub
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.cassandra.auth.IAuthenticator#drop(java.lang.String)
+ */
+ @Override
+ public void drop(String arg0) throws InvalidRequestException
+ {
+ // TODO Auto-generated method stub
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.cassandra.auth.IAuthenticator#protectedResources()
+ */
+ @Override
+ public Set<? extends IResource> protectedResources()
+ {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.cassandra.auth.IAuthenticator#requireAuthentication()
+ */
+ @Override
+ public boolean requireAuthentication()
+ {
+ // TODO Auto-generated method stub
+ return false;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.cassandra.auth.IAuthenticator#setup()
+ */
+ @Override
+ public void setup()
+ {
+ // TODO Auto-generated method stub
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.cassandra.auth.IAuthenticator#supportedOptions()
+ */
+ @Override
+ public Set<Option> supportedOptions()
+ {
+ // TODO Auto-generated method stub
+ return null;
+ }
}
View
125 kundera-cassandra/src/test/java/org/apache/cassandra/auth/SimpleAuthority.java
@@ -41,11 +41,14 @@
import java.util.EnumSet;
import java.util.List;
import java.util.Properties;
+import java.util.Set;
-import org.apache.cassandra.config.ConfigurationException;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.exceptions.UnauthorizedException;
import org.apache.cassandra.io.util.FileUtils;
-public class SimpleAuthority implements IAuthority
+public class SimpleAuthority implements IAuthorizer
{
public final static String ACCESS_FILENAME_PROPERTY = "access.properties";
@@ -99,7 +102,7 @@ else if (resource.size() == 4)
{
String kspAdmins = accessProperties.getProperty(KEYSPACES_WRITE_PROPERTY);
for (String admin : kspAdmins.split(","))
- if (admin.equals(user.username))
+ if (admin.equals(user.getName()))
return (EnumSet<Permission>) Permission.ALL;
}
@@ -121,7 +124,7 @@ else if (resource.size() == 4)
{
for (String reader : readers.split(","))
{
- if (reader.equals(user.username))
+ if (reader.equals(user.getName()))
{
canRead = true;
break;
@@ -133,7 +136,7 @@ else if (resource.size() == 4)
{
for (String writer : writers.split(","))
{
- if (writer.equals(user.username))
+ if (writer.equals(user.getName()))
{
canWrite = true;
break;
@@ -169,4 +172,116 @@ public void validateConfiguration() throws ConfigurationException
.getClass().getCanonicalName(), ACCESS_FILENAME_PROPERTY));
}
}
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.cassandra.auth.IAuthorizer#authorize(org.apache.cassandra.
+ * auth.AuthenticatedUser, org.apache.cassandra.auth.IResource)
+ */
+ @Override
+ public Set<Permission> authorize(AuthenticatedUser arg0, IResource arg1)
+ {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.cassandra.auth.IAuthorizer#grant(org.apache.cassandra.auth
+ * .AuthenticatedUser, java.util.Set, org.apache.cassandra.auth.IResource,
+ * java.lang.String)
+ */
+ @Override
+ public void grant(AuthenticatedUser arg0, Set<Permission> arg1, IResource arg2, String arg3)
+ throws UnauthorizedException, InvalidRequestException
+ {
+ // TODO Auto-generated method stub
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.cassandra.auth.IAuthorizer#list(org.apache.cassandra.auth.
+ * AuthenticatedUser, java.util.Set, org.apache.cassandra.auth.IResource,
+ * java.lang.String)
+ */
+ @Override
+ public Set<PermissionDetails> list(AuthenticatedUser arg0, Set<Permission> arg1, IResource arg2, String arg3)
+ throws UnauthorizedException, InvalidRequestException
+ {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.cassandra.auth.IAuthorizer#protectedResources()
+ */
+ @Override
+ public Set<? extends IResource> protectedResources()
+ {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.cassandra.auth.IAuthorizer#revoke(org.apache.cassandra.auth
+ * .AuthenticatedUser, java.util.Set, org.apache.cassandra.auth.IResource,
+ * java.lang.String)
+ */
+ @Override
+ public void revoke(AuthenticatedUser arg0, Set<Permission> arg1, IResource arg2, String arg3)
+ throws UnauthorizedException, InvalidRequestException
+ {
+ // TODO Auto-generated method stub
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.cassandra.auth.IAuthorizer#revokeAll(java.lang.String)
+ */
+ @Override
+ public void revokeAll(String arg0)
+ {
+ // TODO Auto-generated method stub
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.cassandra.auth.IAuthorizer#revokeAll(org.apache.cassandra.
+ * auth.IResource)
+ */
+ @Override
+ public void revokeAll(IResource arg0)
+ {
+ // TODO Auto-generated method stub
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.cassandra.auth.IAuthorizer#setup()
+ */
+ @Override
+ public void setup()
+ {
+ // TODO Auto-generated method stub
+
+ }
}
View
182 kundera-cassandra/src/test/resources/cassandra.yaml
@@ -9,7 +9,22 @@
# one logical cluster from joining another.
cluster_name: 'Test Cluster'
-# You should always specify InitialToken when setting up a production
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+# num_tokens: 256
+
+# If you haven't specified num_tokens, or have set it to the default of 1 then
+# you should always specify InitialToken when setting up a production
# cluster for the first time, and often when adding capacity later.
# The principle is that each node should be given an equal slice of
# the token ring; see http://wiki.apache.org/cassandra/Operations
@@ -25,9 +40,13 @@ initial_token:
hinted_handoff_enabled: true
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, hints will be dropped.
-max_hint_window_in_ms: 3600000 # one hour
-# Sleep this long after delivering each hint
-hinted_handoff_throttle_delay_in_ms: 1
+max_hint_window_in_ms: 10800000 # 3 hours
+# throttle in KB's per second, per delivery thread
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
# The following setting populates the page cache on memtable flush and compaction
# WARNING: Enable this setting only when the whole node's data fits in memory.
@@ -36,23 +55,20 @@ hinted_handoff_throttle_delay_in_ms: 1
# authentication backend, implementing IAuthenticator; used to identify users
authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
-# authenticator: org.apache.cassandra.auth.SimpleAuthenticator
-# authorization backend, implementing IAuthority; used to limit access/provide permissions
-authority: org.apache.cassandra.auth.AllowAllAuthority
-# authority: org.apache.cassandra.auth.SimpleAuthority
+# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
# The partitioner is responsible for distributing rows (by key) across
# nodes in the cluster. Any IPartitioner may be used, including your
# own as long as it is on the classpath. Out of the box, Cassandra
-# provides org.apache.cassandra.dht.RandomPartitioner
-# org.apache.cassandra.dht.ByteOrderedPartitioner,
-# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
-# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
-# (deprecated).
+# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
+# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
#
# - RandomPartitioner distributes rows across the cluster evenly by md5.
-# When in doubt, this is the best option.
+# This is the default prior to 1.2 and is retained for compatibility.
+# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128
+# Hash Function instead of md5. When in doubt, this is the best option.
# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
# scanning rows in key order, but the ordering can generate hot spots
# for sequential insertion workloads.
@@ -64,6 +80,7 @@ authority: org.apache.cassandra.auth.AllowAllAuthority
#
# See http://wiki.apache.org/cassandra/Operations for more on
# partitioners and token selection.
+# partitioner: org.apache.cassandra.dht.Murmur3Partitioner
partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
# directories where Cassandra should store data on disk.
@@ -73,6 +90,15 @@ data_file_directories:
# commit log
commitlog_directory: /var/lib/cassandra/commitlog
+# policy for data disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+# still inspectable via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
# Maximum size of the key cache in memory.
#
# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
@@ -157,13 +183,16 @@ saved_caches_directory: /var/lib/cassandra/saved_caches
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
-# Configure the Size of the individual Commitlog file. The
-# default is 128 MB, which is almost always fine, but if you are
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentally from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 16 MB
+# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
-#
-# commitlog_segment_size_in_mb: 128
+commitlog_segment_size_in_mb: 32
# any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
@@ -220,10 +249,14 @@ concurrent_writes: 32
# If omitted, Cassandra will set it to 1/3 of the heap.
# memtable_total_space_in_mb: 2048
-# Total space to use for commitlogs.
+# Total space to use for commitlogs. Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
# If space gets above this value (it will round up to the next nearest
# segment multiple), Cassandra will flush every dirty CF in the oldest
-# segment and remove it.
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
# commitlog_total_space_in_mb: 4096
# This sets the amount of memtable flush writer threads. These will
@@ -269,6 +302,25 @@ listen_address: localhost
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4
+
+# Whether to start the native transport server.
+# Currently, only the thrift server is started by default because the native
+# transport is considered beta.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: false
+# port for the CQL native transport to listen for clients on
+native_transport_port: 9042
+# The minimum and maximum threads for handling requests when the native
+# transport is used. The meaning is those is similar to the one of
+# rpc_min_threads and rpc_max_threads, though the default differ slightly and
+# are the ones below:
+# native_transport_min_threads: 16
+# native_transport_max_threads: 128
+
+
+# Whether to start the thrift rpc server.
+start_rpc: true
# The address to bind the Thrift RPC service to -- clients connect
# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
# you want Thrift to listen on all interfaces.
@@ -282,36 +334,34 @@ rpc_port: 9160
# enable or disable keepalive on rpc connections
rpc_keepalive: true
-# Cassandra provides three options for the RPC Server:
+# Cassandra provides three out-of-the-box options for the RPC Server:
#
-# sync -> One connection per thread in the rpc pool (see below).
-# For a very large number of clients, memory will be your limiting
-# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
-# Connection pooling is very, very strongly recommended.
+# sync -> One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
#
-# async -> Nonblocking server implementation with one thread to serve
-# rpc connections. This is not recommended for high throughput use
-# cases. Async has been tested to be about 50% slower than sync
-# or hsha and is deprecated: it will be removed in the next major release.
-#
-# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
-# (see below) is used to manage requests, but the threads are multiplexed
-# across the different clients.
+# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the amount
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request).
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync
-# Uncomment rpc_min|max|thread to set request pool size.
-# You would primarily set max for the sync server to safeguard against
-# misbehaved clients; if you do hit the max, Cassandra will block until one
-# disconnects before accepting more. The defaults for sync are min of 16 and max
-# unlimited.
-#
-# For the Hsha server, the min and max both default to quadruple the number of
-# CPU cores.
+# Uncomment rpc_min|max_thread to set request pool size limits.
#
-# This configuration is ignored by the async server.
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provide no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048
@@ -321,8 +371,6 @@ rpc_server_type: sync
# rpc_recv_buff_size_in_bytes:
# Frame size for thrift (maximum field length).
-# 0 disables TFramedTransport in favor of TSocket. This option
-# is deprecated; we strongly recommend using Framed mode.
thrift_framed_transport_size_in_mb: 15
# The max length of a thrift message, including all fields and
@@ -370,8 +418,6 @@ in_memory_compaction_limit_in_mb: 64
# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
#
-# This setting has no effect on LeveledCompactionStrategy.
-#
# concurrent_compactors defaults to the number of cores.
# Uncomment to make compaction mono-threaded, the pre-0.8 default.
#concurrent_compactors: 1
@@ -403,8 +449,26 @@ compaction_preheat_key_cache: true
# When unset, the default is 400 Mbps or 50 MB/s.
# stream_throughput_outbound_megabits_per_sec: 400
-# Time to wait for a reply from other nodes before failing the command
-rpc_timeout_in_ms: 10000
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 10000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 10000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts, If disabled cassandra will assuming the request
+# was forwarded to the replica instantly by the coordinator
+#
+# Warning: before enabling this property make sure to ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
# Enable socket timeout for streaming operation.
# When a timeout occurs during streaming, streaming is retried from the start
@@ -549,7 +613,7 @@ index_interval: 128
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
-encryption_options:
+server_encryption_options:
internode_encryption: none
keystore: conf/.keystore
keystore_password: cassandra
@@ -560,3 +624,21 @@ encryption_options:
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+internode_compression: all
View
1 kundera-core/src/main/java/com/impetus/kundera/property/PropertyAccessorHelper.java
@@ -281,7 +281,6 @@ public static void setId(Object entity, EntityMetadata metadata, Object rowKey)
{
try
{
-
Field idField = (Field) metadata.getIdAttribute().getJavaMember();
set(entity, idField, rowKey);
}
View
2 kundera-rest/src/main/resources/log4j.properties
@@ -1,4 +1,4 @@
-log4j.rootLogger=WARN, DRFA, CONSOLE
+log4j.rootLogger=DEBUG, DRFA, CONSOLE
### direct log messages to stdout ###
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
View
2 kundera-rest/src/test/java/com/impetus/kundera/rest/common/Book.java
@@ -36,7 +36,7 @@
@NamedQueries(value = { @NamedQuery(name = "findByAuthor", query = "Select b from Book b where b.author = :author"),
@NamedQuery(name = "findByPublication", query = "Select b from Book b where b.publication = ?1"),
@NamedQuery(name = "findAllBooks", query = "Select b from Book b") })
-@NamedNativeQueries(value = { @NamedNativeQuery(name = "findAllBooksNative", query = "select * from BOOK") })
+@NamedNativeQueries(value = { @NamedNativeQuery(name = "findAllBooksNative", query = "select * from " + "\"BOOK\"") })
@XmlRootElement
public class Book
{
View
29 kundera-rest/src/test/java/com/impetus/kundera/rest/resources/CRUDResourceTest.java
@@ -113,9 +113,8 @@
private final static boolean USE_EMBEDDED_SERVER = true;
private final static boolean AUTO_MANAGE_SCHEMA = true;
-
+
WebResource webResource = resource();
-
public CRUDResourceTest() throws Exception
{
@@ -133,24 +132,24 @@ public static void setUpBeforeClass() throws Exception
if (AUTO_MANAGE_SCHEMA)
{
loadData();
- }
+ }
- // Initialize REST Client
- restClient = new RESTClientImpl();
+ // Initialize REST Client
+ restClient = new RESTClientImpl();
}
@Before
public void setup() throws Exception
- {
-
+ {
+
restClient.initialize(webResource, mediaType);
}
@After
public void tearDown() throws Exception
{
- super.tearDown();
+ super.tearDown();
}
@AfterClass
@@ -172,7 +171,7 @@ public static void tearDownAfterClass() throws Exception
@Test
@PerfTest(invocations = 10)
public void testCRUD()
- {
+ {
if (MediaType.APPLICATION_XML.equals(mediaType))
{
@@ -270,7 +269,7 @@ else if (MediaType.APPLICATION_JSON.equals(mediaType))
/** Native Query - Select */
// Get All books
- String nativeQuery = "Select * from BOOK";
+ String nativeQuery = "Select * from " + "\"BOOK\"";
String nativeQueryResult = restClient.runNativeQuery(sessionToken, "Book", nativeQuery,
new HashMap<String, Object>());
log.debug("Native Query Select Result:" + nativeQueryResult);
@@ -297,8 +296,8 @@ else if (MediaType.APPLICATION_JSON.equals(mediaType))
// Close Application
restClient.closeApplication(applicationToken);
-
- if(AUTO_MANAGE_SCHEMA)
+
+ if (AUTO_MANAGE_SCHEMA)
{
truncateColumnFamily();
}
@@ -398,8 +397,8 @@ public void testCRUDOnAssociation()
// Close Application
restClient.closeApplication(applicationToken);
-
- if(AUTO_MANAGE_SCHEMA)
+
+ if (AUTO_MANAGE_SCHEMA)
{
truncateColumnFamily();
}
@@ -505,7 +504,7 @@ private static void loadData() throws TException, InvalidRequestException, Unava
CassandraCli.client.set_keyspace(_KEYSPACE);
}
-
+
private void truncateColumnFamily()
{
String[] columnFamily = new String[] { "BOOK", "PERSONNEL", "ADDRESS" };
View
182 kundera-rest/src/test/resources/cassandra.yaml
@@ -9,7 +9,22 @@
# one logical cluster from joining another.
cluster_name: 'Test Cluster'
-# You should always specify InitialToken when setting up a production
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+# num_tokens: 256
+
+# If you haven't specified num_tokens, or have set it to the default of 1 then
+# you should always specify InitialToken when setting up a production
# cluster for the first time, and often when adding capacity later.
# The principle is that each node should be given an equal slice of
# the token ring; see http://wiki.apache.org/cassandra/Operations
@@ -25,9 +40,13 @@ initial_token:
hinted_handoff_enabled: true
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, hints will be dropped.
-max_hint_window_in_ms: 3600000 # one hour
-# Sleep this long after delivering each hint
-hinted_handoff_throttle_delay_in_ms: 1
+max_hint_window_in_ms: 10800000 # 3 hours
+# throttle in KB's per second, per delivery thread
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
# The following setting populates the page cache on memtable flush and compaction
# WARNING: Enable this setting only when the whole node's data fits in memory.
@@ -36,23 +55,20 @@ hinted_handoff_throttle_delay_in_ms: 1
# authentication backend, implementing IAuthenticator; used to identify users
authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
-# authenticator: org.apache.cassandra.auth.SimpleAuthenticator
-# authorization backend, implementing IAuthority; used to limit access/provide permissions
-authority: org.apache.cassandra.auth.AllowAllAuthority
-# authority: org.apache.cassandra.auth.SimpleAuthority
+# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+authorizer: org.apache.cassandra.auth.AllowAllAuthorizer
# The partitioner is responsible for distributing rows (by key) across
# nodes in the cluster. Any IPartitioner may be used, including your
# own as long as it is on the classpath. Out of the box, Cassandra
-# provides org.apache.cassandra.dht.RandomPartitioner
-# org.apache.cassandra.dht.ByteOrderedPartitioner,
-# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
-# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
-# (deprecated).
+# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
+# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
#
# - RandomPartitioner distributes rows across the cluster evenly by md5.
-# When in doubt, this is the best option.
+# This is the default prior to 1.2 and is retained for compatibility.
+# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128
+# Hash Function instead of md5. When in doubt, this is the best option.
# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
# scanning rows in key order, but the ordering can generate hot spots
# for sequential insertion workloads.
@@ -64,6 +80,7 @@ authority: org.apache.cassandra.auth.AllowAllAuthority
#
# See http://wiki.apache.org/cassandra/Operations for more on
# partitioners and token selection.
+# partitioner: org.apache.cassandra.dht.Murmur3Partitioner
partitioner: org.apache.cassandra.dht.ByteOrderedPartitioner
# directories where Cassandra should store data on disk.
@@ -73,6 +90,15 @@ data_file_directories:
# commit log
commitlog_directory: /var/lib/cassandra/commitlog
+# policy for data disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+# still inspectable via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
# Maximum size of the key cache in memory.
#
# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
@@ -157,13 +183,16 @@ saved_caches_directory: /var/lib/cassandra/saved_caches
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
-# Configure the Size of the individual Commitlog file. The
-# default is 128 MB, which is almost always fine, but if you are
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentally from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 16 MB
+# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
-#
-# commitlog_segment_size_in_mb: 128
+commitlog_segment_size_in_mb: 32
# any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
@@ -220,10 +249,14 @@ concurrent_writes: 32
# If omitted, Cassandra will set it to 1/3 of the heap.
# memtable_total_space_in_mb: 2048
-# Total space to use for commitlogs.
+# Total space to use for commitlogs. Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
# If space gets above this value (it will round up to the next nearest
# segment multiple), Cassandra will flush every dirty CF in the oldest
-# segment and remove it.
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
# commitlog_total_space_in_mb: 4096
# This sets the amount of memtable flush writer threads. These will
@@ -269,6 +302,25 @@ listen_address: localhost
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4
+
+# Whether to start the native transport server.
+# Currently, only the thrift server is started by default because the native
+# transport is considered beta.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: false
+# port for the CQL native transport to listen for clients on
+native_transport_port: 9042
+# The minimum and maximum threads for handling requests when the native
+# transport is used. The meaning is those is similar to the one of
+# rpc_min_threads and rpc_max_threads, though the default differ slightly and
+# are the ones below:
+# native_transport_min_threads: 16
+# native_transport_max_threads: 128
+
+
+# Whether to start the thrift rpc server.
+start_rpc: true
# The address to bind the Thrift RPC service to -- clients connect
# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
# you want Thrift to listen on all interfaces.
@@ -282,36 +334,34 @@ rpc_port: 9160
# enable or disable keepalive on rpc connections
rpc_keepalive: true
-# Cassandra provides three options for the RPC Server:
+# Cassandra provides three out-of-the-box options for the RPC Server:
#
-# sync -> One connection per thread in the rpc pool (see below).
-# For a very large number of clients, memory will be your limiting
-# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
-# Connection pooling is very, very strongly recommended.
+# sync -> One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
#
-# async -> Nonblocking server implementation with one thread to serve
-# rpc connections. This is not recommended for high throughput use
-# cases. Async has been tested to be about 50% slower than sync
-# or hsha and is deprecated: it will be removed in the next major release.
-#
-# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
-# (see below) is used to manage requests, but the threads are multiplexed
-# across the different clients.
+# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the amount
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request).
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync
-# Uncomment rpc_min|max|thread to set request pool size.
-# You would primarily set max for the sync server to safeguard against
-# misbehaved clients; if you do hit the max, Cassandra will block until one
-# disconnects before accepting more. The defaults for sync are min of 16 and max
-# unlimited.
-#
-# For the Hsha server, the min and max both default to quadruple the number of
-# CPU cores.
+# Uncomment rpc_min|max_thread to set request pool size limits.
#
-# This configuration is ignored by the async server.
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provide no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048
@@ -321,8 +371,6 @@ rpc_server_type: sync
# rpc_recv_buff_size_in_bytes:
# Frame size for thrift (maximum field length).
-# 0 disables TFramedTransport in favor of TSocket. This option
-# is deprecated; we strongly recommend using Framed mode.
thrift_framed_transport_size_in_mb: 15
# The max length of a thrift message, including all fields and
@@ -370,8 +418,6 @@ in_memory_compaction_limit_in_mb: 64
# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
#
-# This setting has no effect on LeveledCompactionStrategy.
-#
# concurrent_compactors defaults to the number of cores.
# Uncomment to make compaction mono-threaded, the pre-0.8 default.
#concurrent_compactors: 1
@@ -403,8 +449,26 @@ compaction_preheat_key_cache: true
# When unset, the default is 400 Mbps or 50 MB/s.
# stream_throughput_outbound_megabits_per_sec: 400
-# Time to wait for a reply from other nodes before failing the command
-rpc_timeout_in_ms: 10000
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 10000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 10000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts, If disabled cassandra will assuming the request
+# was forwarded to the replica instantly by the coordinator
+#
+# Warning: before enabling this property make sure to ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
# Enable socket timeout for streaming operation.
# When a timeout occurs during streaming, streaming is retried from the start
@@ -549,7 +613,7 @@ index_interval: 128
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
-encryption_options:
+server_encryption_options:
internode_encryption: none
keystore: conf/.keystore
keystore_password: cassandra
@@ -560,3 +624,21 @@ encryption_options:
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+internode_compression: all
View
17 kundera-tests/pom.xml
@@ -163,12 +163,12 @@
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
- <version>1.5.8</version>
+ <version>1.7.2</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
- <version>1.5.8</version>
+ <version>1.7.2</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
@@ -242,17 +242,17 @@
<dependency>
<groupId>org.apache.cassandra</groupId>
<artifactId>cassandra-all</artifactId>
- <version>1.1.2</version>
+ <version>1.2.0</version>
</dependency>
<dependency>
<groupId>org.apache.cassandra</groupId>
<artifactId>cassandra-thrift</artifactId>
- <version>1.1.2</version>
+ <version>1.2.0</version>
</dependency>
<dependency>
<groupId>org.apache.cassandra</groupId>
<artifactId>cassandra-clientutil</artifactId>
- <version>1.1.2</version>
+ <version>1.2.0</version>
<scope>test</scope>
</dependency>
<dependency>
@@ -431,13 +431,6 @@
<version>1.9.8</version>
</dependency>
- <dependency>