added initial plumbing, proxy client connection

commit 51706b15982a66e4ebe0f1e624371ee619484025 1 parent db74f37
zznate authored
152 lcp/pom.xml
@@ -0,0 +1,152 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>me.prettyprint</groupId>
+ <artifactId>hector</artifactId>
+ <version>1.0-4-SNAPSHOT</version>
+ </parent>
+ <artifactId>hector-lcp</artifactId>
+ <packaging>jar</packaging>
+ <name>hector-lcp</name>
+ <properties>
+ <!-- OSGi bundle properties -->
+ <bundle.symbolicName>me.prettyprint.hector-lcp</bundle.symbolicName>
+ <bundle.namespace>me.prettyprint</bundle.namespace>
+ </properties>
+
+ <build>
+ <plugins>
+
+ <!--
+ run examples with: mvn exec:java
+ -Dexec.mainClass="me.prettyprint.cassandra.examples.ExampleDao"
+ -->
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.1</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>java</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <forkMode>always</forkMode>
+ <argLine>-Xmx512M -Xms512M -Dfile.encoding=UTF-8 -Dsun.jnu.encoding=UTF-8</argLine>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>commons-lang</groupId>
+ <artifactId>commons-lang</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>commons-pool</groupId>
+ <artifactId>commons-pool</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.cassandra</groupId>
+ <artifactId>cassandra-thrift</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.thrift</groupId>
+ <artifactId>libthrift</artifactId>
+ <version>0.6.1</version> <!-- needs to be >= 0.7 for ByteBuffer, later for SSL+Async -->
+ </dependency>
+ <dependency>
+ <groupId>org.apache.cassandra</groupId>
+ <artifactId>cassandra-all</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.github.stephenc.eaio-uuid</groupId>
+ <artifactId>uuid</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.ecyrd.speed4j</groupId>
+ <artifactId>speed4j</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>me.prettyprint</groupId>
+ <artifactId>hector-core</artifactId>
+ <version>${pom.version}</version>
+ </dependency>
+ <!-- test dependencies -->
+ <dependency>
+ <groupId>me.prettyprint</groupId>
+ <artifactId>hector-test</artifactId>
+ <version>${pom.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.github.stephenc</groupId>
+ <artifactId>jamm</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>jcl-over-slf4j</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>jul-to-slf4j</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework</groupId>
+ <artifactId>spring-beans</artifactId>
+ <version>${org.springframework.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework</groupId>
+ <artifactId>spring-context</artifactId>
+ <version>${org.springframework.version}</version>
+ <scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.springframework</groupId>
+ <artifactId>spring-test</artifactId>
+ <version>${org.springframework.version}</version>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+</project>
View
122 lcp/src/main/java/me/prettyprint/lcp/CassandraProxyClient.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package me.prettyprint.lcp;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+import org.apache.cassandra.thrift.*;
+import org.apache.log4j.Logger;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.transport.*;
+
+
+/**
+ * Wraps the underlying Cassandra thrift client and attempts to handle
+ * disconnect, unavailable, and timeout errors gracefully.
+ * <p/>
+ * On disconnect, if it cannot reconnect to the same host it will use a
+ * different host from the ring. After a successful reconnect, the ring
+ * will be refreshed.
+ * <p/>
+ * This incorporates the CircuitBreaker pattern so as not to overwhelm
+ * the network with reconnect attempts.
+ */
+public class CassandraProxyClient implements java.lang.reflect.InvocationHandler {
+
+ private static final Logger logger = Logger.getLogger(CassandraProxyClient.class);
+
+ public enum ConnectionStrategy {
+ RANDOM, ROUND_ROBIN, STICKY
+ }
+
+ private String host;
+ private int port;
+
+ private Cassandra.Client client;
+
+ /**
+ * Build the Cassandra.Iface proxy
+ *
+ * @param host cassandra host
+ * @param port cassandra port
+ * @return a proxied Cassandra.Iface instance
+ * @throws IOException
+ */
+ public static Cassandra.Iface newProxyConnection(String host, int port)
+ throws IOException {
+ return (Cassandra.Iface) java.lang.reflect.Proxy.newProxyInstance(Cassandra.Client.class.getClassLoader(),
+ Cassandra.Client.class.getInterfaces(), new CassandraProxyClient(host, port));
+ }
+
+ /**
+ * Create connection to a given host.
+ *
+ * @return cassandra thrift client
+ * @throws IOException if the connection cannot be established
+ */
+ public Cassandra.Client createConnection() throws IOException {
+ TSocket socket = new TSocket(host, port, 10000);
+ TTransport trans;
+ try {
+ socket.getSocket().setKeepAlive(true);
+ socket.getSocket().setSoLinger(false,0);
+ socket.getSocket().setTcpNoDelay(true);
+
+ trans = new TFastFramedTransport(socket);
+ trans.open();
+
+ return new Cassandra.Client(new TBinaryProtocol(trans));
+
+ } catch (Exception e) {
+ throw new IOException("unable to connect to server", e);
+ }
+
+ }
+
+ private CassandraProxyClient(String host, int port) throws IOException {
+ this.host = host;
+ this.port = port;
+ }
+
+
+
+ public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
+ Object result = null;
+ try {
+ // NOTE: initial plumbing -- opens a fresh connection on every
+ // invocation; pooling and reuse are still to come
+ this.client = createConnection();
+ result = m.invoke(client, args);
+
+ return result;
+
+ } catch (InvocationTargetException ite) {
+ // unwrap so callers see the declared thrift exception, not the reflection wrapper
+ throw ite.getCause();
+ } catch (Exception e) {
+ logger.error("Error invoking a method via proxy: ", e);
+ throw new RuntimeException(e);
+ }
+
+ }
+
+
+}
+
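For reference, a minimal usage sketch of the proxy (this mirrors the call LcpCassandraDaemonTest makes below; the class name is illustrative, and 9160 is the stock thrift port while the embedded test server below listens on 9170):

    import me.prettyprint.lcp.CassandraProxyClient;
    import org.apache.cassandra.thrift.Cassandra;

    public class ProxyClientExample {
        public static void main(String[] args) throws Exception {
            // newProxyConnection returns a dynamic proxy implementing Cassandra.Iface;
            // each method call opens a connection and delegates via reflection
            Cassandra.Iface client = CassandraProxyClient.newProxyConnection("localhost", 9160);
            System.out.println(client.describe_version());
        }
    }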
90 lcp/src/main/java/me/prettyprint/lcp/EmbeddedServerLauncher.java
@@ -0,0 +1,90 @@
+package me.prettyprint.lcp;
+
+import org.apache.cassandra.config.ConfigurationException;
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * @author zznate
+ */
+public class EmbeddedServerLauncher {
+ private static Logger log = LoggerFactory.getLogger(EmbeddedServerLauncher.class);
+
+ private static final String CASS_ROOT = "cassandra";
+
+ private final String yamlFile;
+ static LcpCassandraDaemon cassandraDaemon;
+
+ public EmbeddedServerLauncher() {
+ this("/cassandra.yaml");
+ }
+
+ public EmbeddedServerLauncher(String yamlFile) {
+ this.yamlFile = yamlFile;
+ }
+
+ static ExecutorService executor = Executors.newSingleThreadExecutor();
+
+ /**
+ * Set embedded cassandra up and spawn it in a new thread.
+ *
+ * @throws org.apache.thrift.transport.TTransportException
+ * @throws java.io.IOException
+ * @throws InterruptedException
+ */
+ public void setup() throws TTransportException, IOException,
+ InterruptedException, ConfigurationException {
+ if ( cassandraDaemon != null && cassandraDaemon.isRPCServerRunning() ) {
+ return;
+ }
+ System.setProperty("cassandra-foreground","true");
+ DatabaseDescriptor.createAllDirectories();
+
+ log.info("Starting executor");
+
+ executor.execute(new CassandraRunner());
+ log.info("Started executor");
+ try {
+ TimeUnit.SECONDS.sleep(3);
+ log.info("Done sleeping");
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ public static void teardown() {
+ try {
+ CommitLog.instance.shutdownBlocking();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ executor.shutdown();
+ executor.shutdownNow();
+ log.info("Teardown complete");
+ }
+
+
+ class CassandraRunner implements Runnable {
+
+ public void run() {
+
+ cassandraDaemon = new LcpCassandraDaemon();
+
+ cassandraDaemon.activate();
+
+ }
+
+ }
+}
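The fixed three-second sleep in setup() is a rough edge of this initial plumbing. Purely as an illustration (not part of this commit), a bounded readiness poll built on the same isRPCServerRunning() check that setup() already consults could replace it:

    // Illustrative alternative to the fixed sleep in setup(): poll the
    // daemon's RPC server state against a bounded deadline.
    long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(30);
    while (cassandraDaemon == null || !cassandraDaemon.isRPCServerRunning()) {
        if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("embedded Cassandra did not start in time");
        }
        TimeUnit.MILLISECONDS.sleep(100);
    }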
227 lcp/src/main/java/me/prettyprint/lcp/LcpCassandraServer.java
@@ -0,0 +1,227 @@
+package me.prettyprint.lcp;
+
+import org.apache.cassandra.thrift.*;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+
+import java.net.SocketException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @author zznate
+ */
+public class LcpCassandraServer implements Cassandra.Iface {
+
+ /*
+ general contract for each method:
+ - acquire Cassandra.Client from pool, pass through
+ - if method uses a key, check the token cache & route
+
+ */
+
+
+ public LcpCassandraServer() {
+
+ }
+ /**
+ * Use the key to deduce locality for "client-mediated selects".
+ *
+ * @return a connected Cassandra.Client for the target host
+ */
+ protected Cassandra.Client acquire() {
+ // DecoratedKey<?> dk = StorageService.getPartitioner().decorateKey(key);
+ // ~ in the case of RP, is just: return new BigIntegerToken(FBUtilities.hashToBigInteger(key));
+ // --- leads to ---
+ // from RandomPartitioner#getToken(bb key): return new BigIntegerToken(FBUtilities.hashToBigInteger(key));
+ //
+ // really just need to query describe_ring and store the output
+ // - model after locator.TokenMetadata? Use TokenMetadata directly
+ // - TokenRange from describe_ring has everything, but may not want to convert BB to strings
+ // * make a local version which uses Token/TokenMetadata directly
+ // * see TokenMetadata ctor for this
+ Cassandra.Client client;
+ try {
+ TSocket socket = new TSocket("localhost", 9160, 10000);
+ socket.getSocket().setSoLinger(false, 0);
+ socket.getSocket().setKeepAlive(true);
+ socket.getSocket().setTcpNoDelay(true);
+ TTransport transport = new TFramedTransport(socket);
+ transport.open();
+ client = new Cassandra.Client(new TBinaryProtocol(transport));
+ } catch (SocketException e) {
+ throw new RuntimeException("unable to configure socket", e);
+ } catch (TTransportException e) {
+ throw new RuntimeException("unable to open transport", e);
+ }
+ // map
+ // (StorageProxy#read takes the commands and noodles hosts via #fetchRows)
+ // - then via StorageService#getLiveNaturalEndpoints
+ // - then via Table#getReplicationStrategy#getNaturalEndpoints
+ // RowPosition implements RingPosition
+
+ return client;
+ }
+
+ @Override
+ public void login(AuthenticationRequest auth_request) throws AuthenticationException, AuthorizationException, TException {
+ acquire().login(auth_request);
+ }
+
+ @Override
+ public void set_keyspace(String keyspace) throws InvalidRequestException, TException {
+ acquire().set_keyspace(keyspace);
+ }
+
+ @Override
+ public ColumnOrSuperColumn get(ByteBuffer key, ColumnPath column_path, ConsistencyLevel consistency_level) throws InvalidRequestException, NotFoundException, UnavailableException, TimedOutException, TException {
+ return acquire().get(key,column_path,consistency_level);
+ }
+
+ @Override
+ public List<ColumnOrSuperColumn> get_slice(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ return acquire().get_slice(key,column_parent,predicate,consistency_level);
+ }
+
+ @Override
+ public int get_count(ByteBuffer key, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ return 0; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiget_slice(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public Map<ByteBuffer, Integer> multiget_count(List<ByteBuffer> keys, ColumnParent column_parent, SlicePredicate predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public List<KeySlice> get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public List<KeySlice> get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ // TODO: pass through via acquire()
+ }
+
+ @Override
+ public void add(ByteBuffer key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ // TODO: pass through via acquire()
+ }
+
+ @Override
+ public void remove(ByteBuffer key, ColumnPath column_path, long timestamp, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ // TODO: pass through via acquire()
+ }
+
+ @Override
+ public void remove_counter(ByteBuffer key, ColumnPath path, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ // TODO: pass through via acquire()
+ }
+
+ @Override
+ public void batch_mutate(Map<ByteBuffer, Map<String, List<Mutation>>> mutation_map, ConsistencyLevel consistency_level) throws InvalidRequestException, UnavailableException, TimedOutException, TException {
+ // TODO: pass through via acquire()
+ }
+
+ @Override
+ public void truncate(String cfname) throws InvalidRequestException, UnavailableException, TException {
+ // TODO: pass through via acquire()
+ }
+
+ @Override
+ public Map<String, List<String>> describe_schema_versions() throws InvalidRequestException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public List<KsDef> describe_keyspaces() throws InvalidRequestException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String describe_cluster_name() throws TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String describe_version() throws TException {
+ return "Hector-LCP-"+Constants.VERSION; //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public List<TokenRange> describe_ring(String keyspace) throws InvalidRequestException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String describe_partitioner() throws TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String describe_snitch() throws TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public KsDef describe_keyspace(String keyspace) throws NotFoundException, InvalidRequestException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public List<String> describe_splits(String cfName, String start_token, String end_token, int keys_per_split) throws InvalidRequestException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String system_add_column_family(CfDef cf_def) throws InvalidRequestException, SchemaDisagreementException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String system_drop_column_family(String column_family) throws InvalidRequestException, SchemaDisagreementException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String system_add_keyspace(KsDef ks_def) throws InvalidRequestException, SchemaDisagreementException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String system_drop_keyspace(String keyspace) throws InvalidRequestException, SchemaDisagreementException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String system_update_keyspace(KsDef ks_def) throws InvalidRequestException, SchemaDisagreementException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public String system_update_column_family(CfDef cf_def) throws InvalidRequestException, SchemaDisagreementException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+
+ @Override
+ public CqlResult execute_cql_query(ByteBuffer query, Compression compression) throws InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, TException {
+ return null; // TODO: pass through via acquire()
+ }
+}
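The comment block in acquire() sketches the routing plan: query describe_ring once, cache the output, and route each key to its owning endpoints. A minimal sketch of that cache, assuming the OrderPreservingPartitioner configured in the test cassandra.yaml below (so token strings compare in ring order); TokenRangeCache and endpointsFor are hypothetical names, not part of this commit:

    import java.util.List;

    import org.apache.cassandra.thrift.Cassandra;
    import org.apache.cassandra.thrift.TokenRange;

    /** Hypothetical cache of describe_ring output for client-side routing. */
    public class TokenRangeCache {
        private volatile List<TokenRange> ranges;

        /** Refresh the cached ring topology from a live connection. */
        public void refresh(Cassandra.Client client, String keyspace) throws Exception {
            ranges = client.describe_ring(keyspace);
        }

        /**
         * Endpoints owning the given token. Ring ranges are (start, end];
         * the wrapping range has start >= end.
         */
        public List<String> endpointsFor(String token) {
            for (TokenRange tr : ranges) {
                boolean wraps = tr.start_token.compareTo(tr.end_token) >= 0;
                boolean afterStart = token.compareTo(tr.start_token) > 0;
                boolean atOrBeforeEnd = token.compareTo(tr.end_token) <= 0;
                if (wraps ? (afterStart || atOrBeforeEnd) : (afterStart && atOrBeforeEnd)) {
                    return tr.endpoints;
                }
            }
            return null; // unknown token: fall back to any host
        }
    }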
34 lcp/src/test/java/me/prettyprint/lcp/LcpCassandraDaemonTest.java
@@ -0,0 +1,34 @@
+package me.prettyprint.lcp;
+
+import me.prettyprint.hector.api.Cluster;
+import me.prettyprint.hector.api.Keyspace;
+import me.prettyprint.hector.api.factory.HFactory;
+import org.apache.cassandra.thrift.Cassandra;
+import org.apache.cassandra.thrift.Constants;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class LcpCassandraDaemonTest {
+
+ private EmbeddedServerLauncher launcher = new EmbeddedServerLauncher();
+ private Cluster cluster;
+ private Keyspace keyspace;
+
+ @Before
+ public void localSetup() throws Exception {
+ launcher.setup();
+ //cluster = HFactory.getOrCreateCluster("TestCluster","localhost:9170");
+
+ }
+
+ @Test
+ public void testFireUp() throws Exception {
+ //assertEquals("19.20.0",cluster.describeThriftVersion());
+
+ Cassandra.Iface client = CassandraProxyClient.newProxyConnection("localhost",9170);
+ client.describe_version();
+ }
+
+}
368 lcp/src/test/resources/cassandra.yaml
@@ -0,0 +1,368 @@
+# Cassandra storage config YAML
+
+#NOTE !!!!!!!! NOTE
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+#NOTE !!!!!!!! NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# You should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node. If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+initial_token:
+
+# Set to true to make new [non-seed] nodes automatically migrate data
+# to themselves from the pre-existing nodes in the cluster. Defaults
+# to false because you can only bootstrap N machines at a time from
+# an existing cluster of N, so if you are bringing up a cluster of
+# 10 machines with 3 seeds you would have to do it in stages. Leaving
+# this off for the initial start simplifies that.
+auto_bootstrap: false
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# authorization backend, implementing IAuthority; used to limit access/provide permissions
+authority: org.apache.cassandra.auth.AllowAllAuthority
+
+# any IPartitioner may be used, including your own as long as it is on
+# the classpath. Out of the box, Cassandra provides
+# org.apache.cassandra.dht.RandomPartitioner
+# org.apache.cassandra.dht.ByteOrderedPartitioner,
+# org.apache.cassandra.dht.OrderPreservingPartitioner, and
+# org.apache.cassandra.dht.CollatingOrderPreservingPartitioner.
+# (CollatingOPP collates according to EN,US rules, not naive byte
+# ordering. Use this as an example if you need locale-aware collation.)
+partitioner: org.apache.cassandra.dht.OrderPreservingPartitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+ - tmp/var/lib/cassandra/data
+
+# commit log
+commitlog_directory: tmp/var/lib/cassandra/commitlog
+
+# saved caches
+saved_caches_directory: tmp/var/lib/cassandra/saved_caches
+
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# CommitLogSyncBatchWindowInMS milliseconds for other writes, before
+# performing the sync.
+commitlog_sync: periodic
+
+# the other option is "timed," where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync_period_in_ms: 10000
+
+# Addresses of hosts that are deemed contact points.
+# Cassandra nodes use this list of hosts to find each other and learn
+# the topology of the ring. You must change this if you are running
+# multiple nodes!
+seed_provider:
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ - seeds: "127.0.0.1"
+# Access mode. mmapped i/o is substantially faster, but only practical on
+# a 64bit machine (which notably does not include EC2 "small" instances)
+# or relatively small datasets. "auto", the safe choice, will enable
+# mmapping on a 64bit JVM. Other values are "mmap", "mmap_index_only"
+# (which may allow you to get part of the benefits of mmap on a 32bit
+# machine by mmapping only index files) and "standard".
+# (The buffer size settings that follow only apply to standard,
+# non-mmapped i/o.)
+disk_access_mode: auto
+
+# Unlike most systems, in Cassandra writes are faster than reads, so
+# you can afford more of those in parallel. A good rule of thumb is 2
+# concurrent reads per processor core. Increase ConcurrentWrites to
+# the number of clients writing at once if you enable CommitLogSync +
+# CommitLogSyncDelay.
+concurrent_reads: 2
+concurrent_writes: 4
+
+# This sets the amount of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# Buffer size to use when performing contiguous column slices.
+# Increase this to the size of the column slices you typically perform
+sliced_buffer_size_in_kb: 64
+
+# TCP port, for commands and data
+storage_port: 7001
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: 127.0.0.1
+
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: localhost
+# port for Thrift to listen for clients on
+rpc_port: 9170
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+# 0 disables TFramedTransport in favor of TSocket. This option
+# is deprecated; we strongly recommend using Framed mode.
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# change this to increase the compaction thread's priority. In java, 1 is the
+# lowest priority and that is our default.
+# compaction_thread_priority: 1
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns. The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 16
+
+# Time to wait for a reply from other nodes before failing the command
+rpc_timeout_in_ms: 10000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch, which will let Cassandra know enough
+# about your network topology to route requests efficiently.
+# Out of the box, Cassandra provides
+# - org.apache.cassandra.locator.SimpleSnitch:
+# Treats Strategy order as proximity. This improves cache locality
+# when disabling read repair, which can further improve throughput.
+# - org.apache.cassandra.locator.RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's
+# IP address, respectively
+# org.apache.cassandra.locator.PropertyFileSnitch:
+# - Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-rack.properties.
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+
+# dynamic_snitch -- This boolean controls whether the above snitch is
+# wrapped with a dynamic snitch, which will monitor read latencies
+# and avoid reading from hosts that have slowed (due to compaction,
+# for instance)
+dynamic_snitch: true
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage.
+dynamic_snitch_badness_threshold: 0.0
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+# overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# The Index Interval determines how large the sampling of row keys
+# is for a given SSTable. The larger the sampling, the more effective
+# the index is at the cost of space.
+index_interval: 128
+
+# A ColumnFamily is the Cassandra concept closest to a relational table.
+#
+# Keyspaces are separate groups of ColumnFamilies. Except in very
+# unusual circumstances you will have one Keyspace per application.
+#
+# Keyspace required parameters:
+# - name: name of the keyspace; "system" and "definitions" are
+# reserved for Cassandra Internals.
+# - replica_placement_strategy: the class that determines how replicas
+# are distributed among nodes. Contains both the class as well as
+# configuration information. Must extend AbstractReplicationStrategy.
+# Out of the box, Cassandra provides
+# * org.apache.cassandra.locator.SimpleStrategy
+# * org.apache.cassandra.locator.NetworkTopologyStrategy
+# * org.apache.cassandra.locator.OldNetworkTopologyStrategy
+#
+# SimpleStrategy merely places the first
+# replica at the node whose token is closest to the key (as determined
+# by the Partitioner), and additional replicas on subsequent nodes
+# along the ring in increasing Token order.
+#
+# With NetworkTopologyStrategy,
+# for each datacenter, you can specify how many replicas you want
+# on a per-keyspace basis. Replicas are placed on different racks
+# within each DC, if possible. This strategy also requires a rack-aware
+# snitch, such as RackInferringSnitch or PropertyFileSnitch.
+# An example:
+# - name: Keyspace1
+# replica_placement_strategy: org.apache.cassandra.locator.NetworkTopologyStrategy
+# strategy_options:
+# DC1 : 3
+# DC2 : 2
+# DC3 : 1
+#
+# OldNetworkTopologyStrategy [formerly RackAwareStrategy]
+# places one replica in each of two datacenters, and the third on a
+# different rack in the first. Additional datacenters are not
+# guaranteed to get a replica. Additional replicas after three are placed
+# in ring order after the third without regard to rack or datacenter.
+#
+# - replication_factor: Number of replicas of each row
+# - column_families: column families associated with this keyspace
+#
+# ColumnFamily required parameters:
+# - name: name of the ColumnFamily. Must not contain the character "-".
+# - compare_with: tells Cassandra how to sort the columns for slicing
+# operations. The default is BytesType, which is a straightforward
+# lexical comparison of the bytes in each column. Other options are
+# AsciiType, UTF8Type, LexicalUUIDType, TimeUUIDType, LongType,
+# and IntegerType (a generic variable-length integer type).
+# You can also specify the fully-qualified class name to a class of
+# your choice extending org.apache.cassandra.db.marshal.AbstractType.
+#
+# ColumnFamily optional parameters:
+# - keys_cached: specifies the number of keys per sstable whose
+# locations we keep in memory in "mostly LRU" order. (JUST the key
+# locations, NOT any column values.) Specify a fraction (value less
+# than 1) or an absolute number of keys to cache. Defaults to 200000
+# keys.
+# - rows_cached: specifies the number of rows whose entire contents we
+# cache in memory. Do not use this on ColumnFamilies with large rows,
+# or ColumnFamilies with high write:read ratios. Specify a fraction
+# (value less than 1) or an absolute number of rows to cache.
+# Defaults to 0. (i.e. row caching is off by default)
+# - comment: used to attach additional human-readable information about
+# the column family to its definition.
+# - read_repair_chance: specifies the probability with which read
+# repairs should be invoked on non-quorum reads. must be between 0
+# and 1. defaults to 1.0 (always read repair).
+# - gc_grace_seconds: specifies the time to wait before garbage
+# collecting tombstones (deletion markers). defaults to 864000 (10
+# days). See http://wiki.apache.org/cassandra/DistributedDeletes
+# - default_validation_class: specifies a validator class to use for
+# validating all the column values in the CF.
+# - min_compaction_threshold: the minimum number of SSTables needed
+# to start a minor compaction. increasing this will cause minor
+# compactions to start less frequently and be more intensive. setting
+# this to 0 disables minor compactions. defaults to 4.
+# - max_compaction_threshold: the maximum number of SSTables allowed
+# before a minor compaction is forced. decreasing this will cause
+# minor compactions to start more frequently and be less intensive.
+# setting this to 0 disables minor compactions. defaults to 32.
+# - row_cache_save_period_in_seconds: number of seconds between saving
+# row caches. The row caches can be saved periodically and if one
+# exists on startup it will be loaded.
+# - key_cache_save_period_in_seconds: number of seconds between saving
+# key caches. The key caches can be saved periodically and if one
+# exists on startup it will be loaded.
+# - memtable_flush_after_mins: The maximum time to leave a dirty table
+# unflushed. This should be large enough that it won't cause a flush
+# storm of all memtables during periods of inactivity.
+# - memtable_throughput_in_mb: The maximum size of the memtable before
+# it is flushed. If undefined, 1/8 * heapsize will be used.
+# - memtable_operations_in_millions: Number of operations in millions
+# before the memtable is flushed. If undefined, throughput / 64 * 0.3
+# will be used.
+#
+# NOTE: this keyspace definition is for demonstration purposes only.
+# Cassandra will not load these definitions during startup. See
+# http://wiki.apache.org/cassandra/FAQ#no_keyspaces for an explanation.
+
35 lcp/src/test/resources/log4j-server.properties
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# for production, you should probably set the root to INFO
+# and the pattern to %c instead of %l. (%l is slower.)
+
+# output messages to stdout
+log4j.rootLogger=INFO,stdout
+
+# stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%-5p [%t]: %m%n
+log4j.appender.stdout.follow=true
+
+# Application logging options
+#log4j.logger.org.apache.cassandra=DEBUG
+#log4j.logger.org.apache.cassandra.db=DEBUG
+#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
+
+# Adding this to avoid thrift logging disconnect errors.
+log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
1  pom.xml
@@ -82,6 +82,7 @@
<module>core</module>
<module>object-mapper</module>
<module>test</module>
+ <module>lcp</module>
</modules>
<dependencyManagement>