Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP

Comparing changes

Choose two branches to see what’s changed or to start a new pull request. If you need to, you can also compare across forks.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks.
base fork: akkumar/YCSB
base: f39d6cbf01
...
head fork: akkumar/YCSB
compare: f75ffe814b
  • 14 commits
  • 7 files changed
  • 0 commit comments
  • 2 contributors
Commits on Oct 21, 2011
Swapnil Bawaskar -adding initial implementation of GemFireClient for ycsb.
-adding compile target for gemfire-client.
-adding ConfigHelper which calculates the number of entries to use to fill up a certain percentage of memory
7aef3fa
Commits on Nov 01, 2011
Swapnil Bawaskar improving support for gemfire in client-server topology by adding com…
…mand line properties to configure

- server host and port
- locators
- topology
Also adding javadocs
e5586a3
Swapnil Bawaskar adding javadocs e12f315
Commits on Nov 06, 2011
Swapnil Bawaskar adding dir db/gemfire/lib which is required for compilation 49869aa
Commits on Dec 01, 2011
Swapnil Bawaskar adding documentation 95757f9
Swapnil Bawaskar removing ConfigHelper since its not ready for prime-time. 646e251
Swapnil Bawaskar - adding sample cache.xml which should be provided to GemFire cache s…
…erver.

- removing old README file
7ad4903
Swapnil Bawaskar Merge remote branch 'upstream/master' e1297f5
Commits on Dec 02, 2011
Swapnil Bawaskar using ByteIterator instead of String for field values. 7f8fa82
Swapnil Bawaskar removing note about gemfire.jar on CLASSPATH for compilation since in…
…cludeantruntime is now false.
5ae76f1
Commits on Jan 11, 2012
Swapnil Bawaskar Merge remote branch 'upstream/master' d4606f7
Swapnil Bawaskar adding GemFire to makefile 188be5d
Michi Mutsuzaki gh-55 VMware vFabric GemFire (sbawaska) 83e4c0e
Commits on Jan 17, 2012
Michi Mutsuzaki gh-59 Cassandra 1.0.6 (akkumar) f75ffe8
View
19 Makefile
@@ -10,6 +10,9 @@ CASSANDRA_7_DIR=db/cassandra-0.7/lib
CASSANDRA_7_FILE=apache-cassandra-0.7.9-bin.tar.gz
CASSANDRA_8_DIR=db/cassandra-0.8/lib
CASSANDRA_8_FILE=apache-cassandra-0.8.7-bin.tar.gz
+CASSANDRA_1X_VERSION=1.0.6
+CASSANDRA_1X_DIR=db/cassandra-$(CASSANDRA_1X_VERSION)/lib
+CASSANDRA_1X_FILE=apache-cassandra-$(CASSANDRA_1X_VERSION)-bin.tar.gz
HBASE_DIR=db/hbase/lib
HBASE_VERSION=0.90.5
HBASE_FILE=hbase-$(HBASE_VERSION).tar.gz
@@ -23,6 +26,8 @@ VOLDEMORT_DIR=db/voldemort/lib
VOLDEMORT_FILE=voldemort-0.90.1.tar.gz
MAPKEEPER_DIR=db/mapkeeper/lib
MAPKEEPER_FILE=mapkeeper.jar
+GEMFIRE_DIR=db/gemfire/lib
+GEMFIRE_FILE=gemfire-6.6.1.jar
.PHONY: build
build: download-database-deps
@@ -33,12 +38,14 @@ download-database-deps: $(CASSANDRA_5_DIR)/$(CASSANDRA_5_FILE) \
$(CASSANDRA_6_DIR)/$(CASSANDRA_6_FILE) \
$(CASSANDRA_7_DIR)/$(CASSANDRA_7_FILE) \
$(CASSANDRA_8_DIR)/$(CASSANDRA_8_FILE) \
+ $(CASSANDRA_1X_DIR)/$(CASSANDRA_1X_FILE) \
$(HBASE_DIR)/$(HBASE_FILE) \
$(INFINISPAN_DIR)/$(INFINISPAN_FILE) \
$(MONGODB_DIR)/$(MONGODB_FILE) \
$(REDIS_DIR)/$(REDIS_FILE) \
$(VOLDEMORT_DIR)/$(VOLDEMORT_FILE) \
$(MAPKEEPER_DIR)/$(MAPKEEPER_FILE) \
+ $(GEMFIRE_DIR)/$(GEMFIRE_FILE) \
$(CASSANDRA_5_DIR)/$(CASSANDRA_5_FILE) :
wget http://archive.apache.org/dist/cassandra/0.5.1/$(CASSANDRA_5_FILE)\
@@ -60,6 +67,11 @@ $(CASSANDRA_8_DIR)/$(CASSANDRA_8_FILE) :
-O $(CASSANDRA_8_DIR)/$(CASSANDRA_8_FILE)
tar -C $(CASSANDRA_8_DIR) -zxf $(CASSANDRA_8_DIR)/$(CASSANDRA_8_FILE)
+$(CASSANDRA_1X_DIR)/$(CASSANDRA_1X_FILE) :
+ wget http://archive.apache.org/dist/cassandra/$(CASSANDRA_1X_VERSION)/$(CASSANDRA_1X_FILE)\
+ -O $(CASSANDRA_1X_DIR)/$(CASSANDRA_1X_FILE)
+ tar -C $(CASSANDRA_1X_DIR) -zxf $(CASSANDRA_1X_DIR)/$(CASSANDRA_1X_FILE)
+
$(HBASE_DIR)/$(HBASE_FILE) :
wget http://archive.apache.org/dist/hbase/hbase-$(HBASE_VERSION)/$(HBASE_FILE)\
-O $(HBASE_DIR)/$(HBASE_FILE)
@@ -88,3 +100,10 @@ $(MAPKEEPER_DIR)/$(MAPKEEPER_FILE) :
-O $(MAPKEEPER_DIR)/$(MAPKEEPER_FILE)
wget https://raw.github.com/m1ch1/mapkeeper/master/lib/libthrift-0.6.1.jar \
-O $(MAPKEEPER_DIR)/libthrift-0.6.1.jar
+
+$(GEMFIRE_DIR)/$(GEMFIRE_FILE) :
+ wget http://dist.gemstone.com.s3.amazonaws.com/maven/release/com/gemstone/gemfire/gemfire/6.6.1/$(GEMFIRE_FILE) \
+ -O $(GEMFIRE_DIR)/$(GEMFIRE_FILE)
+
+
+
View
112 build.xml
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project name="ycsb" default="compile" basedir=".">
-
+
<property name="src.dir" value="src"/>
<property name="lib.dir" value="lib"/>
<property name="doc.dir" value="doc"/>
@@ -15,62 +15,72 @@
</fileset>
</path>
+ <target name="dbcompile-gemfire" depends="compile">
+ <property name="db.dir" value="db/gemfire" />
+ <antcall target="dbcompile" />
+ </target>
+
<target name="dbcompile-infinispan-5.0" depends="compile">
<property name="db.dir" value="db/infinispan-5.0" />
<antcall target="dbcompile" />
</target>
<target name="dbcompile-cassandra-0.5" depends="compile">
- <property name="db.dir" value="db/cassandra-0.5"/>
- <antcall target="dbcompile"/>
+ <property name="db.dir" value="db/cassandra-0.5"/>
+ <antcall target="dbcompile"/>
</target>
-
- <target name="dbcompile-cassandra-0.6" depends="compile">
- <property name="db.dir" value="db/cassandra-0.6"/>
- <antcall target="dbcompile"/>
- </target>
-
- <target name="dbcompile-cassandra-0.7" depends="compile">
- <property name="db.dir" value="db/cassandra-0.7"/>
- <antcall target="dbcompile"/>
- </target>
+
+ <target name="dbcompile-cassandra-0.6" depends="compile">
+ <property name="db.dir" value="db/cassandra-0.6"/>
+ <antcall target="dbcompile"/>
+ </target>
+
+ <target name="dbcompile-cassandra-0.7" depends="compile">
+ <property name="db.dir" value="db/cassandra-0.7"/>
+ <antcall target="dbcompile"/>
+ </target>
<target name="dbcompile-cassandra-0.8" depends="compile">
<property name="db.dir" value="db/cassandra-0.8"/>
<antcall target="dbcompile"/>
</target>
-
- <target name="dbcompile-hbase" depends="compile">
- <property name="db.dir" value="db/hbase"/>
- <antcall target="dbcompile"/>
- </target>
-
- <target name="dbcompile-mongodb" depends="compile">
- <property name="db.dir" value="db/mongodb"/>
- <antcall target="dbcompile"/>
- </target>
-
- <target name="dbcompile-voldemort" depends="compile">
- <property name="db.dir" value="db/voldemort"/>
- <antcall target="dbcompile"/>
- </target>
-
- <target name="dbcompile-jdbc" depends="compile">
- <property name="db.dir" value="db/jdbc"/>
- <antcall target="dbcompile"/>
- </target>
+
+ <target name="dbcompile-cassandra-1.0.6" depends="compile">
+ <property name="db.dir" value="db/cassandra-1.0.6"/>
+ <antcall target="dbcompile"/>
+ </target>
+
+ <target name="dbcompile-hbase" depends="compile">
+ <property name="db.dir" value="db/hbase"/>
+ <antcall target="dbcompile"/>
+ </target>
+
+ <target name="dbcompile-mongodb" depends="compile">
+ <property name="db.dir" value="db/mongodb"/>
+ <antcall target="dbcompile"/>
+ </target>
+
+ <target name="dbcompile-voldemort" depends="compile">
+ <property name="db.dir" value="db/voldemort"/>
+ <antcall target="dbcompile"/>
+ </target>
+
+ <target name="dbcompile-jdbc" depends="compile">
+ <property name="db.dir" value="db/jdbc"/>
+ <antcall target="dbcompile"/>
+ </target>
<target name="dbcompile-redis" depends="compile">
<property name="db.dir" value="db/redis"/>
<antcall target="dbcompile"/>
</target>
-
- <target name="dbcompile-mapkeeper" depends="compile">
+
+ <target name="dbcompile-mapkeeper" depends="compile">
<property name="db.dir" value="db/mapkeeper"/>
<antcall target="dbcompile"/>
- </target>
+ </target>
- <target name="compile">
+ <target name="compile">
<mkdir dir="${classes.dir}"/>
<javac includeantruntime="false" srcdir="${src.dir}" destdir="${classes.dir}" classpathref="build.classpath" deprecation="on">
<compilerarg value="-Xlint:unchecked"/>
@@ -79,22 +89,22 @@
</target>
<target name="dbcompile">
- <path id="dbclasspath">
- <fileset dir="${db.dir}/lib" includes="**/*.jar"/>
- <fileset file="build/ycsb.jar"/>
- </path>
-
- <mkdir dir="${classes.dir}"/>
- <javac includeantruntime="false" srcdir="${db.dir}/src" destdir="${classes.dir}" classpathref="dbclasspath" deprecation="on">
+ <path id="dbclasspath">
+ <fileset dir="${db.dir}/lib" includes="**/*.jar"/>
+ <fileset file="build/ycsb.jar"/>
+ </path>
+
+ <mkdir dir="${classes.dir}"/>
+ <javac includeantruntime="false" srcdir="${db.dir}/src" destdir="${classes.dir}" classpathref="dbclasspath" deprecation="on">
<compilerarg value="-Xlint:unchecked"/>
- </javac>
- <antcall target="makejar"/>
+ </javac>
+ <antcall target="makejar"/>
+ </target>
+
+ <target name ="makejar" description="Create a jar for the YCSB project">
+ <jar jarfile="build/ycsb.jar" includes="**/*.class" basedir="${classes.dir}"/>
</target>
- <target name ="makejar" description="Create a jar for the YCSB project">
- <jar jarfile="build/ycsb.jar" includes="**/*.class" basedir="${classes.dir}"/>
- </target>
-
<target name="clean">
<delete includeemptydirs="true">
<fileset dir="build" includes="**/*"/>
@@ -102,7 +112,7 @@
</target>
<target name="doc">
- <javadoc destdir="${doc.dir}/javadoc" packagenames="com.yahoo.ycsb,com.yahoo.ycsb.workloads,com.yahoo.ycsb.db,com.yahoo.ycsb.generator,com.yahoo.ycsb.measurements">
+ <javadoc destdir="${doc.dir}/javadoc" packagenames="com.yahoo.ycsb,com.yahoo.ycsb.workloads,com.yahoo.ycsb.db,com.yahoo.ycsb.generator,com.yahoo.ycsb.measurements">
<fileset dir="." defaultexcludes="yes">
<include name="src/**"/>
<include name="db/*/src/**"/>
View
2  changes
@@ -25,6 +25,8 @@
* gh-54 Add mapkeeper driver (m1ch1)
* gh-57 voldemort - enable nio connector (akkumar)
* gh-58 benchmarking with hbase 0.90.5 (akkumar)
+ * gh-55 VMware vFabric GemFire (sbawaska)
+ * gh-59 Cassandra 1.0.6 (akkumar)
0.1.3
* Voldemort binding (rsumbaly)
View
619 db/cassandra-1.0.6/src/com/yahoo/ycsb/db/CassandraClient10.java
@@ -0,0 +1,619 @@
+/**
+ * Copyright (c) 2010 Yahoo! Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you
+ * may not use this file except in compliance with the License. You
+ * may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. See accompanying
+ * LICENSE file.
+ */
+
+package com.yahoo.ycsb.db;
+
+import com.yahoo.ycsb.*;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Vector;
+import java.util.Random;
+import java.util.Properties;
+import java.nio.ByteBuffer;
+
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.cassandra.thrift.*;
+
+
+//XXXX if we do replication, fix the consistency levels
+/**
+ * Cassandra 1.0.6 client for YCSB framework
+ */
+public class CassandraClient10 extends DB
+{
+ static Random random = new Random();
+ public static final int Ok = 0;
+ public static final int Error = -1;
+ public static final ByteBuffer emptyByteBuffer = ByteBuffer.wrap(new byte[0]);
+
+ public int ConnectionRetries;
+ public int OperationRetries;
+ public String column_family;
+
+ public static final String CONNECTION_RETRY_PROPERTY = "cassandra.connectionretries";
+ public static final String CONNECTION_RETRY_PROPERTY_DEFAULT = "300";
+
+ public static final String OPERATION_RETRY_PROPERTY = "cassandra.operationretries";
+ public static final String OPERATION_RETRY_PROPERTY_DEFAULT = "300";
+
+ public static final String USERNAME_PROPERTY = "cassandra.username";
+ public static final String PASSWORD_PROPERTY = "cassandra.password";
+
+ public static final String COLUMN_FAMILY_PROPERTY = "cassandra.columnfamily";
+ public static final String COLUMN_FAMILY_PROPERTY_DEFAULT = "data";
+
+ TTransport tr;
+ Cassandra.Client client;
+
+ boolean _debug = false;
+
+ String _table = "";
+ Exception errorexception = null;
+
+ List<Mutation> mutations = new ArrayList<Mutation>();
+ Map<String, List<Mutation>> mutationMap = new HashMap<String, List<Mutation>>();
+ Map<ByteBuffer, Map<String, List<Mutation>>> record = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
+
+ ColumnParent parent;
+
+ /**
+ * Initialize any state for this DB. Called once per DB instance; there is one
+ * DB instance per client thread.
+ */
+ public void init() throws DBException
+ {
+ String hosts = getProperties().getProperty("hosts");
+ if (hosts == null)
+ {
+ throw new DBException("Required property \"hosts\" missing for CassandraClient");
+ }
+
+ column_family = getProperties().getProperty(COLUMN_FAMILY_PROPERTY, COLUMN_FAMILY_PROPERTY_DEFAULT);
+ parent = new ColumnParent(column_family);
+
+ ConnectionRetries = Integer.parseInt(getProperties().getProperty(CONNECTION_RETRY_PROPERTY,
+ CONNECTION_RETRY_PROPERTY_DEFAULT));
+ OperationRetries = Integer.parseInt(getProperties().getProperty(OPERATION_RETRY_PROPERTY,
+ OPERATION_RETRY_PROPERTY_DEFAULT));
+
+ String username = getProperties().getProperty(USERNAME_PROPERTY);
+ String password = getProperties().getProperty(PASSWORD_PROPERTY);
+
+ _debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false"));
+
+ String[] allhosts = hosts.split(",");
+ String myhost = allhosts[random.nextInt(allhosts.length)];
+
+ Exception connectexception = null;
+
+ for (int retry = 0; retry < ConnectionRetries; retry++)
+ {
+ tr = new TFramedTransport(new TSocket(myhost, 9160));
+ TProtocol proto = new TBinaryProtocol(tr);
+ client = new Cassandra.Client(proto);
+ try
+ {
+ tr.open();
+ connectexception = null;
+ break;
+ } catch (Exception e)
+ {
+ connectexception = e;
+ }
+ try
+ {
+ Thread.sleep(1000);
+ } catch (InterruptedException e)
+ {
+ }
+ }
+ if (connectexception != null)
+ {
+ System.err.println("Unable to connect to " + myhost + " after " + ConnectionRetries
+ + " tries");
+ System.out.println("Unable to connect to " + myhost + " after " + ConnectionRetries
+ + " tries");
+ throw new DBException(connectexception);
+ }
+
+ if (username != null && password != null)
+ {
+ Map<String,String> cred = new HashMap<String,String>();
+ cred.put("username", username);
+ cred.put("password", password);
+ AuthenticationRequest req = new AuthenticationRequest(cred);
+ try
+ {
+ client.login(req);
+ }
+ catch (Exception e)
+ {
+ throw new DBException(e);
+ }
+ }
+ }
+
+ /**
+ * Cleanup any state for this DB. Called once per DB instance; there is one DB
+ * instance per client thread.
+ */
+ public void cleanup() throws DBException
+ {
+ tr.close();
+ }
+
+ /**
+ * Read a record from the database. Each field/value pair from the result will
+ * be stored in a HashMap.
+ *
+ * @param table
+ * The name of the table
+ * @param key
+ * The record key of the record to read.
+ * @param fields
+ * The list of fields to read, or null for all of them
+ * @param result
+ * A HashMap of field/value pairs for the result
+ * @return Zero on success, a non-zero error code on error
+ */
+ public int read(String table, String key, Set<String> fields, HashMap<String, ByteIterator> result)
+ {
+ if (!_table.equals(table)) {
+ try
+ {
+ client.set_keyspace(table);
+ _table = table;
+ }
+ catch (Exception e)
+ {
+ e.printStackTrace();
+ e.printStackTrace(System.out);
+ return Error;
+ }
+ }
+
+ for (int i = 0; i < OperationRetries; i++)
+ {
+
+ try
+ {
+ SlicePredicate predicate;
+ if (fields == null)
+ {
+ predicate = new SlicePredicate().setSlice_range(new SliceRange(emptyByteBuffer, emptyByteBuffer, false, 1000000));
+
+ } else {
+ ArrayList<ByteBuffer> fieldlist = new ArrayList<ByteBuffer>(fields.size());
+ for (String s : fields)
+ {
+ fieldlist.add(ByteBuffer.wrap(s.getBytes("UTF-8")));
+ }
+
+ predicate = new SlicePredicate().setColumn_names(fieldlist);
+ }
+
+ List<ColumnOrSuperColumn> results = client.get_slice(ByteBuffer.wrap(key.getBytes("UTF-8")), parent, predicate, ConsistencyLevel.ONE);
+
+ if (_debug)
+ {
+ System.out.print("Reading key: " + key);
+ }
+
+ Column column;
+ String name;
+ ByteIterator value;
+ for (ColumnOrSuperColumn oneresult : results)
+ {
+
+ column = oneresult.column;
+ name = new String(column.name.array(), column.name.position()+column.name.arrayOffset(), column.name.remaining());
+ value = new ByteArrayByteIterator(column.value.array(), column.value.position()+column.value.arrayOffset(), column.value.remaining());
+
+ result.put(name,value);
+
+ if (_debug)
+ {
+ System.out.print("(" + name + "=" + value + ")");
+ }
+ }
+
+ if (_debug)
+ {
+ System.out.println();
+ }
+
+ return Ok;
+ } catch (Exception e)
+ {
+ errorexception = e;
+ }
+
+ try
+ {
+ Thread.sleep(500);
+ } catch (InterruptedException e)
+ {
+ }
+ }
+ errorexception.printStackTrace();
+ errorexception.printStackTrace(System.out);
+ return Error;
+
+ }
+
+ /**
+ * Perform a range scan for a set of records in the database. Each field/value
+ * pair from the result will be stored in a HashMap.
+ *
+ * @param table
+ * The name of the table
+ * @param startkey
+ * The record key of the first record to read.
+ * @param recordcount
+ * The number of records to read
+ * @param fields
+ * The list of fields to read, or null for all of them
+ * @param result
+ * A Vector of HashMaps, where each HashMap is a set field/value
+ * pairs for one record
+ * @return Zero on success, a non-zero error code on error
+ */
+ public int scan(String table, String startkey, int recordcount, Set<String> fields,
+ Vector<HashMap<String, ByteIterator>> result)
+ {
+ if (!_table.equals(table)) {
+ try
+ {
+ client.set_keyspace(table);
+ _table = table;
+ }
+ catch (Exception e)
+ {
+ e.printStackTrace();
+ e.printStackTrace(System.out);
+ return Error;
+ }
+ }
+
+ for (int i = 0; i < OperationRetries; i++)
+ {
+
+ try
+ {
+ SlicePredicate predicate;
+ if (fields == null)
+ {
+ predicate = new SlicePredicate().setSlice_range(new SliceRange(emptyByteBuffer, emptyByteBuffer, false, 1000000));
+
+ } else {
+ ArrayList<ByteBuffer> fieldlist = new ArrayList<ByteBuffer>(fields.size());
+ for (String s : fields)
+ {
+ fieldlist.add(ByteBuffer.wrap(s.getBytes("UTF-8")));
+ }
+
+ predicate = new SlicePredicate().setColumn_names(fieldlist);
+ }
+
+ KeyRange kr = new KeyRange().setStart_key(startkey.getBytes("UTF-8")).setEnd_key(new byte[] {}).setCount(recordcount);
+
+ List<KeySlice> results = client.get_range_slices(parent, predicate, kr, ConsistencyLevel.ONE);
+
+ if (_debug)
+ {
+ System.out.println("Scanning startkey: " + startkey);
+ }
+
+ HashMap<String, ByteIterator> tuple;
+ for (KeySlice oneresult : results)
+ {
+ tuple = new HashMap<String, ByteIterator>();
+
+ Column column;
+ String name;
+ ByteIterator value;
+ for (ColumnOrSuperColumn onecol : oneresult.columns)
+ {
+ column = onecol.column;
+ name = new String(column.name.array(), column.name.position()+column.name.arrayOffset(), column.name.remaining());
+ value = new ByteArrayByteIterator(column.value.array(), column.value.position()+column.value.arrayOffset(), column.value.remaining());
+
+ tuple.put(name, value);
+
+ if (_debug)
+ {
+ System.out.print("(" + name + "=" + value + ")");
+ }
+ }
+
+ result.add(tuple);
+ if (_debug)
+ {
+ System.out.println();
+ }
+ }
+
+ return Ok;
+ } catch (Exception e)
+ {
+ errorexception = e;
+ }
+ try
+ {
+ Thread.sleep(500);
+ } catch (InterruptedException e)
+ {
+ }
+ }
+ errorexception.printStackTrace();
+ errorexception.printStackTrace(System.out);
+ return Error;
+ }
+
+ /**
+ * Update a record in the database. Any field/value pairs in the specified
+ * values HashMap will be written into the record with the specified record
+ * key, overwriting any existing values with the same field name.
+ *
+ * @param table
+ * The name of the table
+ * @param key
+ * The record key of the record to write.
+ * @param values
+ * A HashMap of field/value pairs to update in the record
+ * @return Zero on success, a non-zero error code on error
+ */
+ public int update(String table, String key, HashMap<String, ByteIterator> values)
+ {
+ return insert(table, key, values);
+ }
+
+ /**
+ * Insert a record in the database. Any field/value pairs in the specified
+ * values HashMap will be written into the record with the specified record
+ * key.
+ *
+ * @param table
+ * The name of the table
+ * @param key
+ * The record key of the record to insert.
+ * @param values
+ * A HashMap of field/value pairs to insert in the record
+ * @return Zero on success, a non-zero error code on error
+ */
+ public int insert(String table, String key, HashMap<String, ByteIterator> values)
+ {
+ if (!_table.equals(table)) {
+ try
+ {
+ client.set_keyspace(table);
+ _table = table;
+ }
+ catch (Exception e)
+ {
+ e.printStackTrace();
+ e.printStackTrace(System.out);
+ return Error;
+ }
+ }
+
+ for (int i = 0; i < OperationRetries; i++)
+ {
+ if (_debug)
+ {
+ System.out.println("Inserting key: " + key);
+ }
+
+ try
+ {
+ ByteBuffer wrappedKey = ByteBuffer.wrap(key.getBytes("UTF-8"));
+
+ Column col;
+ ColumnOrSuperColumn column;
+ for (Map.Entry<String, ByteIterator> entry : values.entrySet())
+ {
+ col = new Column();
+ col.setName(ByteBuffer.wrap(entry.getKey().getBytes("UTF-8")));
+ col.setValue(ByteBuffer.wrap(entry.getValue().toArray()));
+ col.setTimestamp(System.currentTimeMillis());
+
+ column = new ColumnOrSuperColumn();
+ column.setColumn(col);
+
+ mutations.add(new Mutation().setColumn_or_supercolumn(column));
+ }
+
+ mutationMap.put(column_family, mutations);
+ record.put(wrappedKey, mutationMap);
+
+ client.batch_mutate(record, ConsistencyLevel.ONE);
+
+ mutations.clear();
+ mutationMap.clear();
+ record.clear();
+
+ return Ok;
+ } catch (Exception e)
+ {
+ errorexception = e;
+ }
+ try
+ {
+ Thread.sleep(500);
+ } catch (InterruptedException e)
+ {
+ }
+ }
+
+ errorexception.printStackTrace();
+ errorexception.printStackTrace(System.out);
+ return Error;
+ }
+
+ /**
+ * Delete a record from the database.
+ *
+ * @param table
+ * The name of the table
+ * @param key
+ * The record key of the record to delete.
+ * @return Zero on success, a non-zero error code on error
+ */
+ public int delete(String table, String key)
+ {
+ if (!_table.equals(table)) {
+ try
+ {
+ client.set_keyspace(table);
+ _table = table;
+ }
+ catch (Exception e)
+ {
+ e.printStackTrace();
+ e.printStackTrace(System.out);
+ return Error;
+ }
+ }
+
+ for (int i = 0; i < OperationRetries; i++)
+ {
+ try
+ {
+ client.remove(ByteBuffer.wrap(key.getBytes("UTF-8")),
+ new ColumnPath(column_family),
+ System.currentTimeMillis(),
+ ConsistencyLevel.ONE);
+
+ if (_debug)
+ {
+ System.out.println("Delete key: " + key);
+ }
+
+ return Ok;
+ } catch (Exception e)
+ {
+ errorexception = e;
+ }
+ try
+ {
+ Thread.sleep(500);
+ } catch (InterruptedException e)
+ {
+ }
+ }
+ errorexception.printStackTrace();
+ errorexception.printStackTrace(System.out);
+ return Error;
+ }
+
+ public static void main(String[] args)
+ {
+ CassandraClient10 cli = new CassandraClient10();
+
+ Properties props = new Properties();
+
+ props.setProperty("hosts", args[0]);
+ cli.setProperties(props);
+
+ try
+ {
+ cli.init();
+ } catch (Exception e)
+ {
+ e.printStackTrace();
+ System.exit(0);
+ }
+
+ HashMap<String, ByteIterator> vals = new HashMap<String, ByteIterator>();
+ vals.put("age", new StringByteIterator("57"));
+ vals.put("middlename", new StringByteIterator("bradley"));
+ vals.put("favoritecolor", new StringByteIterator("blue"));
+ int res = cli.insert("usertable", "BrianFrankCooper", vals);
+ System.out.println("Result of insert: " + res);
+
+ HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
+ HashSet<String> fields = new HashSet<String>();
+ fields.add("middlename");
+ fields.add("age");
+ fields.add("favoritecolor");
+ res = cli.read("usertable", "BrianFrankCooper", null, result);
+ System.out.println("Result of read: " + res);
+ for (String s : result.keySet())
+ {
+ System.out.println("[" + s + "]=[" + result.get(s) + "]");
+ }
+
+ res = cli.delete("usertable", "BrianFrankCooper");
+ System.out.println("Result of delete: " + res);
+ }
+
+ /*
+ * public static void main(String[] args) throws TException,
+ * InvalidRequestException, UnavailableException,
+ * UnsupportedEncodingException, NotFoundException {
+ *
+ *
+ *
+ * String key_user_id = "1";
+ *
+ *
+ *
+ *
+ * client.insert("Keyspace1", key_user_id, new ColumnPath("Standard1", null,
+ * "age".getBytes("UTF-8")), "24".getBytes("UTF-8"), timestamp,
+ * ConsistencyLevel.ONE);
+ *
+ *
+ * // read single column ColumnPath path = new ColumnPath("Standard1", null,
+ * "name".getBytes("UTF-8"));
+ *
+ * System.out.println(client.get("Keyspace1", key_user_id, path,
+ * ConsistencyLevel.ONE));
+ *
+ *
+ * // read entire row SlicePredicate predicate = new SlicePredicate(null, new
+ * SliceRange(new byte[0], new byte[0], false, 10));
+ *
+ * ColumnParent parent = new ColumnParent("Standard1", null);
+ *
+ * List<ColumnOrSuperColumn> results = client.get_slice("Keyspace1",
+ * key_user_id, parent, predicate, ConsistencyLevel.ONE);
+ *
+ * for (ColumnOrSuperColumn result : results) {
+ *
+ * Column column = result.column;
+ *
+ * System.out.println(new String(column.name, "UTF-8") + " -> " + new
+ * String(column.value, "UTF-8"));
+ *
+ * }
+ *
+ *
+ *
+ *
+ * }
+ */
+}
View
25 db/gemfire/lib/README.txt
@@ -0,0 +1,25 @@
+This directory should contain gemfire.jar for compiling GemFireClient.
+
+GemFireClient can be compiled using target:
+$ ant dbcompile-gemfire
+
+Running the benchmark:
+1. Copy cache.xml from this dir to your GemFire install directory ($GEMFIRE_HOME)
+2. start GemFire cache server
+ - $ cd $GEMFIRE_HOME
+ - $ bin/cacheserver start -J-Xms42g -J-Xmx42g -J-XX:+UseConcMarkSweepGC -J-XX:CMSInitiatingOccupancyFraction=70
+3. Add ycsb.jar and gemfire.jar to CLASSPATH.
+4. Run the YCSB workload.
+
+GemFire can be run either in client-server or peer-to-peer mode.
+By default com.yahoo.ycsb.db.GemFireClient connects as a client to a GemFire server running on localhost on the default port (40404). The host name and port of a GemFire server running elsewhere can be specified by the properties "gemfire.serverhost" and "gemfire.serverport" respectively. Example:
+$ java com.yahoo.ycsb.Client -load -db com.yahoo.ycsb.db.GemFireClient -P workloads/workloada -p gemfire.serverhost=host2 -p gemfire.serverport=3333
+
+To run com.yahoo.ycsb.db.GemFireClient as a peer to existing GemFire members, use the property "gemfire.topology" like so:
+$ java com.yahoo.ycsb.Client -load -db com.yahoo.ycsb.db.GemFireClient -P workloads/workloada -p gemfire.topology=p2p
+
+Locators can be used for member discovery, either in client-server or peer-to-peer mode. Please see the GemFire docs for details. Locators can be specified like so:
+$ java com.yahoo.ycsb.Client -load -db com.yahoo.ycsb.db.GemFireClient -P workloads/workloada -p gemfire.locator=host1[port1],host2[port2]
+
+Please refer to GemFire docs here: https://www.vmware.com/support/pubs/vfabric-gemfire.html.
+Questions? visit: http://forums.gemstone.com
View
9 db/gemfire/lib/cache.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0"?>
+
+<!DOCTYPE cache PUBLIC
+ "-//GemStone Systems, Inc.//GemFire Declarative Caching 6.5//EN"
+ "http://www.gemstone.com/dtd/cache6_5.dtd">
+<cache>
+ <region name="usertable" refid="PARTITION"/>
+</cache>
+
View
200 db/gemfire/src/com/yahoo/ycsb/db/GemFireClient.java
@@ -0,0 +1,200 @@
+package com.yahoo.ycsb.db;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.Vector;
+
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.GemFireCache;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionExistsException;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.client.ClientCache;
+import com.gemstone.gemfire.cache.client.ClientCacheFactory;
+import com.gemstone.gemfire.cache.client.ClientRegionFactory;
+import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
+import com.gemstone.gemfire.internal.admin.remote.DistributionLocatorId;
+import com.yahoo.ycsb.ByteArrayByteIterator;
+import com.yahoo.ycsb.ByteIterator;
+import com.yahoo.ycsb.DB;
+import com.yahoo.ycsb.DBException;
+import com.yahoo.ycsb.StringByteIterator;
+
+/**
+ * VMware vFabric GemFire client for the YCSB benchmark.<br />
+ * <p>By default acts as a GemFire client and tries to connect
+ * to GemFire cache server running on localhost with default
+ * cache server port. Hostname and port of a GemFire cacheServer
+ * can be provided using <code>gemfire.serverport=port</code> and <code>
+ * gemfire.serverhost=host</code> properties on YCSB command line.
+ * A locator may also be used for discovering a cacheServer
+ * by using the property <code>gemfire.locator=host[port]</code></p>
+ *
+ * <p>To run this client in a peer-to-peer topology with other GemFire
+ * nodes, use the property <code>gemfire.topology=p2p</code>. Running
+ * in p2p mode will enable embedded caching in this client.</p>
+ *
+ * <p>YCSB by default does its operations against "usertable". When running
+ * as a client this is a <code>ClientRegionShortcut.PROXY</code> region,
+ * when running in p2p mode it is a <code>RegionShortcut.PARTITION</code>
+ * region. A cache.xml defining "usertable" region can be placed in the
+ * working directory to override these region definitions.</p>
+ *
+ * @author Swapnil Bawaskar (sbawaska at vmware)
+ *
+ */
+public class GemFireClient extends DB {
+
+ /** Return code when operation succeeded */
+ private static final int SUCCESS = 0;
+
+ /** Return code when operation did not succeed */
+ private static final int ERROR = -1;
+
+ /** property name of the port where GemFire server is listening for connections */
+ private static final String SERVERPORT_PROPERTY_NAME = "gemfire.serverport";
+
+ /** property name of the host where GemFire server is running */
+ private static final String SERVERHOST_PROPERTY_NAME = "gemfire.serverhost";
+
+ /** default value of {@link #SERVERHOST_PROPERTY_NAME} */
+ private static final String SERVERHOST_PROPERTY_DEFAULT = "localhost";
+
+ /** property name to specify a GemFire locator. This property can be used in both
+ * client server and p2p topology */
+ private static final String LOCATOR_PROPERTY_NAME = "gemfire.locator";
+
+ /** property name to specify GemFire topology */
+ private static final String TOPOLOGY_PROPERTY_NAME = "gemfire.topology";
+
+ /** value of {@value #TOPOLOGY_PROPERTY_NAME} when peer to peer topology should be used.
+ * (client-server topology is default) */
+ private static final String TOPOLOGY_P2P_VALUE = "p2p";
+
+ private GemFireCache cache;
+
+ /**
+ * true if ycsb client runs as a client to a
+ * GemFire cache server
+ */
+ private boolean isClient;
+
+ @Override
+ public void init() throws DBException {
+ Properties props = getProperties();
+ // hostName where GemFire cacheServer is running
+ String serverHost = null;
+ // port of GemFire cacheServer
+ int serverPort = 0;
+ String locatorStr = null;
+
+ if (props != null && !props.isEmpty()) {
+ String serverPortStr = props.getProperty(SERVERPORT_PROPERTY_NAME);
+ if (serverPortStr != null) {
+ serverPort = Integer.parseInt(serverPortStr);
+ }
+ serverHost = props.getProperty(SERVERHOST_PROPERTY_NAME, SERVERHOST_PROPERTY_DEFAULT);
+ locatorStr = props.getProperty(LOCATOR_PROPERTY_NAME);
+
+ String topology = props.getProperty(TOPOLOGY_PROPERTY_NAME);
+ if (topology != null && topology.equals(TOPOLOGY_P2P_VALUE)) {
+ CacheFactory cf = new CacheFactory();
+ if (locatorStr != null) {
+ cf.set("locators", locatorStr);
+ }
+ cache = cf.create();
+ isClient = false;
+ return;
+ }
+ }
+ isClient = true;
+ DistributionLocatorId locator = null;
+ if (locatorStr != null) {
+ locator = new DistributionLocatorId(locatorStr);
+ }
+ ClientCacheFactory ccf = new ClientCacheFactory();
+ if (serverPort != 0) {
+ ccf.addPoolServer(serverHost, serverPort);
+ } else if (locator != null) {
+ ccf.addPoolLocator(locator.getHost().getCanonicalHostName(), locator.getPort());
+ }
+ cache = ccf.create();
+ }
+
+ @Override
+ public int read(String table, String key, Set<String> fields,
+ HashMap<String, ByteIterator> result) {
+ Region<String, Map<String, byte[]>> r = getRegion(table);
+ Map<String, byte[]> val = r.get(key);
+ if (val != null) {
+ if (fields == null) {
+ for (String k : val.keySet()) {
+ result.put(key, new ByteArrayByteIterator(val.get(key)));
+ }
+ } else {
+ for (String field : fields) {
+ result.put(field, new ByteArrayByteIterator(val.get(field)));
+ }
+ }
+ return SUCCESS;
+ }
+ return ERROR;
+ }
+
+ @Override
+ public int scan(String table, String startkey, int recordcount,
+ Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
+ // GemFire does not support scan
+ return ERROR;
+ }
+
+ @Override
+ public int update(String table, String key, HashMap<String, ByteIterator> values) {
+ getRegion(table).put(key, convertToBytearrayMap(values));
+ return 0;
+ }
+
+ @Override
+ public int insert(String table, String key, HashMap<String, ByteIterator> values) {
+ getRegion(table).put(key, convertToBytearrayMap(values));
+ return 0;
+ }
+
+ @Override
+ public int delete(String table, String key) {
+ getRegion(table).destroy(key);
+ return 0;
+ }
+
+ private Map<String, byte[]> convertToBytearrayMap(Map<String,ByteIterator> values) {
+ Map<String, byte[]> retVal = new HashMap<String, byte[]>();
+ for (String key : values.keySet()) {
+ retVal.put(key, values.get(key).toArray());
+ }
+ return retVal;
+ }
+
+ private Region<String, Map<String, byte[]>> getRegion(String table) {
+ Region<String, Map<String, byte[]>> r = cache.getRegion(table);
+ if (r == null) {
+ try {
+ if (isClient) {
+ ClientRegionFactory<String, Map<String, byte[]>> crf = ((ClientCache) cache).createClientRegionFactory(ClientRegionShortcut.PROXY);
+ r = crf.create(table);
+ } else {
+ RegionFactory<String, Map<String, byte[]>> rf = ((Cache)cache).createRegionFactory(RegionShortcut.PARTITION);
+ r = rf.create(table);
+ }
+ } catch (RegionExistsException e) {
+ // another thread created the region
+ r = cache.getRegion(table);
+ }
+ }
+ return r;
+ }
+
+}

No commit comments for this range

Something went wrong with that request. Please try again.