
code merge

gegao committed Dec 31, 2013
2 parents 0b65b9e + 0ac50bd commit dcde69733be0a688f9b89ceb7c9952187ca5f2bd
Showing with 199 additions and 87 deletions.
  1. +1 −0 build-common.xml
  2. +1 −1 build.py
  3. +1 −1 build.xml
  4. +3 −3 buildtools.py
  5. +3 −0 properties/benchmarks/ycsb.properties
  6. +13 −16 src/benchmarks/edu/brown/benchmark/ycsb/YCSBClient.java
  7. +2 −4 src/benchmarks/edu/brown/benchmark/ycsb/YCSBLoader.java
  8. +5 −5 src/benchmarks/org/voltdb/benchmark/tpcc/TPCCProjectBuilder.java
  9. +12 −12 src/ee/common/tabletuple.h
  10. +1 −1 src/ee/executors/deleteexecutor.cpp
  11. +1 −1 src/ee/executors/indexscanexecutor.cpp
  12. +1 −1 src/ee/executors/seqscanexecutor.cpp
  13. +1 −1 src/ee/executors/updateexecutor.cpp
  14. +7 −6 src/ee/storage/ReadWriteTracker.cpp
  15. +3 −3 src/ee/storage/ReadWriteTracker.h
  16. +2 −1 src/frontend/edu/brown/catalog/conflicts/ConflictSetCalculator.java
  17. +2 −1 src/frontend/edu/brown/hstore/HStoreCoordinator.java
  18. +6 −6 src/frontend/edu/brown/hstore/PartitionExecutor.java
  19. +11 −2 src/frontend/edu/brown/hstore/conf/HStoreConf.java
  20. +11 −4 src/frontend/edu/brown/statistics/ProcedureStatistics.java
  21. +23 −6 src/frontend/org/voltdb/sysprocs/AdHoc.java
  22. +31 −0 tests/ee/storage/persistent_table_log_test.cpp
  23. +3 −7 tests/frontend/edu/brown/costmodel/TestSingleSitedCostModelInvalidateCache.java
  24. +23 −0 tests/frontend/edu/brown/hstore/TestReadWriteTracking.java
  25. +9 −1 tests/frontend/org/voltdb/ServerThread.java
  26. +18 −3 tests/frontend/org/voltdb/regressionsuites/LocalCluster.java
  27. +5 −1 tests/frontend/org/voltdb/regressionsuites/LocalSingleProcessServer.java
build-common.xml
@@ -142,6 +142,7 @@
<arg value="site.commandlog_timeout=${site.commandlog_timeout}" />
<arg value="site.commandlog_profiling=${site.commandlog_profiling}" />
<arg value="site.anticache_enable=${site.anticache_enable}" />
<arg value="site.anticache_build=${site.anticache_build}" />
<arg value="site.anticache_reversible_lru=${site.anticache_reversible_lru}" />
<arg value="site.anticache_profiling=${site.anticache_profiling}" />
<arg value="site.anticache_dir=${site.anticache_dir}" />
build.py
@@ -376,7 +376,7 @@
# ANTI-CACHING
###############################################################################
- if CTX.ANTICACHE_ENABLE:
+ if CTX.ANTICACHE_BUILD:
CTX.CPPFLAGS += " -DANTICACHE"
if CTX.ANTICACHE_REVERSIBLE_LRU:
build.xml
@@ -879,7 +879,7 @@ NATIVE EE STUFF
<arg value="build.py" />
<arg value="LOG_LEVEL=${site.exec_ee_log_level}" />
<arg value="MMAP_STORAGE=${site.storage_mmap}" />
<arg value="ANTICACHE_ENABLE=${site.anticache_enable}" />
<arg value="ANTICACHE_BUILD=${site.anticache_build}" />
<arg value="ANTICACHE_REVERSIBLE_LRU=${site.anticache_reversible_lru}" />
<arg value="${build}" />
</exec>
buildtools.py
@@ -40,7 +40,7 @@ def __init__(self, args):
self.COVERAGE = False
self.VOLT_LOG_LEVEL = None
self.MMAP_STORAGE = False
- self.ANTICACHE_ENABLE = True
+ self.ANTICACHE_BUILD = True
self.ANTICACHE_REVERSIBLE_LRU = True
for arg in [x.strip().upper() for x in args]:
@@ -54,10 +54,10 @@ def __init__(self, args):
parts = arg.split("=")
if len(parts) > 1 and not parts[1].startswith("${"):
self.MMAP_STORAGE = bool(parts[1])
if arg.startswith("ANTICACHE_ENABLE="):
if arg.startswith("ANTICACHE_BUILD="):
parts = arg.split("=")
if len(parts) > 1 and not parts[1].startswith("${"):
- self.ANTICACHE_ENABLE = bool(parts[1])
+ self.ANTICACHE_BUILD = bool(parts[1])
if arg.startswith("ANTICACHE_REVERSIBLE_LRU="):
parts = arg.split("=")
if len(parts) > 1 and not parts[1].startswith("${"):
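Taken together, the build changes above separate compiling the anti-cache code (ANTICACHE_BUILD, which puts -DANTICACHE into CPPFLAGS in build.py) from enabling the feature at runtime (site.anticache_enable). A minimal sketch of the resulting compile-time gate; the #ifdef ANTICACHE guard itself is real and appears in indexscanexecutor.cpp below, but the body here is only a placeholder comment:

    #ifdef ANTICACHE
        // anti-cache code paths: compiled in only when build.py ran with
        // ANTICACHE_BUILD=true, i.e. when -DANTICACHE was added to CPPFLAGS
    #endif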
properties/benchmarks/ycsb.properties
@@ -8,3 +8,6 @@ builder = edu.brown.benchmark.ycsb.YCSBProjectBuilder
# only add the number of tuples defined in 'num_records'
fixed_size = false
num_records = 100000
+ # Zipfian skew factor for tuple access
+ skew_factor = 0.5
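The new skew_factor is the Zipfian skew constant handed to the generators in YCSBClient below: 0 gives uniform access, and larger values concentrate accesses on a small hot set of records. For intuition, a self-contained sketch of inverse-CDF Zipfian sampling (this is not H-Store's ZipfianGenerator; every name here is invented):

    #include <cmath>
    #include <cstdlib>
    #include <vector>

    // Draws record ids in [0, n) with probability proportional to 1/(i+1)^skew.
    struct ZipfSketch {
        std::vector<double> cdf;   // cumulative distribution over record ids

        ZipfSketch(long n, double skew) : cdf(n) {
            double norm = 0.0;
            for (long i = 0; i < n; i++)
                norm += 1.0 / std::pow((double)(i + 1), skew);
            double sum = 0.0;
            for (long i = 0; i < n; i++) {
                sum += (1.0 / std::pow((double)(i + 1), skew)) / norm;
                cdf[i] = sum;
            }
        }

        // Linear scan keeps the sketch short; a real generator does better.
        long next() const {
            double u = (double)std::rand() / RAND_MAX;
            for (size_t i = 0; i < cdf.size(); i++)
                if (u <= cdf[i]) return (long)i;
            return (long)cdf.size() - 1;
        }
    };

With n = 100000 and skew = 0.5 as in the properties above, low-numbered records are drawn noticeably more often than the tail.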
src/benchmarks/edu/brown/benchmark/ycsb/YCSBClient.java
@@ -89,6 +89,7 @@ private Transaction(String displayName, int weight) {
private final ZipfianGenerator randScan;
private final FlatHistogram<Transaction> txnWeights;
private final Random rand_gen;
+ private double skewFactor = YCSBConstants.ZIPFIAN_CONSTANT;
int run_count = 0;
@@ -101,7 +102,6 @@ public static void main(String args[]) {
public YCSBClient(String args[]) {
super(args);
- final CatalogContext catalogContext = this.getCatalogContext();
boolean useFixedSize = false;
long fixedSize = -1;
for (String key : m_extraParams.keySet()) {
@@ -116,22 +116,19 @@ public YCSBClient(String args[]) {
else if (key.equalsIgnoreCase("num_records")) {
fixedSize = Long.valueOf(value);
}
+ // Zipfian Skew Factor
+ else if (key.equalsIgnoreCase("skew_factor")) {
+ this.skewFactor = Double.valueOf(value);
+ }
} // FOR
// Figure out the # of records that we need
- // if (useFixedSize && fixedSize > 0) {
- // this.init_record_count = fixedSize;
- // }
- // else {
- // //this.init_record_count = (int)Math.round(YCSBConstants.NUM_RECORDS *
- //// catalogContext.numberOfPartitions *
- //// this.getScaleFactor());
- //
- // this.init_record_count = YCSBConstants.NUM_RECORDS;
- // }
- this.init_record_count = YCSBConstants.NUM_RECORDS;
+ if (useFixedSize && fixedSize > 0) {
+ this.init_record_count = fixedSize;
+ }
+ else {
+ this.init_record_count = (long)Math.round(YCSBConstants.NUM_RECORDS * this.getScaleFactor());
+ }
this.rand_gen = new Random();
this.randScan = new ZipfianGenerator(YCSBConstants.MAX_SCAN);
@@ -145,8 +142,8 @@ else if (key.equalsIgnoreCase("num_records")) {
// YCSBConstants.HOT_DATA_WORKLOAD_SKEW, YCSBConstants.HOT_DATA_SIZE,
// YCSBConstants.WARM_DATA_WORKLOAD_SKEW, YCSBConstants.WARM_DATA_SIZE);
- this.insertRecord = new ZipfianGenerator(YCSBConstants.NUM_RECORDS, YCSBConstants.ZIPFIAN_CONSTANT);
- this.readRecord = new ZipfianGenerator(YCSBConstants.NUM_RECORDS, YCSBConstants.ZIPFIAN_CONSTANT);
+ this.insertRecord = new ZipfianGenerator(this.init_record_count, this.skewFactor);
+ this.readRecord = new ZipfianGenerator(this.init_record_count, this.skewFactor);
// Initialize the sampling table
Histogram<Transaction> txns = new ObjectHistogram<Transaction>();
src/benchmarks/edu/brown/benchmark/ycsb/YCSBLoader.java
@@ -71,7 +71,6 @@ public YCSBLoader(String[] args) {
if (debug.val)
LOG.debug("CONSTRUCTOR: " + YCSBLoader.class.getName());
- final CatalogContext catalogContext = this.getCatalogContext();
boolean useFixedSize = false;
long fixedSize = -1;
for (String key : m_extraParams.keySet()) {
@@ -98,7 +97,6 @@ else if (key.equalsIgnoreCase("loadthreads")) {
}
else {
this.init_record_count = (int)Math.round(YCSBConstants.NUM_RECORDS *
- catalogContext.numberOfPartitions *
this.getScaleFactor());
}
LOG.info("Initializing database with " + init_record_count + " records.");
@@ -144,7 +142,7 @@ public void run() {
table.clearRowData();
if (debug.val)
LOG.debug(String.format("[%d] Records Loaded: %6d / %d",
- thread_id, total.get(), YCSBConstants.NUM_RECORDS));
+ thread_id, total.get(), init_record_count));
}
} // FOR
@@ -155,7 +153,7 @@ public void run() {
table.clearRowData();
if (debug.val)
LOG.debug(String.format("[%d] Records Loaded: %6d / %d",
- thread_id, total.get(), YCSBConstants.NUM_RECORDS));
+ thread_id, total.get(), init_record_count));
}
}
});
src/benchmarks/org/voltdb/benchmark/tpcc/TPCCProjectBuilder.java
@@ -72,11 +72,11 @@
LoadWarehouse.class,
LoadWarehouseReplicated.class,
GetTableCounts.class,
- MRquery1.class,
- MRquery3.class,
- MRquery6.class,
- MRquery12.class,
- MRqueryJoinAgg.class,
+ // MRquery1.class,
+ // MRquery3.class,
+ // MRquery6.class,
+ // MRquery12.class,
+ // MRqueryJoinAgg.class,
};
// Transaction Frequencies
src/ee/common/tabletuple.h
@@ -309,18 +309,18 @@ class TableTuple {
}
- inline uint32_t getTupleID()
- {
- uint32_t tuple_id;
- memcpy(&tuple_id, m_data+TUPLE_HEADER_SIZE-4, 4);
- return tuple_id;
- }
- inline void setTupleID(uint32_t tuple_id)
- {
- memcpy(m_data+TUPLE_HEADER_SIZE-4, &tuple_id, 4);
- }
+ // inline uint32_t getTupleID()
+ // {
+ // uint32_t tuple_id;
+ // memcpy(&tuple_id, m_data+TUPLE_HEADER_SIZE-4, 4);
+ //
+ // return tuple_id;
+ // }
+ //
+ // inline void setTupleID(uint32_t tuple_id)
+ // {
+ // memcpy(m_data+TUPLE_HEADER_SIZE-4, &tuple_id, 4);
+ // }
/** Get the value of a specified column (const) */
//not performant because it has to check the schema to see how to
src/ee/executors/deleteexecutor.cpp
@@ -118,7 +118,7 @@ bool DeleteExecutor::p_execute(const NValueArray &params, ReadWriteTracker *trac
// Read/Write Set Tracking
if (tracker != NULL) {
- tracker->markTupleWritten(m_targetTable->name(), &m_targetTuple);
+ tracker->markTupleWritten(m_targetTable, &m_targetTuple);
}
// Delete from target table
src/ee/executors/indexscanexecutor.cpp
@@ -539,7 +539,7 @@ bool IndexScanExecutor::p_execute(const NValueArray &params, ReadWriteTracker *t
// Read/Write Set Tracking
if (tracker != NULL) {
- tracker->markTupleRead(m_targetTable->name(), &m_tuple);
+ tracker->markTupleRead(m_targetTable, &m_tuple);
}
#ifdef ANTICACHE
src/ee/executors/seqscanexecutor.cpp
@@ -207,7 +207,7 @@ bool SeqScanExecutor::p_execute(const NValueArray &params, ReadWriteTracker *tra
{
// Read/Write Set Tracking
if (tracker != NULL) {
- tracker->markTupleRead(target_table->name(), &tuple);
+ tracker->markTupleRead(target_table, &tuple);
}
target_table->updateTupleAccessCount();
src/ee/executors/updateexecutor.cpp
@@ -167,7 +167,7 @@ bool UpdateExecutor::p_execute(const NValueArray &params, ReadWriteTracker *trac
// Read/Write Set Tracking
if (tracker != NULL) {
- tracker->markTupleWritten(m_targetTable->name(), &m_targetTuple);
+ tracker->markTupleWritten(m_targetTable, &m_targetTuple);
}
// Loop through INPUT_COL_IDX->TARGET_COL_IDX mapping and only update
src/ee/storage/ReadWriteTracker.cpp
@@ -55,8 +55,9 @@ ReadWriteTracker::~ReadWriteTracker() {
} // WHILE
}
- void ReadWriteTracker::insertTuple(boost::unordered_map<std::string, RowOffsets*> *map, const std::string tableName, TableTuple *tuple) {
+ void ReadWriteTracker::insertTuple(boost::unordered_map<std::string, RowOffsets*> *map, Table *table, TableTuple *tuple) {
RowOffsets *offsets = NULL;
+ const std::string tableName = table->name();
boost::unordered_map<std::string, RowOffsets*>::const_iterator iter = map->find(tableName);
if (iter != map->end()) {
offsets = iter->second;
@@ -65,17 +66,17 @@ void ReadWriteTracker::insertTuple(boost::unordered_map<std::string, RowOffsets*
map->insert(std::make_pair(tableName, offsets));
}
- uint32_t tupleId = tuple->getTupleID();
+ uint32_t tupleId = table->getTupleID(tuple->address());
offsets->insert(tupleId);
VOLT_INFO("*** TXN #%ld -> %s / %d", this->txnId, tableName.c_str(), tupleId);
}
- void ReadWriteTracker::markTupleRead(const std::string tableName, TableTuple *tuple) {
- this->insertTuple(&this->reads, tableName, tuple);
+ void ReadWriteTracker::markTupleRead(Table *table, TableTuple *tuple) {
+ this->insertTuple(&this->reads, table, tuple);
}
- void ReadWriteTracker::markTupleWritten(const std::string tableName, TableTuple *tuple) {
- this->insertTuple(&this->writes, tableName, tuple);
+ void ReadWriteTracker::markTupleWritten(Table *table, TableTuple *tuple) {
+ this->insertTuple(&this->writes, table, tuple);
}
std::vector<std::string> ReadWriteTracker::getTableNames(boost::unordered_map<std::string, RowOffsets*> *map) const {
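The shape of this refactoring: executors now hand the tracker the Table itself, and the tuple id is computed from the tuple's address via table->getTupleID(tuple->address()) instead of being stored in, and read back out of, the tuple header (the getTupleID/setTupleID pair commented out of tabletuple.h above). Table::getTupleID is not shown in this diff; one plausible shape for it, assuming contiguous fixed-length tuple storage, with names and layout invented for illustration:

    #include <cassert>
    #include <cstdint>

    // Hypothetical: recovers a row offset from a raw tuple address by
    // pointer arithmetic over one contiguous block of fixed-size tuples.
    struct TableStorageSketch {
        const char *base;       // start of the tuple block
        uint32_t tupleLength;   // bytes per tuple, header included

        uint32_t getTupleID(const char *address) const {
            assert(address >= base);
            return (uint32_t)((address - base) / tupleLength);
        }
    };

Deriving the id on demand means the four header bytes the old getter read (and the setter wrote) no longer need to be reserved in every tuple.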
src/ee/storage/ReadWriteTracker.h
@@ -53,16 +53,16 @@ class ReadWriteTracker {
ReadWriteTracker(int64_t txnId);
~ReadWriteTracker();
- void markTupleRead(const std::string tableName, TableTuple *tuple);
- void markTupleWritten(const std::string tableName, TableTuple *tuple);
+ void markTupleRead(Table *table, TableTuple *tuple);
+ void markTupleWritten(Table *table, TableTuple *tuple);
void clear();
std::vector<std::string> getTablesRead();
std::vector<std::string> getTablesWritten();
private:
- void insertTuple(boost::unordered_map<std::string, RowOffsets*> *map, const std::string tableName, TableTuple *tuple);
+ void insertTuple(boost::unordered_map<std::string, RowOffsets*> *map, Table *table, TableTuple *tuple);
std::vector<std::string> getTableNames(boost::unordered_map<std::string, RowOffsets*> *map) const;
int64_t txnId;
src/frontend/edu/brown/catalog/conflicts/ConflictSetCalculator.java
@@ -373,8 +373,9 @@ private boolean alwaysWriteConflicting(Statement stmt,
// Any UPDATE or DELETE statement that does not use a primary key in its WHERE
// clause should be marked as always conflicting.
+ // Note that pkeys will be null here if there is no primary key for the table
Collection<Column> pkeys = this.pkeysCache.get(CollectionUtil.first(tables));
- if (cols.containsAll(pkeys) == false) {
+ if (pkeys == null || cols.containsAll(pkeys) == false) {
return (true);
}
// Or any UPDATE or DELETE with a range predicate in its WHERE clause always conflicts
src/frontend/edu/brown/hstore/HStoreCoordinator.java
@@ -220,7 +220,8 @@ public void run() {
HStoreCoordinator.this.shutdownCluster(error);
}
}
- if (trace.val) LOG.trace("Messenger Thread for Site #" + catalog_site.getId() + " has stopped!");
+ if (trace.val)
+ LOG.trace("Messenger Thread for Site #" + catalog_site.getId() + " has stopped!");
}
}
src/frontend/edu/brown/hstore/PartitionExecutor.java
@@ -2742,12 +2742,12 @@ else if (is_remote == false) {
* @throws Exception
*/
private DependencySet executeFragmentIds(AbstractTransaction ts,
long undoToken,
long fragmentIds[],
ParameterSet parameters[],
int output_depIds[],
int input_depIds[],
Map<Integer, List<VoltTable>> input_deps) throws Exception {
(whitespace-only change: the six parameter lines of this hunk differ only in indentation, which the page rendering dropped)
if (fragmentIds.length == 0) {
LOG.warn(String.format("Got a fragment batch for %s that does not have any fragments?", ts));
src/frontend/edu/brown/hstore/conf/HStoreConf.java
@@ -614,12 +614,21 @@
// ----------------------------------------------------------------------------
@ConfigProperty(
description="Enable the anti-cache feature.",
defaultBoolean=true,
description="Enable the anti-cache feature. This requires that the system " +
"is compiled with ${site.anticache_build} set to true.",
defaultBoolean=false,
experimental=true
)
public boolean anticache_enable;
+ @ConfigProperty(
+ description="Build the anti-cache feature when compiling the H-Store source code. " +
+ "You probably always want to leave this flag enabled.",
+ defaultBoolean=true,
+ experimental=true
+ )
+ public boolean anticache_build;
@ConfigProperty(
description="Use a doubly-linked list for the anti-cache's LRU tracker. " +
"This will increase the memory overhead of the anti-cache's metatadata " +
src/frontend/edu/brown/statistics/ProcedureStatistics.java
@@ -43,7 +43,7 @@
* @author pavlo
*/
public class ProcedureStatistics extends AbstractStatistics<Procedure> {
- private static final Logger LOG = Logger.getLogger(Workload.class.getName());
+ private static final Logger LOG = Logger.getLogger(ProcedureStatistics.class);
public enum Members {
TABLE_TUPLE_COUNTS, TABLE_AVG_TUPLE_SIZES, TABLE_TOTAL_SIZES, TABLE_READONLY, TABLE_QUERYTYPE_COUNTS,
@@ -360,7 +360,11 @@ public void fromJSONObject(JSONObject object, Database catalog_db) throws JSONEx
String table_keys[] = { CatalogKey.createKey(catalog_tbl), CatalogKeyOldVersion.createKey(catalog_tbl) };
for (String table_key : table_keys) {
try {
- this.readMap(this.table_querytype_counts.get(table_keys[0]), table_key, QueryType.getNameMap(), Integer.class, tblQueryObject);
+ this.readMap(this.table_querytype_counts.get(table_keys[0]),
+ table_key,
+ QueryType.getNameMap(),
+ Integer.class,
+ tblQueryObject);
} catch (JSONException ex) {
last_error = ex;
continue;
@@ -370,8 +374,11 @@ public void fromJSONObject(JSONObject object, Database catalog_db) throws JSONEx
break;
} // FOR
if (last_error != null) {
LOG.error("BUSTED: " + StringUtil.join(",", tblQueryObject.keys()));
throw last_error;
// 2013-10-21
// I decided to switch this to be just a warning instead of
// a fatal warning for Nesime. Deal with it.
LOG.warn("BUSTED: " + StringUtil.join(",", tblQueryObject.keys()));
// throw last_error;
}
} // FOR