
rename classes to lowerCamelCase

git-svn-id: svn+ssh://svn.corp.yahoo.com/yahoo/yrl/labs/pnuts/code/logstore@3785 8dad8b1f-cf64-0410-95b6-bcf113ffbcfe
commit 9b1df5b1afe8f3899a65a7faffc382fdc0cb24d4 (parent: 7b2809a), authored by sears
Showing with 638 additions and 643 deletions.
  1. +65 −65 blsm.cpp
  2. +45 −49 blsm.h
  3. +18 −18 datapage.cpp
  4. +18 −19 datapage.h
  5. +14 −14 datatuple.h
  6. +30 −30 diskTreeComponent.cpp
  7. +19 −19 diskTreeComponent.h
  8. +3 −3 memTreeComponent.cpp
  9. +18 −18 memTreeComponent.h
  10. +5 −5 mergeManager.cpp
  11. +7 −7 mergeManager.h
  12. +2 −2 mergeStats.h
  13. +26 −26 merger.cpp
  14. +4 −4 merger.h
  15. +5 −5 regionAllocator.h
  16. +71 −71 servers/mapkeeper/blsmRequestHandler.cpp
  17. +7 −7 servers/mapkeeper/blsmRequestHandler.h
  18. +51 −51 servers/native/benchmarks/lsm_microbenchmarks.cpp
  19. +2 −2 servers/native/benchmarks/tcpclient_noop.cpp
  20. +4 −4 servers/native/logserver.cpp
  21. +3 −3 servers/native/logserver.h
  22. +10 −10 servers/native/network.h
  23. +4 −4 servers/native/newserver.cpp
  24. +44 −44 servers/native/requestDispatch.cpp
  25. +16 −16 servers/native/requestDispatch.h
  26. +5 −5 servers/native/server.cpp
  27. +1 −1  servers/native/simpleServer.cpp
  28. +2 −2 servers/native/simpleServer.h
  29. +12 −12 servers/native/tcpclient.cpp
  30. +5 −5 servers/native/tcpclient.h
  31. +3 −3 servers/native/util/change_log_mode.cpp
  32. +2 −2 servers/native/util/copy_database.cpp
  33. +2 −2 servers/native/util/drop_database.cpp
  34. +2 −2 servers/native/util/dump_blockmap.cpp
  35. +2 −2 servers/native/util/histogram.cpp
  36. +2 −2 servers/native/util/shutdown.cpp
  37. +2 −2 servers/native/util/space_usage.cpp
  38. +2 −2 test/CMakeLists.txt
  39. +20 −20 test/check_datapage.cpp
  40. +4 −4 test/check_gen.cpp
  41. +8 −8 test/check_logtable.cpp
  42. +3 −3 test/check_logtree.cpp
  43. +9 −9 test/check_merge.cpp
  44. +6 −6 test/check_mergelarge.cpp
  45. +14 −14 test/check_mergetuple.cpp
  46. +4 −4 test/check_rbtree.cpp
  47. +8 −8 test/check_tcpbulkinsert.cpp
  48. +9 −9 test/check_tcpclient.cpp
  49. +9 −9 test/check_testAndSet.cpp
  50. +4 −4 tuplemerger.cpp
  51. +7 −7 tuplemerger.h
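For reference, every hunk below applies the same renames; the mapping visible in the diffs shown here is:

    blsm            -> bLSM
    datatuple       -> dataTuple
    DataPage        -> dataPage
    tuplemerger     -> tupleMerger
    RegionAllocator -> regionAllocator

The hunks are essentially mechanical: identifier renames plus the matching include-guard and forward-declaration cleanups (see datapage.h and blsm.h).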
130 blsm.cpp
@@ -40,7 +40,7 @@ static inline double tv_to_double(struct timeval tv)
// LOG TABLE IMPLEMENTATION
/////////////////////////////////////////////////////////////////
-blsm::blsm(int log_mode, pageid_t max_c0_size, pageid_t internal_region_size, pageid_t datapage_region_size, pageid_t datapage_size)
+bLSM::bLSM(int log_mode, pageid_t max_c0_size, pageid_t internal_region_size, pageid_t datapage_region_size, pageid_t datapage_size)
{
recovering = true;
this->max_c0_size = max_c0_size;
@@ -63,7 +63,7 @@ blsm::blsm(int log_mode, pageid_t max_c0_size, pageid_t internal_region_size, pa
current_timestamp = 0;
expiry = 0;
this->merge_mgr = 0;
- tmerger = new tuplemerger(&replace_merger);
+ tmerger = new tupleMerger(&replace_merger);
header_mut = rwlc_initlock();
pthread_mutex_init(&rb_mut, 0);
@@ -85,7 +85,7 @@ blsm::blsm(int log_mode, pageid_t max_c0_size, pageid_t internal_region_size, pa
stasis_log_file_permissions);
}
-blsm::~blsm()
+bLSM::~bLSM()
{
delete merge_mgr; // shuts down pretty print thread.
@@ -110,17 +110,17 @@ blsm::~blsm()
delete tmerger;
}
-void blsm::init_stasis() {
+void bLSM::init_stasis() {
- DataPage::register_stasis_page_impl();
+ dataPage::register_stasis_page_impl();
// stasis_buffer_manager_hint_writes_are_sequential = 1;
Tinit();
}
-void blsm::deinit_stasis() { Tdeinit(); }
+void bLSM::deinit_stasis() { Tdeinit(); }
-recordid blsm::allocTable(int xid)
+recordid bLSM::allocTable(int xid)
{
table_rec = Talloc(xid, sizeof(tbl_header));
mergeStats * stats = 0;
@@ -142,7 +142,7 @@ recordid blsm::allocTable(int xid)
return table_rec;
}
-void blsm::openTable(int xid, recordid rid) {
+void bLSM::openTable(int xid, recordid rid) {
table_rec = rid;
Tread(xid, table_rec, &tbl_header);
tree_c2 = new diskTreeComponent(xid, tbl_header.c2_root, tbl_header.c2_state, tbl_header.c2_dp_state, 0);
@@ -156,23 +156,23 @@ void blsm::openTable(int xid, recordid rid) {
}
-void blsm::logUpdate(datatuple * tup) {
+void bLSM::logUpdate(dataTuple * tup) {
byte * buf = tup->to_bytes();
LogEntry * e = stasis_log_write_update(log_file, 0, INVALID_PAGE, 0/*Page**/, 0/*op*/, buf, tup->byte_length());
log_file->write_entry_done(log_file,e);
free(buf);
}
-void blsm::replayLog() {
+void bLSM::replayLog() {
lsn_t start = tbl_header.log_trunc;
LogHandle * lh = start ? getLSNHandle(log_file, start) : getLogHandle(log_file);
const LogEntry * e;
while((e = nextInLog(lh))) {
switch(e->type) {
case UPDATELOG: {
- datatuple * tup = datatuple::from_bytes((byte*)stasis_log_entry_update_args_cptr(e));
+ dataTuple * tup = dataTuple::from_bytes((byte*)stasis_log_entry_update_args_cptr(e));
insertTuple(tup);
- datatuple::freetuple(tup);
+ dataTuple::freetuple(tup);
} break;
case INTERNALLOG: { } break;
default: assert(e->type == UPDATELOG); abort();
@@ -184,12 +184,12 @@ void blsm::replayLog() {
}
-lsn_t blsm::get_log_offset() {
+lsn_t bLSM::get_log_offset() {
if(recovering || !log_mode) { return INVALID_LSN; }
return log_file->next_available_lsn(log_file);
}
-void blsm::truncate_log() {
+void bLSM::truncate_log() {
if(recovering) {
printf("Not truncating log until recovery is complete.\n");
} else {
@@ -200,7 +200,7 @@ void blsm::truncate_log() {
}
}
-void blsm::update_persistent_header(int xid, lsn_t trunc_lsn) {
+void bLSM::update_persistent_header(int xid, lsn_t trunc_lsn) {
tbl_header.c2_root = tree_c2->get_root_rid();
tbl_header.c2_dp_state = tree_c2->get_datapage_allocator_rid();
@@ -219,7 +219,7 @@ void blsm::update_persistent_header(int xid, lsn_t trunc_lsn) {
Tset(xid, table_rec, &tbl_header);
}
-void blsm::flushTable()
+void bLSM::flushTable()
{
struct timeval start_tv, stop_tv;
double start, stop;
@@ -277,7 +277,7 @@ void blsm::flushTable()
c0_flushing = false;
}
-datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
+dataTuple * bLSM::findTuple(int xid, const dataTuple::key_t key, size_t keySize)
{
// Apply proportional backpressure to reads as well as writes. This prevents
// starvation of the merge threads on fast boxes.
@@ -286,12 +286,12 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
#endif
//prepare a search tuple
- datatuple *search_tuple = datatuple::create(key, keySize);
+ dataTuple *search_tuple = dataTuple::create(key, keySize);
pthread_mutex_lock(&rb_mut);
- datatuple *ret_tuple=0;
+ dataTuple *ret_tuple=0;
//step 1: look in tree_c0
memTreeComponent::rbtree_t::iterator rbitr = get_tree_c0()->find(search_tuple);
@@ -312,14 +312,14 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
rbitr = get_tree_c0_mergeable()->find(search_tuple);
if(rbitr != get_tree_c0_mergeable()->end())
{
- datatuple *tuple = *rbitr;
+ dataTuple *tuple = *rbitr;
if(tuple->isDelete()) //tuple deleted
done = true; //return ret_tuple
else if(ret_tuple != 0) //merge the two
{
- datatuple *mtuple = tmerger->merge(tuple, ret_tuple); //merge the two
- datatuple::freetuple(ret_tuple); //free tuple from current tree
+ dataTuple *mtuple = tmerger->merge(tuple, ret_tuple); //merge the two
+ dataTuple::freetuple(ret_tuple); //free tuple from current tree
ret_tuple = mtuple; //set return tuple to merge result
}
else //key first found in old mem tree
@@ -334,7 +334,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
if(!done && get_tree_c1_prime() != 0)
{
DEBUG("old c1 tree not null\n");
- datatuple *tuple_oc1 = get_tree_c1_prime()->findTuple(xid, key, keySize);
+ dataTuple *tuple_oc1 = get_tree_c1_prime()->findTuple(xid, key, keySize);
if(tuple_oc1 != NULL)
{
@@ -343,8 +343,8 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
done = true;
else if(ret_tuple != 0) //merge the two
{
- datatuple *mtuple = tmerger->merge(tuple_oc1, ret_tuple); //merge the two
- datatuple::freetuple(ret_tuple); //free tuple from before
+ dataTuple *mtuple = tmerger->merge(tuple_oc1, ret_tuple); //merge the two
+ dataTuple::freetuple(ret_tuple); //free tuple from before
ret_tuple = mtuple; //set return tuple to merge result
}
else //found for the first time
@@ -355,7 +355,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
if(!use_copy)
{
- datatuple::freetuple(tuple_oc1); //free tuple from tree old c1
+ dataTuple::freetuple(tuple_oc1); //free tuple from tree old c1
}
}
}
@@ -363,7 +363,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
//step 3: check c1
if(!done)
{
- datatuple *tuple_c1 = get_tree_c1()->findTuple(xid, key, keySize);
+ dataTuple *tuple_c1 = get_tree_c1()->findTuple(xid, key, keySize);
if(tuple_c1 != NULL)
{
bool use_copy = false;
@@ -371,8 +371,8 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
done = true;
else if(ret_tuple != 0) //merge the two
{
- datatuple *mtuple = tmerger->merge(tuple_c1, ret_tuple); //merge the two
- datatuple::freetuple(ret_tuple); //free tuple from before
+ dataTuple *mtuple = tmerger->merge(tuple_c1, ret_tuple); //merge the two
+ dataTuple::freetuple(ret_tuple); //free tuple from before
ret_tuple = mtuple; //set return tuple to merge result
}
else //found for the first time
@@ -383,7 +383,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
if(!use_copy)
{
- datatuple::freetuple(tuple_c1); //free tuple from tree c1
+ dataTuple::freetuple(tuple_c1); //free tuple from tree c1
}
}
}
@@ -392,7 +392,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
if(!done && get_tree_c1_mergeable() != 0)
{
DEBUG("old c1 tree not null\n");
- datatuple *tuple_oc1 = get_tree_c1_mergeable()->findTuple(xid, key, keySize);
+ dataTuple *tuple_oc1 = get_tree_c1_mergeable()->findTuple(xid, key, keySize);
if(tuple_oc1 != NULL)
{
@@ -401,8 +401,8 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
done = true;
else if(ret_tuple != 0) //merge the two
{
- datatuple *mtuple = tmerger->merge(tuple_oc1, ret_tuple); //merge the two
- datatuple::freetuple(ret_tuple); //free tuple from before
+ dataTuple *mtuple = tmerger->merge(tuple_oc1, ret_tuple); //merge the two
+ dataTuple::freetuple(ret_tuple); //free tuple from before
ret_tuple = mtuple; //set return tuple to merge result
}
else //found for the first time
@@ -413,7 +413,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
if(!use_copy)
{
- datatuple::freetuple(tuple_oc1); //free tuple from tree old c1
+ dataTuple::freetuple(tuple_oc1); //free tuple from tree old c1
}
}
}
@@ -422,7 +422,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
if(!done)
{
DEBUG("Not in old first disk tree\n");
- datatuple *tuple_c2 = get_tree_c2()->findTuple(xid, key, keySize);
+ dataTuple *tuple_c2 = get_tree_c2()->findTuple(xid, key, keySize);
if(tuple_c2 != NULL)
{
@@ -431,8 +431,8 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
done = true;
else if(ret_tuple != 0)
{
- datatuple *mtuple = tmerger->merge(tuple_c2, ret_tuple); //merge the two
- datatuple::freetuple(ret_tuple); //free tuple from before
+ dataTuple *mtuple = tmerger->merge(tuple_c2, ret_tuple); //merge the two
+ dataTuple::freetuple(ret_tuple); //free tuple from before
ret_tuple = mtuple; //set return tuple to merge result
}
else //found for the first time
@@ -443,16 +443,16 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
if(!use_copy)
{
- datatuple::freetuple(tuple_c2); //free tuple from tree c2
+ dataTuple::freetuple(tuple_c2); //free tuple from tree c2
}
}
}
rwlc_unlock(header_mut);
- datatuple::freetuple(search_tuple);
+ dataTuple::freetuple(search_tuple);
if (ret_tuple != NULL && ret_tuple->isDelete()) {
// this is a tombstone. don't return it
- datatuple::freetuple(ret_tuple);
+ dataTuple::freetuple(ret_tuple);
return NULL;
}
return ret_tuple;
@@ -463,7 +463,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize)
* returns the first record found with the matching key
* (not to be used together with diffs)
**/
-datatuple * blsm::findTuple_first(int xid, datatuple::key_t key, size_t keySize)
+dataTuple * bLSM::findTuple_first(int xid, dataTuple::key_t key, size_t keySize)
{
// Apply proportional backpressure to reads as well as writes. This prevents
// starvation of the merge threads on fast boxes.
@@ -472,9 +472,9 @@ datatuple * blsm::findTuple_first(int xid, datatuple::key_t key, size_t keySize)
#endif
//prepare a search tuple
- datatuple * search_tuple = datatuple::create(key, keySize);
+ dataTuple * search_tuple = dataTuple::create(key, keySize);
- datatuple *ret_tuple=0;
+ dataTuple *ret_tuple=0;
//step 1: look in tree_c0
pthread_mutex_lock(&rb_mut);
@@ -551,11 +551,11 @@ datatuple * blsm::findTuple_first(int xid, datatuple::key_t key, size_t keySize)
rwlc_unlock(header_mut);
}
- datatuple::freetuple(search_tuple);
+ dataTuple::freetuple(search_tuple);
if (ret_tuple != NULL && ret_tuple->isDelete()) {
// this is a tombstone. don't return it
- datatuple::freetuple(ret_tuple);
+ dataTuple::freetuple(ret_tuple);
return NULL;
}
@@ -563,7 +563,7 @@ datatuple * blsm::findTuple_first(int xid, datatuple::key_t key, size_t keySize)
}
-datatuple * blsm::insertTupleHelper(datatuple *tuple)
+dataTuple * bLSM::insertTupleHelper(dataTuple *tuple)
{
bool need_free = false;
if(!tuple->isDelete() && expiry != 0) {
@@ -576,22 +576,22 @@ datatuple * blsm::insertTupleHelper(datatuple *tuple)
memcpy(newkey, tuple->strippedkey(), kl);
newkey[kl] = 0;
memcpy(newkey+kl+1, &ts, ts_sz);
- datatuple * old = tuple;
- tuple = datatuple::create(newkey, kl+ 1+ ts_sz, tuple->data(), tuple->datalen());
+ dataTuple * old = tuple;
+ tuple = dataTuple::create(newkey, kl+ 1+ ts_sz, tuple->data(), tuple->datalen());
assert(tuple->strippedkeylen() == old->strippedkeylen());
- assert(!datatuple::compare_obj(tuple, old));
+ assert(!dataTuple::compare_obj(tuple, old));
free(newkey);
need_free = true;
} //find the previous tuple with same key in the memtree if exists
pthread_mutex_lock(&rb_mut);
memTreeComponent::rbtree_t::iterator rbitr = tree_c0->find(tuple);
- datatuple * t = 0;
- datatuple * pre_t = 0;
+ dataTuple * t = 0;
+ dataTuple * pre_t = 0;
if(rbitr != tree_c0->end())
{
pre_t = *rbitr;
//do the merging
- datatuple *new_t = tmerger->merge(pre_t, tuple);
+ dataTuple *new_t = tmerger->merge(pre_t, tuple);
merge_mgr->get_merge_stats(0)->merged_tuples(new_t, tuple, pre_t);
t = new_t;
@@ -608,12 +608,12 @@ datatuple * blsm::insertTupleHelper(datatuple *tuple)
}
pthread_mutex_unlock(&rb_mut);
- if(need_free) { datatuple::freetuple(tuple); }
+ if(need_free) { dataTuple::freetuple(tuple); }
return pre_t;
}
-void blsm::insertManyTuples(datatuple ** tuples, int tuple_count) {
+void bLSM::insertManyTuples(dataTuple ** tuples, int tuple_count) {
for(int i = 0; i < tuple_count; i++) {
merge_mgr->read_tuple_from_small_component(0, tuples[i]);
}
@@ -631,18 +631,18 @@ void blsm::insertManyTuples(datatuple ** tuples, int tuple_count) {
int num_old_tups = 0;
pageid_t sum_old_tup_lens = 0;
for(int i = 0; i < tuple_count; i++) {
- datatuple * old_tup = insertTupleHelper(tuples[i]);
+ dataTuple * old_tup = insertTupleHelper(tuples[i]);
if(old_tup) {
num_old_tups++;
sum_old_tup_lens += old_tup->byte_length();
- datatuple::freetuple(old_tup);
+ dataTuple::freetuple(old_tup);
}
}
merge_mgr->read_tuple_from_large_component(0, num_old_tups, sum_old_tup_lens);
}
-void blsm::insertTuple(datatuple *tuple)
+void bLSM::insertTuple(dataTuple *tuple)
{
if(log_mode && !recovering) {
logUpdate(tuple);
@@ -656,26 +656,26 @@ void blsm::insertTuple(datatuple *tuple)
// any locks!
merge_mgr->read_tuple_from_small_component(0, tuple);
- datatuple * pre_t = 0; // this is a pointer to any data tuples that we'll be deleting below. We need to update the merge_mgr statistics with it, but have to do so outside of the rb_mut region.
+ dataTuple * pre_t = 0; // this is a pointer to any data tuples that we'll be deleting below. We need to update the merge_mgr statistics with it, but have to do so outside of the rb_mut region.
pre_t = insertTupleHelper(tuple);
if(pre_t) {
// needs to be here; calls update_progress, which sometimes grabs mutexes..
merge_mgr->read_tuple_from_large_component(0, pre_t); // was interspersed with the erase, insert above...
- datatuple::freetuple(pre_t); //free the previous tuple
+ dataTuple::freetuple(pre_t); //free the previous tuple
}
DEBUG("tree size %d tuples %lld bytes.\n", tsize, tree_bytes);
}
-bool blsm::testAndSetTuple(datatuple *tuple, datatuple *tuple2)
+bool bLSM::testAndSetTuple(dataTuple *tuple, dataTuple *tuple2)
{
bool succ = false;
static pthread_mutex_t test_and_set_mut = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock(&test_and_set_mut);
- datatuple * exists = findTuple_first(-1, tuple2 ? tuple2->strippedkey() : tuple->strippedkey(), tuple2 ? tuple2->strippedkeylen() : tuple->strippedkeylen());
+ dataTuple * exists = findTuple_first(-1, tuple2 ? tuple2->strippedkey() : tuple->strippedkey(), tuple2 ? tuple2->strippedkeylen() : tuple->strippedkeylen());
if(!tuple2 || tuple2->isDelete()) {
if(!exists || exists->isDelete()) {
@@ -690,18 +690,18 @@ bool blsm::testAndSetTuple(datatuple *tuple, datatuple *tuple2)
succ = false;
}
}
- if(exists) datatuple::freetuple(exists);
+ if(exists) dataTuple::freetuple(exists);
if(succ) insertTuple(tuple);
pthread_mutex_unlock(&test_and_set_mut);
return succ;
}
-void blsm::registerIterator(iterator * it) {
+void bLSM::registerIterator(iterator * it) {
its.push_back(it);
}
-void blsm::forgetIterator(iterator * it) {
+void bLSM::forgetIterator(iterator * it) {
for(unsigned int i = 0; i < its.size(); i++) {
if(its[i] == it) {
its.erase(its.begin()+i);
@@ -710,7 +710,7 @@ void blsm::forgetIterator(iterator * it) {
}
}
-void blsm::bump_epoch() {
+void bLSM::bump_epoch() {
epoch++;
for(unsigned int i = 0; i < its.size(); i++) {
its[i]->invalidate();
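The findTuple() hunks above preserve the read path (C0, the mergeable C0, then the C1'/C1/mergeable-C1/C2 disk components, merging partial results along the way); only the type names change. A hypothetical caller sketch, assuming an already-opened table object named table; the key and xid are illustrative and not part of the commit:

    dataTuple *r = table.findTuple(-1, (dataTuple::key_t)"user:1", 7);
    if (r) {
        // r is a merged, non-tombstone result; the caller owns it
        // (same ownership pattern as testAndSetTuple's use of findTuple_first above).
        dataTuple::freetuple(r);
    }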
94 blsm.h
@@ -25,15 +25,11 @@
#include "diskTreeComponent.h"
#include "memTreeComponent.h"
-
#include "tuplemerger.h"
-
#include "mergeManager.h"
#include "mergeStats.h"
-class logtable_mergedata;
-
-class blsm {
+class bLSM {
public:
class iterator;
@@ -49,28 +45,28 @@ class blsm {
// 6GB ~= 100B * 500 GB / (datapage_size * 4KB)
// (100B * 500GB) / (6GB * 4KB) = 2.035
// RCS: Set this to 1 so that we do (on average) one seek per b-tree read.
- blsm(int log_mode = 0, pageid_t max_c0_size = 100 * 1024 * 1024, pageid_t internal_region_size = 1000, pageid_t datapage_region_size = 10000, pageid_t datapage_size = 1);
+ bLSM(int log_mode = 0, pageid_t max_c0_size = 100 * 1024 * 1024, pageid_t internal_region_size = 1000, pageid_t datapage_region_size = 10000, pageid_t datapage_size = 1);
- ~blsm();
+ ~bLSM();
double * R() { return &r_val; }
//user access functions
- datatuple * findTuple(int xid, const datatuple::key_t key, size_t keySize);
+ dataTuple * findTuple(int xid, const dataTuple::key_t key, size_t keySize);
- datatuple * findTuple_first(int xid, datatuple::key_t key, size_t keySize);
+ dataTuple * findTuple_first(int xid, dataTuple::key_t key, size_t keySize);
private:
- datatuple * insertTupleHelper(datatuple *tuple);
+ dataTuple * insertTupleHelper(dataTuple *tuple);
public:
- void insertManyTuples(struct datatuple **tuples, int tuple_count);
- void insertTuple(struct datatuple *tuple);
+ void insertManyTuples(struct dataTuple **tuples, int tuple_count);
+ void insertTuple(struct dataTuple *tuple);
/** This test and set has strange semantics on two fronts:
*
* 1) It is not atomic with respect to non-testAndSet operations (which is fine in theory, since they have no barrier semantics, and we don't have a use case to support the extra overhead)
* 2) If tuple2 is not null, it looks at tuple2's key instead of tuple's key. This means you can atomically set the value of one key based on the value of another (if you want to...)
*/
- bool testAndSetTuple(struct datatuple *tuple, struct datatuple *tuple2);
+ bool testAndSetTuple(struct dataTuple *tuple, struct dataTuple *tuple2);
//other class functions
recordid allocTable(int xid);
@@ -78,7 +74,7 @@ class blsm {
void flushTable();
void replayLog();
- void logUpdate(datatuple * tup);
+ void logUpdate(dataTuple * tup);
static void init_stasis();
static void deinit_stasis();
@@ -115,7 +111,7 @@ class blsm {
void update_persistent_header(int xid, lsn_t log_trunc = INVALID_LSN);
- inline tuplemerger * gettuplemerger(){return tmerger;}
+ inline tupleMerger * gettuplemerger(){return tmerger;}
public:
@@ -183,14 +179,14 @@ class blsm {
pageid_t datapage_region_size; // "
pageid_t datapage_size; // "
private:
- tuplemerger *tmerger;
+ tupleMerger *tmerger;
std::vector<iterator *> its;
public:
bool shutting_down_;
- bool mightBeOnDisk(datatuple * t) {
+ bool mightBeOnDisk(dataTuple * t) {
if(tree_c1) {
if(!tree_c1->bloom_filter) { DEBUG("no c1 bloom filter\n"); return true; }
if(bloom_filter_lookup(tree_c1->bloom_filter, (const char*)t->strippedkey(), t->strippedkeylen())) { DEBUG("in c1\n"); return true; }
@@ -202,7 +198,7 @@ class blsm {
return mightBeAfterMemMerge(t);
}
- bool mightBeAfterMemMerge(datatuple * t) {
+ bool mightBeAfterMemMerge(dataTuple * t) {
if(tree_c1_mergeable) {
if(!tree_c1_mergeable->bloom_filter) { DEBUG("no c1m bloom filter\n"); return true; }
@@ -220,11 +216,11 @@ class blsm {
template<class ITRA, class ITRN>
class mergeManyIterator {
public:
- explicit mergeManyIterator(ITRA* a, ITRN** iters, int num_iters, datatuple*(*merge)(const datatuple*,const datatuple*), int (*cmp)(const datatuple*,const datatuple*)) :
+ explicit mergeManyIterator(ITRA* a, ITRN** iters, int num_iters, dataTuple*(*merge)(const dataTuple*,const dataTuple*), int (*cmp)(const dataTuple*,const dataTuple*)) :
num_iters_(num_iters+1),
first_iter_(a),
iters_((ITRN**)malloc(sizeof(*iters_) * num_iters)), // exactly the number passed in
- current_((datatuple**)malloc(sizeof(*current_) * (num_iters_))), // one more than was passed in
+ current_((dataTuple**)malloc(sizeof(*current_) * (num_iters_))), // one more than was passed in
last_iter_(-1),
cmp_(cmp),
merge_(merge),
@@ -240,7 +236,7 @@ class blsm {
delete(first_iter_);
for(int i = 0; i < num_iters_; i++) {
if(i != last_iter_) {
- if(current_[i]) datatuple::freetuple(current_[i]);
+ if(current_[i]) dataTuple::freetuple(current_[i]);
}
}
for(int i = 1; i < num_iters_; i++) {
@@ -250,12 +246,12 @@ class blsm {
free(iters_);
free(dups);
}
- datatuple * peek() {
- datatuple * ret = next_callerFrees();
+ dataTuple * peek() {
+ dataTuple * ret = next_callerFrees();
last_iter_ = -1; // don't advance iterator on next peek() or getnext() call.
return ret;
}
- datatuple * next_callerFrees() {
+ dataTuple * next_callerFrees() {
int num_dups = 0;
if(last_iter_ != -1) {
// get the value after the one we just returned to the user
@@ -287,7 +283,7 @@ class blsm {
}
}
}
- datatuple * ret;
+ dataTuple * ret;
if(!merge_) {
ret = current_[min];
} else {
@@ -296,7 +292,7 @@ class blsm {
}
// advance the iterators that match the tuple we're returning.
for(int i = 0; i < num_dups; i++) {
- datatuple::freetuple(current_[dups[i]]); // should never be null
+ dataTuple::freetuple(current_[dups[i]]); // should never be null
current_[dups[i]] = iters_[dups[i]-1]->next_callerFrees();
}
last_iter_ = min; // mark the min iter to be advance at the next invocation of next(). This saves us a copy in the non-merging case.
@@ -307,12 +303,12 @@ class blsm {
int num_iters_;
ITRA * first_iter_;
ITRN ** iters_;
- datatuple ** current_;
+ dataTuple ** current_;
int last_iter_;
- int (*cmp_)(const datatuple*,const datatuple*);
- datatuple*(*merge_)(const datatuple*,const datatuple*);
+ int (*cmp_)(const dataTuple*,const dataTuple*);
+ dataTuple*(*merge_)(const dataTuple*,const dataTuple*);
// temporary variables initialized once for efficiency
int * dups;
@@ -322,7 +318,7 @@ class blsm {
class iterator {
public:
- explicit iterator(blsm* ltable)
+ explicit iterator(bLSM* ltable)
: ltable(ltable),
epoch(ltable->get_epoch()),
merge_it_(NULL),
@@ -338,7 +334,7 @@ class blsm {
// rwlc_unlock(ltable->header_mut);
}
- explicit iterator(blsm* ltable,datatuple *key)
+ explicit iterator(bLSM* ltable,dataTuple *key)
: ltable(ltable),
epoch(ltable->get_epoch()),
merge_it_(NULL),
@@ -361,16 +357,16 @@ class blsm {
ltable->forgetIterator(this);
invalidate();
pthread_mutex_unlock(&ltable->rb_mut);
- if(last_returned) datatuple::freetuple(last_returned);
+ if(last_returned) dataTuple::freetuple(last_returned);
rwlc_unlock(ltable->header_mut);
}
private:
- datatuple * getnextHelper() {
+ dataTuple * getnextHelper() {
// rwlc_readlock(ltable->header_mut);
revalidate();
- datatuple * tmp = merge_it_->next_callerFrees();
+ dataTuple * tmp = merge_it_->next_callerFrees();
if(last_returned && tmp) {
- int res = datatuple::compare(last_returned->strippedkey(), last_returned->strippedkeylen(), tmp->strippedkey(), tmp->strippedkeylen());
+ int res = dataTuple::compare(last_returned->strippedkey(), last_returned->strippedkeylen(), tmp->strippedkey(), tmp->strippedkeylen());
if(res >= 0) {
int al = last_returned->strippedkeylen();
char * a =(char*)malloc(al + 1);
@@ -387,21 +383,21 @@ class blsm {
}
if(last_returned) {
- datatuple::freetuple(last_returned);
+ dataTuple::freetuple(last_returned);
}
last_returned = tmp;
// rwlc_unlock(ltable->header_mut);
return last_returned;
}
public:
- datatuple * getnextIncludingTombstones() {
- datatuple * ret = getnextHelper();
+ dataTuple * getnextIncludingTombstones() {
+ dataTuple * ret = getnextHelper();
ret = ret ? ret->create_copy() : NULL;
return ret;
}
- datatuple * getnext() {
- datatuple * ret;
+ dataTuple * getnext() {
+ dataTuple * ret;
while((ret = getnextHelper()) && ret->isDelete()) { } // getNextHelper handles its own memory.
ret = ret ? ret->create_copy() : NULL; // XXX hate making copy! Caller should not manage our memory.
return ret;
@@ -427,7 +423,7 @@ class blsm {
static const int C1 = 0;
static const int C1_MERGEABLE = 1;
static const int C2 = 2;
- blsm * ltable;
+ bLSM * ltable;
uint64_t epoch;
typedef mergeManyIterator<
memTreeComponent::batchedRevalidatingIterator,
@@ -438,8 +434,8 @@ class blsm {
merge_it_t* merge_it_;
- datatuple * last_returned;
- datatuple * key;
+ dataTuple * last_returned;
+ dataTuple * key;
bool valid;
int reval_count;
static const int reval_period = 100;
@@ -465,7 +461,7 @@ class blsm {
diskTreeComponent::iterator * disk_it[4];
epoch = ltable->get_epoch();
- datatuple *t;
+ dataTuple *t;
if(last_returned) {
t = last_returned;
} else if(key) {
@@ -490,13 +486,13 @@ class blsm {
disk_it[3] = ltable->get_tree_c2()->open_iterator(t);
inner_merge_it_t * inner_merge_it =
- new inner_merge_it_t(c0_it, c0_mergeable_it, 1, NULL, datatuple::compare_obj);
- merge_it_ = new merge_it_t(inner_merge_it, disk_it, 4, NULL, datatuple::compare_obj); // XXX Hardcodes comparator, and does not handle merges
+ new inner_merge_it_t(c0_it, c0_mergeable_it, 1, NULL, dataTuple::compare_obj);
+ merge_it_ = new merge_it_t(inner_merge_it, disk_it, 4, NULL, dataTuple::compare_obj); // XXX Hardcodes comparator, and does not handle merges
if(last_returned) {
- datatuple * junk = merge_it_->peek();
- if(junk && !datatuple::compare(junk->strippedkey(), junk->strippedkeylen(), last_returned->strippedkey(), last_returned->strippedkeylen())) {
+ dataTuple * junk = merge_it_->peek();
+ if(junk && !dataTuple::compare(junk->strippedkey(), junk->strippedkeylen(), last_returned->strippedkey(), last_returned->strippedkeylen())) {
// we already returned junk
- datatuple::freetuple(merge_it_->next_callerFrees());
+ dataTuple::freetuple(merge_it_->next_callerFrees());
}
}
valid = true;
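The testAndSetTuple() comment above is terse; here is a hypothetical sketch of the simplest case (insert-if-absent), assuming an already-opened bLSM named table. The key/value literals are illustrative, not from the commit:

    // With tuple2 == NULL, the call checks t's own key and only inserts t
    // if no live (non-tombstone) tuple is already stored under it
    // (per the branch shown in the blsm.cpp hunk above).
    dataTuple *t = dataTuple::create("user:1", 7, "v1", 3);
    bool inserted = table.testAndSetTuple(t, NULL);
    dataTuple::freetuple(t);   // caller keeps ownership of t, matching replayLog() above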
36 datapage.cpp
@@ -42,7 +42,7 @@ static int notSupported(int xid, Page * p) { return 0; }
END_C_DECLS
-void DataPage::register_stasis_page_impl() {
+void dataPage::register_stasis_page_impl() {
static page_impl pi = {
DATA_PAGE,
1,
@@ -76,7 +76,7 @@ void DataPage::register_stasis_page_impl() {
}
-DataPage::DataPage(int xid, RegionAllocator * alloc, pageid_t pid): // XXX Hack!! The read-only constructor signature is too close to the other's
+dataPage::dataPage(int xid, regionAllocator * alloc, pageid_t pid): // XXX Hack!! The read-only constructor signature is too close to the other's
xid_(xid),
page_count_(1), // will be opportunistically incremented as we scan the datapage.
initial_page_count_(-1), // used by append.
@@ -94,7 +94,7 @@ DataPage::DataPage(int xid, RegionAllocator * alloc, pageid_t pid): // XXX Hack
releasePage(p);
}
-DataPage::DataPage(int xid, pageid_t page_count, RegionAllocator *alloc) :
+dataPage::dataPage(int xid, pageid_t page_count, regionAllocator *alloc) :
xid_(xid),
page_count_(1),
initial_page_count_(page_count),
@@ -107,11 +107,11 @@ DataPage::DataPage(int xid, pageid_t page_count, RegionAllocator *alloc) :
initialize();
}
-void DataPage::initialize() {
+void dataPage::initialize() {
initialize_page(first_page_);
}
-void DataPage::initialize_page(pageid_t pageid) {
+void dataPage::initialize_page(pageid_t pageid) {
//load the first page
Page *p;
#ifdef CHECK_FOR_SCRIBBLING
@@ -144,7 +144,7 @@ void DataPage::initialize_page(pageid_t pageid) {
releasePage(p);
}
-size_t DataPage::write_bytes(const byte * buf, ssize_t remaining, Page ** latch_p) {
+size_t dataPage::write_bytes(const byte * buf, ssize_t remaining, Page ** latch_p) {
if(latch_p) { *latch_p = NULL; }
recordid chunk = calc_chunk_from_offset(write_offset_);
if(chunk.size > remaining) {
@@ -167,7 +167,7 @@ size_t DataPage::write_bytes(const byte * buf, ssize_t remaining, Page ** latch_
}
return chunk.size;
}
-size_t DataPage::read_bytes(byte * buf, off_t offset, ssize_t remaining) {
+size_t dataPage::read_bytes(byte * buf, off_t offset, ssize_t remaining) {
recordid chunk = calc_chunk_from_offset(offset);
if(chunk.size > remaining) {
chunk.size = remaining;
@@ -190,7 +190,7 @@ size_t DataPage::read_bytes(byte * buf, off_t offset, ssize_t remaining) {
return chunk.size;
}
-bool DataPage::initialize_next_page() {
+bool dataPage::initialize_next_page() {
recordid rid = calc_chunk_from_offset(write_offset_);
assert(rid.slot == 0);
DEBUG("\t\t%lld\n", (long long)rid.page);
@@ -215,7 +215,7 @@ bool DataPage::initialize_next_page() {
return true;
}
-Page * DataPage::write_data_and_latch(const byte * buf, size_t len, bool init_next, bool latch) {
+Page * dataPage::write_data_and_latch(const byte * buf, size_t len, bool init_next, bool latch) {
bool first = true;
Page * p = 0;
while(1) {
@@ -255,11 +255,11 @@ Page * DataPage::write_data_and_latch(const byte * buf, size_t len, bool init_ne
}
}
-bool DataPage::write_data(const byte * buf, size_t len, bool init_next) {
+bool dataPage::write_data(const byte * buf, size_t len, bool init_next) {
return 0 != write_data_and_latch(buf, len, init_next, false);
}
-bool DataPage::read_data(byte * buf, off_t offset, size_t len) {
+bool dataPage::read_data(byte * buf, off_t offset, size_t len) {
while(1) {
assert(len > 0);
size_t read_count = read_bytes(buf, offset, len);
@@ -275,7 +275,7 @@ bool DataPage::read_data(byte * buf, off_t offset, size_t len) {
}
}
-bool DataPage::append(datatuple const * dat)
+bool dataPage::append(dataTuple const * dat)
{
// First, decide if we should append to this datapage, based on whether
// appending will waste more or less space than starting a new datapage
@@ -344,21 +344,21 @@ bool DataPage::append(datatuple const * dat)
return succ;
}
-bool DataPage::recordRead(const datatuple::key_t key, size_t keySize, datatuple ** buf)
+bool dataPage::recordRead(const dataTuple::key_t key, size_t keySize, dataTuple ** buf)
{
iterator itr(this, NULL);
int match = -1;
while((*buf=itr.getnext()) != 0) {
- match = datatuple::compare((*buf)->strippedkey(), (*buf)->strippedkeylen(), key, keySize);
+ match = dataTuple::compare((*buf)->strippedkey(), (*buf)->strippedkeylen(), key, keySize);
if(match<0) { //keep searching
- datatuple::freetuple(*buf);
+ dataTuple::freetuple(*buf);
*buf=0;
} else if(match==0) { //found
return true;
} else { // match > 0, then does not exist
- datatuple::freetuple(*buf);
+ dataTuple::freetuple(*buf);
*buf = 0;
break;
}
@@ -371,7 +371,7 @@ bool DataPage::recordRead(const datatuple::key_t key, size_t keySize, datatuple
///////////////////////////////////////////////////////////////
-datatuple* DataPage::iterator::getnext() {
+dataTuple* dataPage::iterator::getnext() {
len_t len;
bool succ;
if(dp == NULL) { return NULL; }
@@ -398,7 +398,7 @@ datatuple* DataPage::iterator::getnext() {
read_offset_ += len;
- datatuple *ret = datatuple::from_bytes(buf);
+ dataTuple *ret = dataTuple::from_bytes(buf);
free(buf);
37 datapage.h
@@ -17,37 +17,36 @@
*
* Author: makdere
*/
-#ifndef _SIMPLE_DATA_PAGE_H_
-#define _SIMPLE_DATA_PAGE_H_
+#ifndef DATA_PAGE_H_
+#define DATA_PAGE_H_
#include <limits.h>
#include <stasis/page.h>
#include <stasis/constants.h>
#include "datatuple.h"
-
-struct RegionAllocator;
+#include "regionAllocator.h"
//#define CHECK_FOR_SCRIBBLING
-class DataPage
+class dataPage
{
public:
class iterator
{
private:
- void scan_to_key(datatuple * key) {
+ void scan_to_key(dataTuple * key) {
if(key) {
len_t old_off = read_offset_;
- datatuple * t = getnext();
- while(t && datatuple::compare(key->strippedkey(), key->strippedkeylen(), t->strippedkey(), t->strippedkeylen()) > 0) {
- datatuple::freetuple(t);
+ dataTuple * t = getnext();
+ while(t && dataTuple::compare(key->strippedkey(), key->strippedkeylen(), t->strippedkey(), t->strippedkeylen()) > 0) {
+ dataTuple::freetuple(t);
old_off = read_offset_;
t = getnext();
}
if(t) {
DEBUG("datapage opened at %s\n", t->key());
- datatuple::freetuple(t);
+ dataTuple::freetuple(t);
read_offset_ = old_off;
} else {
DEBUG("datapage key not found. Offset = %lld", read_offset_);
@@ -56,7 +55,7 @@ class DataPage
}
}
public:
- iterator(DataPage *dp, datatuple * key=NULL) : read_offset_(0), dp(dp) {
+ iterator(dataPage *dp, dataTuple * key=NULL) : read_offset_(0), dp(dp) {
scan_to_key(key);
}
@@ -66,11 +65,11 @@ class DataPage
}
//returns the next tuple and also advances the iterator
- datatuple *getnext();
+ dataTuple *getnext();
private:
off_t read_offset_;
- DataPage *dp;
+ dataPage *dp;
};
public:
@@ -78,12 +77,12 @@ class DataPage
/**
* if alloc is non-null, then reads will be optimized for sequential access
*/
- DataPage( int xid, RegionAllocator* alloc, pageid_t pid );
+ dataPage( int xid, regionAllocator* alloc, pageid_t pid );
//to be used to create new data pages
- DataPage( int xid, pageid_t page_count, RegionAllocator* alloc);
+ dataPage( int xid, pageid_t page_count, regionAllocator* alloc);
- ~DataPage() {
+ ~dataPage() {
assert(write_offset_ == -1);
}
@@ -100,8 +99,8 @@ class DataPage
}
- bool append(datatuple const * dat);
- bool recordRead(const datatuple::key_t key, size_t keySize, datatuple ** buf);
+ bool append(dataTuple const * dat);
+ bool recordRead(const dataTuple::key_t key, size_t keySize, dataTuple ** buf);
inline uint16_t recordCount();
@@ -150,7 +149,7 @@ class DataPage
int xid_;
pageid_t page_count_;
const pageid_t initial_page_count_;
- RegionAllocator *alloc_;
+ regionAllocator *alloc_;
const pageid_t first_page_;
off_t write_offset_; // points to the next free byte (ignoring page boundaries)
};
28 datatuple.h
@@ -29,7 +29,7 @@
typedef uint32_t len_t ;
static const len_t DELETE = ((len_t)0) - 1;
-typedef struct datatuple
+typedef struct dataTuple
{
public:
typedef unsigned char* key_t ;
@@ -38,7 +38,7 @@ typedef struct datatuple
len_t datalen_;
byte* data_; // aliases key(). data_ - 1 should be the \0 terminating key().
- datatuple* sanity_check() {
+ dataTuple* sanity_check() {
assert(rawkeylen() < 3000);
return this;
}
@@ -83,7 +83,7 @@ typedef struct datatuple
return (key_t)(this+1);
}
//this is used by the stl set
- bool operator() (const datatuple* lhs, const datatuple* rhs) const {
+ bool operator() (const dataTuple* lhs, const dataTuple* rhs) const {
return compare(lhs->strippedkey(), lhs->strippedkeylen(), rhs->strippedkey(), rhs->strippedkeylen()) < 0; //strcmp((char*)lhs.key(),(char*)rhs.key()) < 0;
}
@@ -136,7 +136,7 @@ typedef struct datatuple
return (int64_t)*(uint64_t*)(rawkey()+1+al-ts_sz);
}
- static int compare_obj(const datatuple * a, const datatuple* b) {
+ static int compare_obj(const dataTuple * a, const dataTuple* b) {
return compare(a->strippedkey(), a->strippedkeylen(), b->strippedkey(), b->strippedkeylen());
}
@@ -160,16 +160,16 @@ typedef struct datatuple
}
//copy the tuple. does a deep copy of the contents.
- datatuple* create_copy() const {
+ dataTuple* create_copy() const {
return create(rawkey(), rawkeylen(), data(), datalen_);
}
- static datatuple* create(const void* key, len_t keylen) {
+ static dataTuple* create(const void* key, len_t keylen) {
return create(key, keylen, 0, DELETE);
}
- static datatuple* create(const void* key, len_t keylen, const void* data, len_t datalen) {
- datatuple *ret = (datatuple*)malloc(sizeof(datatuple) + length_from_header(keylen,datalen));
+ static dataTuple* create(const void* key, len_t keylen, const void* data, len_t datalen) {
+ dataTuple *ret = (dataTuple*)malloc(sizeof(dataTuple) + length_from_header(keylen,datalen));
memcpy(ret->rawkey(), key, keylen);
ret->data_ = ret->rawkey() + keylen; // need to set this even if delete, since it encodes the key length.
if(datalen != DELETE) {
@@ -195,17 +195,17 @@ typedef struct datatuple
}
//format of buf: key _ data. The caller needs to 'peel' off key length and data length for this call.
- static datatuple* from_bytes(len_t keylen, len_t datalen, byte* buf) {
- datatuple *dt = (datatuple*) malloc(sizeof(datatuple) + length_from_header(keylen,datalen));
+ static dataTuple* from_bytes(len_t keylen, len_t datalen, byte* buf) {
+ dataTuple *dt = (dataTuple*) malloc(sizeof(dataTuple) + length_from_header(keylen,datalen));
dt->datalen_ = datalen;
memcpy(dt->rawkey(),buf, length_from_header(keylen,datalen));
dt->data_ = dt->rawkey() + keylen;
return dt->sanity_check();
}
- static datatuple* from_bytes(byte* buf) {
+ static dataTuple* from_bytes(byte* buf) {
len_t keylen = ((len_t*)buf)[0];
len_t buflen = length_from_header(keylen, ((len_t*)buf)[1]);
- datatuple *dt = (datatuple*) malloc(sizeof(datatuple) + buflen);
+ dataTuple *dt = (dataTuple*) malloc(sizeof(dataTuple) + buflen);
dt->datalen_ = ((len_t*)buf)[1];
memcpy(dt->rawkey(),((len_t*)buf)+2,buflen);
dt->data_ = dt->rawkey() + keylen;
@@ -213,11 +213,11 @@ typedef struct datatuple
return dt->sanity_check();
}
- static inline void freetuple(datatuple* dt) {
+ static inline void freetuple(dataTuple* dt) {
free(dt);
}
-} datatuple;
+} dataTuple;
#endif
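datatuple.h only renames the type; a minimal sketch of the renamed API as declared above (key/value literals are illustrative; lengths include the trailing NUL, per the key() comment in the header):

    dataTuple *a = dataTuple::create("alpha", 6, "one", 4);  // key + value
    dataTuple *b = dataTuple::create("alpha", 6);             // two-argument form: a tombstone (datalen_ == DELETE)
    int cmp   = dataTuple::compare_obj(a, b);                  // orders by stripped key
    bool dead = b->isDelete();                                  // true only for the tombstone
    dataTuple::freetuple(a);
    dataTuple::freetuple(b);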
60 diskTreeComponent.cpp
@@ -76,7 +76,7 @@ void diskTreeComponent::writes_done() {
}
}
-int diskTreeComponent::insertTuple(int xid, datatuple *t)
+int diskTreeComponent::insertTuple(int xid, dataTuple *t)
{
if(bloom_filter) {
bloom_filter_insert(bloom_filter, (const char*)t->strippedkey(), t->strippedkeylen());
@@ -96,14 +96,14 @@ int diskTreeComponent::insertTuple(int xid, datatuple *t)
return ret;
}
-DataPage* diskTreeComponent::insertDataPage(int xid, datatuple *tuple) {
+dataPage* diskTreeComponent::insertDataPage(int xid, dataTuple *tuple) {
//create a new data page -- either the last region is full, or the last data page doesn't want our tuple. (or both)
- DataPage * dp = 0;
+ dataPage * dp = 0;
int count = 0;
while(dp==0)
{
- dp = new DataPage(xid, datapage_size, ltree->get_datapage_alloc());
+ dp = new dataPage(xid, datapage_size, ltree->get_datapage_alloc());
//insert the record into the data page
if(!dp->append(tuple))
@@ -130,9 +130,9 @@ DataPage* diskTreeComponent::insertDataPage(int xid, datatuple *tuple) {
return dp;
}
-datatuple * diskTreeComponent::findTuple(int xid, datatuple::key_t key, size_t keySize)
+dataTuple * diskTreeComponent::findTuple(int xid, dataTuple::key_t key, size_t keySize)
{
- datatuple * tup=0;
+ dataTuple * tup=0;
if(bloom_filter) {
if(!bloom_filter_lookup(bloom_filter, (const char*)key, keySize)) {
@@ -145,7 +145,7 @@ datatuple * diskTreeComponent::findTuple(int xid, datatuple::key_t key, size_t k
if(pid!=-1)
{
- DataPage * dp = new DataPage(xid, 0, pid);
+ dataPage * dp = new dataPage(xid, 0, pid);
dp->recordRead(key, keySize, &tup);
delete dp;
}
@@ -194,7 +194,7 @@ recordid diskTreeComponent::internalNodes::create(int xid) {
void diskTreeComponent::internalNodes::writeNodeRecord(int xid, Page * p, recordid & rid,
const byte *key, size_t keylen, pageid_t ptr) {
DEBUG("writenoderecord:\tp->id\t%lld\tkey:\t%s\tkeylen: %d\tval_page\t%lld\n",
- p->id, datatuple::key_to_str(key).c_str(), keylen, ptr);
+ p->id, dataTuple::key_to_str(key).c_str(), keylen, ptr);
indexnode_rec *nr = (indexnode_rec*)stasis_record_write_begin(xid, p, rid);
nr->ptr = ptr;
memcpy(nr+1, key, keylen);
@@ -338,7 +338,7 @@ recordid diskTreeComponent::internalNodes::appendPage(int xid,
} else {
DEBUG("Appended new internal node tree depth = %lld key = %s\n",
- depth, datatuple::key_to_str(key).c_str());
+ depth, dataTuple::key_to_str(key).c_str());
}
lastLeaf = ret.page;
@@ -346,7 +346,7 @@ recordid diskTreeComponent::internalNodes::appendPage(int xid,
} else {
// write the new value to an existing page
- DEBUG("Writing %s\t%d to existing page# %lld\n", datatuple::key_to_str(key).c_str(),
+ DEBUG("Writing %s\t%d to existing page# %lld\n", dataTuple::key_to_str(key).c_str(),
val_page, lastLeafPage->id);
stasis_record_alloc_done(xid, lastLeafPage, ret);
@@ -368,15 +368,15 @@ recordid diskTreeComponent::internalNodes::appendPage(int xid,
diskTreeComponent::internalNodes::internalNodes(int xid, pageid_t internal_region_size, pageid_t datapage_region_size, pageid_t datapage_size)
: lastLeaf(-1),
- internal_node_alloc(new RegionAllocator(xid, internal_region_size)),
- datapage_alloc(new RegionAllocator(xid, datapage_region_size))
+ internal_node_alloc(new regionAllocator(xid, internal_region_size)),
+ datapage_alloc(new regionAllocator(xid, datapage_region_size))
{ create(xid); }
diskTreeComponent::internalNodes::internalNodes(int xid, recordid root, recordid internal_node_state, recordid datapage_state)
: lastLeaf(-1),
root_rec(root),
- internal_node_alloc(new RegionAllocator(xid, internal_node_state)),
- datapage_alloc(new RegionAllocator(xid, datapage_state))
+ internal_node_alloc(new regionAllocator(xid, internal_node_state)),
+ datapage_alloc(new regionAllocator(xid, datapage_state))
{ }
diskTreeComponent::internalNodes::~internalNodes() {
@@ -646,8 +646,8 @@ recordid diskTreeComponent::internalNodes::lookup(int xid,
rid.size = stasis_record_length_read(xid, node, rid);
const indexnode_rec *rec = (const indexnode_rec*)stasis_record_read_begin(xid,node,rid);
- int cmpval = datatuple::compare((datatuple::key_t) (rec+1), rid.size-sizeof(*rec),
- (datatuple::key_t) key, keySize);
+ int cmpval = dataTuple::compare((dataTuple::key_t) (rec+1), rid.size-sizeof(*rec),
+ (dataTuple::key_t) key, keySize);
stasis_record_read_done(xid,node,rid,(const byte*)rec);
// key of current node is too big; there can be no matches under it.
@@ -716,7 +716,7 @@ void diskTreeComponent::internalNodes::print_tree(int xid, pageid_t pid, int64_t
rid.slot = i;
const indexnode_rec *nr = (const indexnode_rec*)stasis_record_read_begin(xid,node,rid);
printf("\tchild_page_id:%lld\tkey:%s\n", nr->ptr,
- datatuple::key_to_str((byte*)(nr+1)).c_str());
+ dataTuple::key_to_str((byte*)(nr+1)).c_str());
stasis_record_read_done(xid, node, rid, (const byte*)nr);
}
@@ -733,7 +733,7 @@ void diskTreeComponent::internalNodes::print_tree(int xid, pageid_t pid, int64_t
rid.slot = FIRST_SLOT;
const indexnode_rec *nr = (const indexnode_rec*)stasis_record_read_begin(xid,node,rid);
printf("\tdata_page_id:%lld\tkey:%s\n", nr->ptr,
- datatuple::key_to_str((byte*)(nr+1)).c_str());
+ dataTuple::key_to_str((byte*)(nr+1)).c_str());
stasis_record_read_done(xid, node, rid, (const byte*)nr);
printf("\t...\n");
@@ -741,7 +741,7 @@ void diskTreeComponent::internalNodes::print_tree(int xid, pageid_t pid, int64_t
rid.slot= numslots - 1;
nr = (const indexnode_rec*)stasis_record_read_begin(xid,node,rid);
printf("\tdata_page_id:%lld\tkey:%s\n", nr->ptr,
- datatuple::key_to_str((byte*)(nr+1)).c_str());
+ dataTuple::key_to_str((byte*)(nr+1)).c_str());
stasis_record_read_done(xid, node, rid, (const byte*)nr);
}
unlock(node->rwlatch);
@@ -752,7 +752,7 @@ void diskTreeComponent::internalNodes::print_tree(int xid, pageid_t pid, int64_t
//diskTreeComponentIterator implementation
/////////////////////////////////////////////////
-diskTreeComponent::internalNodes::iterator::iterator(int xid, RegionAllocator* ro_alloc, recordid root) {
+diskTreeComponent::internalNodes::iterator::iterator(int xid, regionAllocator* ro_alloc, recordid root) {
ro_alloc_ = ro_alloc;
if(root.page == 0 && root.slot == 0 && root.size == -1) abort();
p = ro_alloc_->load_page(xid,root.page);
@@ -798,7 +798,7 @@ diskTreeComponent::internalNodes::iterator::iterator(int xid, RegionAllocator* r
if(!justOnePage) readlock(p->rwlatch,0);
}
-diskTreeComponent::internalNodes::iterator::iterator(int xid, RegionAllocator* ro_alloc, recordid root, const byte* key, len_t keylen) {
+diskTreeComponent::internalNodes::iterator::iterator(int xid, regionAllocator* ro_alloc, recordid root, const byte* key, len_t keylen) {
if(root.page == NULLRID.page && root.slot == NULLRID.slot) abort();
ro_alloc_ = ro_alloc;
p = ro_alloc_->load_page(xid,root.page);
@@ -917,7 +917,7 @@ void diskTreeComponent::internalNodes::iterator::close() {
// tree iterator implementation
/////////////////////////////////////////////////////////////////////
-void diskTreeComponent::iterator::init_iterators(datatuple * key1, datatuple * key2) {
+void diskTreeComponent::iterator::init_iterators(dataTuple * key1, dataTuple * key2) {
assert(!key2); // unimplemented
if(tree_.size == INVALID_SIZE) {
lsmIterator_ = NULL;
@@ -931,7 +931,7 @@ void diskTreeComponent::iterator::init_iterators(datatuple * key1, datatuple * k
}
diskTreeComponent::iterator::iterator(diskTreeComponent::internalNodes *tree, mergeManager * mgr, double target_progress_delta, bool * flushing) :
- ro_alloc_(new RegionAllocator()),
+ ro_alloc_(new regionAllocator()),
tree_(tree ? tree->get_root_rec() : NULLRID),
mgr_(mgr),
target_progress_delta_(target_progress_delta),
@@ -941,8 +941,8 @@ diskTreeComponent::iterator::iterator(diskTreeComponent::internalNodes *tree, me
init_helper(NULL);
}
-diskTreeComponent::iterator::iterator(diskTreeComponent::internalNodes *tree, datatuple* key) :
- ro_alloc_(new RegionAllocator()),
+diskTreeComponent::iterator::iterator(diskTreeComponent::internalNodes *tree, dataTuple* key) :
+ ro_alloc_(new regionAllocator()),
tree_(tree ? tree->get_root_rec() : NULLRID),
mgr_(NULL),
target_progress_delta_(0.0),
@@ -965,7 +965,7 @@ diskTreeComponent::iterator::~iterator() {
delete ro_alloc_;
}
-void diskTreeComponent::iterator::init_helper(datatuple* key1)
+void diskTreeComponent::iterator::init_helper(dataTuple* key1)
{
if(!lsmIterator_)
{
@@ -988,7 +988,7 @@ void diskTreeComponent::iterator::init_helper(datatuple* key1)
lsmIterator_->value((byte**)hack);
curr_pageid = *pid_tmp;
- curr_page = new DataPage(-1, ro_alloc_, curr_pageid);
+ curr_page = new dataPage(-1, ro_alloc_, curr_pageid);
DEBUG("opening datapage iterator %lld at key %s\n.", curr_pageid, key1 ? (char*)key1->key() : "NULL");
dp_itr = new DPITR_T(curr_page, key1);
@@ -997,14 +997,14 @@ void diskTreeComponent::iterator::init_helper(datatuple* key1)
}
}
-datatuple * diskTreeComponent::iterator::next_callerFrees()
+dataTuple * diskTreeComponent::iterator::next_callerFrees()
{
if(!this->lsmIterator_) { return NULL; }
if(dp_itr == 0)
return 0;
- datatuple* readTuple = dp_itr->getnext();
+ dataTuple* readTuple = dp_itr->getnext();
if(!readTuple)
@@ -1022,7 +1022,7 @@ datatuple * diskTreeComponent::iterator::next_callerFrees()
size_t ret = lsmIterator_->value((byte**)hack);
assert(ret == sizeof(pageid_t));
curr_pageid = *pid_tmp;
- curr_page = new DataPage(-1, ro_alloc_, curr_pageid);
+ curr_page = new dataPage(-1, ro_alloc_, curr_pageid);
DEBUG("opening datapage iterator %lld at beginning\n.", curr_pageid);
dp_itr = new DPITR_T(curr_page->begin());
38 diskTreeComponent.h
@@ -74,15 +74,15 @@ class diskTreeComponent {
recordid get_datapage_allocator_rid();
recordid get_internal_node_allocator_rid();
internalNodes * get_internal_nodes() { return ltree; }
- datatuple* findTuple(int xid, datatuple::key_t key, size_t keySize);
- int insertTuple(int xid, datatuple *t);
+ dataTuple* findTuple(int xid, dataTuple::key_t key, size_t keySize);
+ int insertTuple(int xid, dataTuple *t);
void writes_done();
iterator * open_iterator(mergeManager * mgr = NULL, double target_size = 0, bool * flushing = NULL) {
return new iterator(ltree, mgr, target_size, flushing);
}
- iterator * open_iterator(datatuple * key) {
+ iterator * open_iterator(dataTuple * key) {
if(key != NULL) {
return new iterator(ltree, key);
} else {
@@ -100,10 +100,10 @@ class diskTreeComponent {
}
private:
- DataPage* insertDataPage(int xid, datatuple *tuple);
+ dataPage* insertDataPage(int xid, dataTuple *tuple);
internalNodes * ltree;
- DataPage* dp;
+ dataPage* dp;
pageid_t datapage_size;
/*mergeManager::mergeStats*/ void *stats; // XXX hack to work around circular includes.
@@ -122,8 +122,8 @@ class diskTreeComponent {
//appends a leaf page, val_page is the id of the leaf page
recordid appendPage(int xid, const byte *key,size_t keySize, pageid_t val_page);
- inline RegionAllocator* get_datapage_alloc() { return datapage_alloc; }
- inline RegionAllocator* get_internal_node_alloc() { return internal_node_alloc; }
+ inline regionAllocator* get_datapage_alloc() { return datapage_alloc; }
+ inline regionAllocator* get_internal_node_alloc() { return internal_node_alloc; }
const recordid &get_root_rec(){return root_rec;}
private:
@@ -168,8 +168,8 @@ class diskTreeComponent {
void print_tree(int xid, pageid_t pid, int64_t depth);
recordid root_rec;
- RegionAllocator* internal_node_alloc;
- RegionAllocator* datapage_alloc;
+ regionAllocator* internal_node_alloc;
+ regionAllocator* datapage_alloc;
struct indexnode_rec {
pageid_t ptr;
@@ -178,8 +178,8 @@ class diskTreeComponent {
public:
class iterator {
public:
- iterator(int xid, RegionAllocator *ro_alloc, recordid root);
- iterator(int xid, RegionAllocator *ro_alloc, recordid root, const byte* key, len_t keylen);
+ iterator(int xid, regionAllocator *ro_alloc, recordid root);
+ iterator(int xid, regionAllocator *ro_alloc, recordid root, const byte* key, len_t keylen);
int next();
void close();
@@ -197,7 +197,7 @@ class diskTreeComponent {
inline void releaseLock() { }
private:
- RegionAllocator * ro_alloc_;
+ regionAllocator * ro_alloc_;
Page * p;
int xid_;
bool done;
@@ -216,21 +216,21 @@ class diskTreeComponent {
public:
explicit iterator(diskTreeComponent::internalNodes *tree, mergeManager * mgr = NULL, double target_size = 0, bool * flushing = NULL);
- explicit iterator(diskTreeComponent::internalNodes *tree,datatuple *key);
+ explicit iterator(diskTreeComponent::internalNodes *tree,dataTuple *key);
~iterator();
- datatuple * next_callerFrees();
+ dataTuple * next_callerFrees();
private:
- void init_iterators(datatuple * key1, datatuple * key2);
- inline void init_helper(datatuple * key1);
+ void init_iterators(dataTuple * key1, dataTuple * key2);
+ inline void init_helper(dataTuple * key1);
explicit iterator() { abort(); }
void operator=(iterator & t) { abort(); }
int operator-(iterator & t) { abort(); }
- RegionAllocator * ro_alloc_; // has a filehandle that we use to optimize sequential scans.
+ regionAllocator * ro_alloc_; // has a filehandle that we use to optimize sequential scans.
recordid tree_; //root of the tree
mergeManager * mgr_;
double target_progress_delta_;
@@ -239,8 +239,8 @@ class diskTreeComponent {
diskTreeComponent::internalNodes::iterator* lsmIterator_;
pageid_t curr_pageid; //current page id
- DataPage *curr_page; //current page
- typedef DataPage::iterator DPITR_T;
+ dataPage *curr_page; //current page
+ typedef dataPage::iterator DPITR_T;
DPITR_T *dp_itr;
};
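The iterator surface in diskTreeComponent.h keeps its next_callerFrees() contract; a hypothetical full-scan sketch over some existing component c1 (a diskTreeComponent*, not defined in this commit):

    diskTreeComponent::iterator *it = c1->open_iterator();   // default args: no merge manager
    while (dataTuple *t = it->next_callerFrees()) {           // returns NULL at end of component
        // ... consume t->strippedkey() / t->data() ...
        dataTuple::freetuple(t);                               // caller frees each returned tuple
    }
    delete it;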
6 memTreeComponent.cpp
@@ -20,14 +20,14 @@
#include "datatuple.h"
void memTreeComponent::tearDownTree(rbtree_ptr_t tree) {
- datatuple * t = 0;
+ dataTuple * t = 0;
rbtree_t::iterator old;
for(rbtree_t::iterator delitr = tree->begin();
delitr != tree->end();
delitr++) {
if(t) {
tree->erase(old);
- datatuple::freetuple(t);
+ dataTuple::freetuple(t);
t = 0;
}
t = *delitr;
@@ -35,7 +35,7 @@ void memTreeComponent::tearDownTree(rbtree_ptr_t tree) {
}
if(t) {
tree->erase(old);
- datatuple::freetuple(t);
+ dataTuple::freetuple(t);
}
delete tree;
}
36 memTreeComponent.h
@@ -26,7 +26,7 @@
class memTreeComponent {
public:
// typedef std::set<datatuple*, datatuple, stlslab<datatuple*> > rbtree_t;
- typedef std::set<datatuple*, datatuple> rbtree_t;
+ typedef std::set<dataTuple*, dataTuple> rbtree_t;
typedef rbtree_t* rbtree_ptr_t;
static void tearDownTree(rbtree_ptr_t t);
@@ -47,7 +47,7 @@ class memTreeComponent {
init_iterators(s, NULL, NULL);
}
- iterator( rbtree_t *s, datatuple *&key )
+ iterator( rbtree_t *s, dataTuple *&key )
: first_(true), done_(s == NULL) {
init_iterators(s, key, NULL);
}
@@ -57,7 +57,7 @@ class memTreeComponent {
delete itend_;
}
- datatuple* next_callerFrees() {
+ dataTuple* next_callerFrees() {
if(done_) { return NULL; }
if(first_) { first_ = 0;} else { (*it_)++; }
if(*it_==*itend_) { done_= true; return NULL; }
@@ -67,7 +67,7 @@ class memTreeComponent {
private:
- void init_iterators(rbtree_t * s, datatuple * key1, datatuple * key2) {
+ void init_iterators(rbtree_t * s, dataTuple * key1, dataTuple * key2) {
if(s) {
it_ = key1 ? new MTITER(s->lower_bound(key1)) : new MTITER(s->begin());
itend_ = key2 ? new MTITER(s->upper_bound(key2)) : new MTITER(s->end());
@@ -114,7 +114,7 @@ class memTreeComponent {
}
if(mut_) pthread_mutex_unlock(mut_);
}
- revalidatingIterator( rbtree_t *s, pthread_mutex_t * rb_mut, datatuple *&key ) : s_(s), mut_(rb_mut) {
+ revalidatingIterator( rbtree_t *s, pthread_mutex_t * rb_mut, dataTuple *&key ) : s_(s), mut_(rb_mut) {
if(mut_) pthread_mutex_lock(mut_);
if(key) {
if(s_->find(key) != s_->end()) {
@@ -136,12 +136,12 @@ class memTreeComponent {
}
~revalidatingIterator() {
- if(next_ret_) datatuple::freetuple(next_ret_);
+ if(next_ret_) dataTuple::freetuple(next_ret_);
}
- datatuple* next_callerFrees() {
+ dataTuple* next_callerFrees() {
if(mut_) pthread_mutex_lock(mut_);
- datatuple * ret = next_ret_;
+ dataTuple * ret = next_ret_;
if(next_ret_) {
if(s_->upper_bound(next_ret_) == s_->end()) {
next_ret_ = 0;
@@ -159,7 +159,7 @@ class memTreeComponent {
int operator-(revalidatingIterator & t) { abort(); }
rbtree_t *s_;
- datatuple * next_ret_;
+ dataTuple * next_ret_;
pthread_mutex_t * mut_;
};
@@ -174,7 +174,7 @@ class memTreeComponent {
typedef rbtree_t::const_iterator MTITER;
- void populate_next_ret_impl(std::_Rb_tree_const_iterator<datatuple*>/*MTITER*/ it) {
+ void populate_next_ret_impl(std::_Rb_tree_const_iterator<dataTuple*>/*MTITER*/ it) {
num_batched_ = 0;
cur_off_ = 0;
while(it != s_->end() && num_batched_ < batch_size_) {
@@ -183,7 +183,7 @@ class memTreeComponent {
it++;
}
}
- void populate_next_ret(datatuple *key=NULL, bool include_key=false) {
+ void populate_next_ret(dataTuple *key=NULL, bool include_key=false) {
if(cur_off_ == num_batched_) {
if(mut_) pthread_mutex_lock(mut_);
if(mgr_) {
@@ -206,24 +206,24 @@ class memTreeComponent {
public:
batchedRevalidatingIterator( rbtree_t *s, mergeManager * mgr, int64_t target_size, bool * flushing, int batch_size, pthread_mutex_t * rb_mut ) : s_(s), mgr_(mgr), target_size_(target_size), flushing_(flushing), batch_size_(batch_size), num_batched_(batch_size), cur_off_(batch_size), mut_(rb_mut) {
- next_ret_ = (datatuple**)malloc(sizeof(next_ret_[0]) * batch_size_);
+ next_ret_ = (dataTuple**)malloc(sizeof(next_ret_[0]) * batch_size_);
populate_next_ret();
}
- batchedRevalidatingIterator( rbtree_t *s, int batch_size, pthread_mutex_t * rb_mut, datatuple *&key ) : s_(s), mgr_(NULL), target_size_(0), flushing_(0), batch_size_(batch_size), num_batched_(batch_size), cur_off_(batch_size), mut_(rb_mut) {
- next_ret_ = (datatuple**)malloc(sizeof(next_ret_[0]) * batch_size_);
+ batchedRevalidatingIterator( rbtree_t *s, int batch_size, pthread_mutex_t * rb_mut, dataTuple *&key ) : s_(s), mgr_(NULL), target_size_(0), flushing_(0), batch_size_(batch_size), num_batched_(batch_size), cur_off_(batch_size), mut_(rb_mut) {
+ next_ret_ = (dataTuple**)malloc(sizeof(next_ret_[0]) * batch_size_);
populate_next_ret(key, true);
}
~batchedRevalidatingIterator() {
for(int i = cur_off_; i < num_batched_; i++) {
- datatuple::freetuple(next_ret_[i]);
+ dataTuple::freetuple(next_ret_[i]);
}
free(next_ret_);
}
- datatuple* next_callerFrees() {
+ dataTuple* next_callerFrees() {
if(cur_off_ == num_batched_) { return NULL; } // the last thing we did is call populate_next_ret_(), which only leaves us in this state at the end of the iterator.
- datatuple * ret = next_ret_[cur_off_];
+ dataTuple * ret = next_ret_[cur_off_];
cur_off_++;
populate_next_ret(ret);
return ret;
@@ -235,7 +235,7 @@ class memTreeComponent {
int operator-(batchedRevalidatingIterator & t) { abort(); }
rbtree_t *s_;
- datatuple ** next_ret_;
+ dataTuple ** next_ret_;
mergeManager * mgr_;
int64_t target_size_; // the low-water size for the tree. If cur_size_ is not null, and *cur_size_ < C * target_size_, we sleep.
bool* flushing_; // never block if *flushing is true.
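For reference, a minimal sketch (not part of this commit) of how a caller might drain one of the renamed iterators above. It assumes the memTreeComponent and dataTuple declarations shown in this diff; the drain_from_key helper, its batch size, and the include paths are illustrative only.

    #include <pthread.h>
    #include <datatuple.h>
    #include "memTreeComponent.h"

    // Walk c0 starting at start_key, freeing each returned tuple: the
    // *_callerFrees() contract above makes the caller the owner.
    static void drain_from_key(memTreeComponent::rbtree_t *c0,
                               pthread_mutex_t *rb_mut,
                               dataTuple *&start_key) {
      memTreeComponent::batchedRevalidatingIterator it(c0, /*batch_size=*/100,
                                                       rb_mut, start_key);
      while (dataTuple *t = it.next_callerFrees()) {
        // ... consume t ...
        dataTuple::freetuple(t);
      }
    }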
View
10 mergeManager.cpp
@@ -425,7 +425,7 @@ void mergeManager::tick(mergeStats * s) {
}
}
-void mergeManager::read_tuple_from_small_component(int merge_level, datatuple * tup) {
+void mergeManager::read_tuple_from_small_component(int merge_level, dataTuple * tup) {
if(tup) {
mergeStats * s = get_merge_stats(merge_level);
(s->num_tuples_in_small)++;
@@ -450,7 +450,7 @@ void mergeManager::read_tuple_from_large_component(int merge_level, int tuple_co
}
}
-void mergeManager::wrote_tuple(int merge_level, datatuple * tup) {
+void mergeManager::wrote_tuple(int merge_level, dataTuple * tup) {
mergeStats * s = get_merge_stats(merge_level);
(s->num_tuples_out)++;
(s->bytes_out) += tup->byte_length();
@@ -543,7 +543,7 @@ void mergeManager::init_helper(void) {
pthread_create(&update_progress_pthread, 0, merge_manager_update_progress_thread, (void*)this);
}
-mergeManager::mergeManager(blsm *ltable):
+mergeManager::mergeManager(bLSM *ltable):
UPDATE_PROGRESS_PERIOD(0.005),
ltable(ltable) {
c0 = new mergeStats(0, ltable ? ltable->max_c0_size : 10000000);
@@ -551,7 +551,7 @@ mergeManager::mergeManager(blsm *ltable):
c2 = new mergeStats(2, 0);
init_helper();
}
-mergeManager::mergeManager(blsm *ltable, int xid, recordid rid):
+mergeManager::mergeManager(bLSM *ltable, int xid, recordid rid):
UPDATE_PROGRESS_PERIOD(0.005),
ltable(ltable) {
marshalled_header h;
@@ -581,7 +581,7 @@ void mergeManager::marshal(int xid, recordid rid) {
void mergeManager::pretty_print(FILE * out) {
#if EXTENDED_STATS
- blsm * lt = ltable;
+ bLSM * lt = ltable;
bool have_c0 = false;
bool have_c0m = false;
bool have_c1 = false;
View
14 mergeManager.h
@@ -27,7 +27,7 @@
#include <stdio.h>
#include <datatuple.h>
-class blsm;
+class bLSM;
class mergeStats;
class mergeManager {
@@ -48,8 +48,8 @@ class mergeManager {
uint64_t long_tv(struct timeval& tv) {
return (1000000ULL * (uint64_t)tv.tv_sec) + ((uint64_t)tv.tv_usec);
}
- mergeManager(blsm *ltable);
- mergeManager(blsm *ltable, int xid, recordid rid);
+ mergeManager(bLSM *ltable);
+ mergeManager(bLSM *ltable, int xid, recordid rid);
void marshal(int xid, recordid rid);
recordid talloc(int xid);
~mergeManager();
@@ -62,14 +62,14 @@ class mergeManager {
void tick(mergeStats * s);
mergeStats* get_merge_stats(int mergeLevel);
- void read_tuple_from_small_component(int merge_level, datatuple * tup);
- void read_tuple_from_large_component(int merge_level, datatuple * tup) {
+ void read_tuple_from_small_component(int merge_level, dataTuple * tup);
+ void read_tuple_from_large_component(int merge_level, dataTuple * tup) {
if(tup)
read_tuple_from_large_component(merge_level, 1, tup->byte_length());
}
void read_tuple_from_large_component(int merge_level, int tuple_count, pageid_t byte_len);
- void wrote_tuple(int merge_level, datatuple * tup);
+ void wrote_tuple(int merge_level, dataTuple * tup);
void pretty_print(FILE * out);
void *pretty_print_thread();
void *update_progress_thread();
@@ -106,7 +106,7 @@ class mergeManager {
*
* TODO: remove mergeManager->ltable?
*/
- blsm* ltable;
+ bLSM* ltable;
mergeStats * c0; /// Per-tree component statistics for c0 and c0_mergeable (the latter should always be null...)
mergeStats * c1; /// Per-tree component statistics for c1 and c1_mergeable.
mergeStats * c2; /// Per-tree component statistics for c2.
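As a usage note, the sketch below shows the accounting calls a merge pass makes against this API. Only the mergeManager methods (read_tuple_from_small_component, read_tuple_from_large_component, wrote_tuple) are taken from the declarations above; the iterator type and the report_merge_pass helper are hypothetical stand-ins.

    #include <datatuple.h>
    #include "mergeManager.h"

    // One pass over the small-side iterator, reporting progress to the
    // mergeManager so its pretty_print / update_progress threads see it.
    template <class SmallIter>
    static void report_merge_pass(mergeManager *mgr, int merge_level, SmallIter *itr) {
      for (dataTuple *t = itr->next_callerFrees(); t != NULL; t = itr->next_callerFrees()) {
        mgr->read_tuple_from_small_component(merge_level, t); // counts tuples/bytes read
        // ... actual merge work would happen here ...
        mgr->wrote_tuple(merge_level, t);                     // counts tuples/bytes written
        dataTuple::freetuple(t);
      }
    }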
View
4 mergeStats.h
@@ -182,9 +182,9 @@ class mergeStats {
just_handed_off = true;
}
}
- void merged_tuples(datatuple * merged, datatuple * small, datatuple * large) {
+ void merged_tuples(dataTuple * merged, dataTuple * small, dataTuple * large) {
}
- void wrote_datapage(DataPage *dp) {
+ void wrote_datapage(dataPage *dp) {
#if EXTENDED_STATS
stats_num_datapages_out++;
stats_bytes_out_with_overhead += (PAGE_SIZE * dp->get_page_count());
View
52 merger.cpp
@@ -22,27 +22,27 @@
#include <stasis/transactional.h>
static void* memMerge_thr(void* arg) {
- return ((merge_scheduler*)arg)->memMergeThread();
+ return ((mergeScheduler*)arg)->memMergeThread();
}
static void* diskMerge_thr(void* arg) {
- return ((merge_scheduler*)arg)->diskMergeThread();
+ return ((mergeScheduler*)arg)->diskMergeThread();
}
-merge_scheduler::merge_scheduler(blsm *ltable) : ltable_(ltable), MIN_R(3.0) { }
-merge_scheduler::~merge_scheduler() { }
+mergeScheduler::mergeScheduler(bLSM *ltable) : ltable_(ltable), MIN_R(3.0) { }
+mergeScheduler::~mergeScheduler() { }
-void merge_scheduler::shutdown() {
+void mergeScheduler::shutdown() {
ltable_->stop();
pthread_join(mem_merge_thread_, 0);
pthread_join(disk_merge_thread_, 0);
}
-void merge_scheduler::start() {
+void mergeScheduler::start() {
pthread_create(&mem_merge_thread_, 0, memMerge_thr, this);
pthread_create(&disk_merge_thread_, 0, diskMerge_thr, this);
}
-bool insert_filter(blsm * ltable, datatuple * t, bool dropDeletes) {
+bool insert_filter(bLSM * ltable, dataTuple * t, bool dropDeletes) {
if(t->isDelete()) {
if(dropDeletes || ! ltable->mightBeAfterMemMerge(t)) {
return false;
@@ -57,7 +57,7 @@ template <class ITA, class ITB>
void merge_iterators(int xid, diskTreeComponent * forceMe,
ITA *itrA,
ITB *itrB,
- blsm *ltable,
+ bLSM *ltable,
diskTreeComponent *scratch_tree,
mergeStats * stats,
bool dropDeletes);
@@ -84,7 +84,7 @@ void merge_iterators(int xid, diskTreeComponent * forceMe,
</pre>
Merge algorithm: actual order: 1 2 3 4 5 6 12 11.5 11 [7 8 (9) 10] 13
*/
-void * merge_scheduler::memMergeThread() {
+void * mergeScheduler::memMergeThread() {
int xid;
@@ -240,7 +240,7 @@ void * merge_scheduler::memMergeThread() {
}
-void * merge_scheduler::diskMergeThread()
+void * mergeScheduler::diskMergeThread()
{
int xid;
@@ -355,11 +355,11 @@ static void periodically_force(int xid, int *i, diskTreeComponent * forceMe, sta
}
}
-static int garbage_collect(blsm * ltable_, datatuple ** garbage, int garbage_len, int next_garbage, bool force = false) {
+static int garbage_collect(bLSM * ltable_, dataTuple ** garbage, int garbage_len, int next_garbage, bool force = false) {
if(next_garbage == garbage_len || force) {
pthread_mutex_lock(&ltable_->rb_mut);
for(int i = 0; i < next_garbage; i++) {
- datatuple * t2tmp = NULL;
+ dataTuple * t2tmp = NULL;
{
memTreeComponent::rbtree_t::iterator rbitr = ltable_->get_tree_c0()->find(garbage[i]);
if(rbitr != ltable_->get_tree_c0()->end()) {
@@ -375,9 +375,9 @@ static int garbage_collect(blsm * ltable_, datatuple ** garbage, int garbage_len
if(t2tmp) {
ltable_->get_tree_c0()->erase(garbage[i]);
//ltable_->merge_mgr->get_merge_stats(0)->current_size -= garbage[i]->byte_length();
- datatuple::freetuple(t2tmp);
+ dataTuple::freetuple(t2tmp);
}
- datatuple::freetuple(garbage[i]);
+ dataTuple::freetuple(garbage[i]);
}
pthread_mutex_unlock(&ltable_->rb_mut);
return 0;
@@ -391,20 +391,20 @@ void merge_iterators(int xid,
diskTreeComponent * forceMe,
ITA *itrA, //iterator on c1 or c2
ITB *itrB, //iterator on c0 or c1, respectively
- blsm *ltable,
+ bLSM *ltable,
diskTreeComponent *scratch_tree, mergeStats * stats,
bool dropDeletes // should be true iff this is biggest component
)
{
stasis_log_t * log = (stasis_log_t*)stasis_log();
- datatuple *t1 = itrA->next_callerFrees();
+ dataTuple *t1 = itrA->next_callerFrees();
ltable->merge_mgr->read_tuple_from_large_component(stats->merge_level, t1);
- datatuple *t2 = 0;
+ dataTuple *t2 = 0;
int garbage_len = 100;
int next_garbage = 0;
- datatuple ** garbage = (datatuple**)malloc(sizeof(garbage[0]) * garbage_len);
+ dataTuple ** garbage = (dataTuple**)malloc(sizeof(garbage[0]) * garbage_len);
int i = 0;
@@ -415,7 +415,7 @@ void merge_iterators(int xid,
DEBUG("tuple\t%lld: keylen %d datalen %d\n",
ntuples, *(t2->keylen),*(t2->datalen) );
- while(t1 != 0 && datatuple::compare(t1->rawkey(), t1->rawkeylen(), t2->rawkey(), t2->rawkeylen()) < 0) // t1 is less than t2
+ while(t1 != 0 && dataTuple::compare(t1->rawkey(), t1->rawkeylen(), t2->rawkey(), t2->rawkeylen()) < 0) // t1 is less than t2
{
//insert t1
if(insert_filter(ltable, t1, dropDeletes)) {
@@ -423,7 +423,7 @@ void merge_iterators(int xid,
i+=t1->byte_length();
ltable->merge_mgr->wrote_tuple(stats->merge_level, t1);
}
- datatuple::freetuple(t1);
+ dataTuple::freetuple(t1);
//advance itrA
t1 = itrA->next_callerFrees();
@@ -432,9 +432,9 @@ void merge_iterators(int xid,
periodically_force(xid, &i, forceMe, log);
}
- if(t1 != 0 && datatuple::compare(t1->strippedkey(), t1->strippedkeylen(), t2->strippedkey(), t2->strippedkeylen()) == 0)
+ if(t1 != 0 && dataTuple::compare(t1->strippedkey(), t1->strippedkeylen(), t2->strippedkey(), t2->strippedkeylen()) == 0)
{
- datatuple *mtuple = ltable->gettuplemerger()->merge(t1,t2);
+ dataTuple *mtuple = ltable->gettuplemerger()->merge(t1,t2);
stats->merged_tuples(mtuple, t2, t1); // this looks backwards, but is right.
//insert merged tuple, drop deletes
@@ -443,10 +443,10 @@ void merge_iterators(int xid,
i+=mtuple->byte_length();
ltable->merge_mgr->wrote_tuple(stats->merge_level, mtuple);
}
- datatuple::freetuple(t1);
+ dataTuple::freetuple(t1);
t1 = itrA->next_callerFrees(); //advance itrA
ltable->merge_mgr->read_tuple_from_large_component(stats->merge_level, t1);
- datatuple::freetuple(mtuple);
+ dataTuple::freetuple(mtuple);
periodically_force(xid, &i, forceMe, log);
}
else
@@ -469,7 +469,7 @@ void merge_iterators(int xid,
next_garbage++;
}
if(stats->merge_level != 1) {
- datatuple::freetuple(t2);
+ dataTuple::freetuple(t2);
}
}
@@ -480,7 +480,7 @@ void merge_iterators(int xid,
ltable->merge_mgr->wrote_tuple(stats->merge_level, t1);
i += t1->byte_length();
}
- datatuple::freetuple(t1);
+ dataTuple::freetuple(t1);
//advance itrA
t1 = itrA->next_callerFrees();
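To make the control flow of merge_iterators() easier to follow, here is a simplified, self-contained sketch of the same two-way merge. It is not the shipped function: the emit and merge_tuples callables stand in for insert_filter()/wrote_tuple() and ltable->gettuplemerger()->merge(), and the garbage-collection and periodic-force steps are omitted.

    #include <datatuple.h>

    template <class ITA, class ITB, class Emit, class Merge>
    void merge_sketch(ITA *large, ITB *small, Emit emit, Merge merge_tuples) {
      dataTuple *t1 = large->next_callerFrees();        // large component (c1 or c2)
      for (dataTuple *t2 = small->next_callerFrees();   // small component (c0 or c1)
           t2 != NULL;
           t2 = small->next_callerFrees()) {
        // Drain everything in the large component that sorts before t2.
        while (t1 && dataTuple::compare(t1->rawkey(), t1->rawkeylen(),
                                        t2->rawkey(), t2->rawkeylen()) < 0) {
          emit(t1);
          dataTuple::freetuple(t1);
          t1 = large->next_callerFrees();
        }
        if (t1 && dataTuple::compare(t1->strippedkey(), t1->strippedkeylen(),
                                     t2->strippedkey(), t2->strippedkeylen()) == 0) {
          // Same key on both sides: combine and emit the merged tuple.
          dataTuple *m = merge_tuples(t1, t2);
          emit(m);
          dataTuple::freetuple(m);
          dataTuple::freetuple(t1);
          t1 = large->next_callerFrees();
        } else {
          emit(t2);                                     // key only exists in the small component
        }
        dataTuple::freetuple(t2);
      }
      while (t1) {                                      // flush the tail of the large component
        emit(t1);
        dataTuple::freetuple(t1);
        t1 = large->next_callerFrees();
      }
    }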
View
8 merger.h
@@ -23,10 +23,10 @@
#include <stasis/common.h>
-class merge_scheduler {
+class mergeScheduler {
public:
- merge_scheduler(blsm * ltable);
- ~merge_scheduler();
+ mergeScheduler(bLSM * ltable);
+ ~mergeScheduler();
void start();
void shutdown();
@@ -37,7 +37,7 @@ class merge_scheduler {
private:
pthread_t mem_merge_thread_;
pthread_t disk_merge_thread_;
- blsm * ltable_;
+ bLSM * ltable_;
const double MIN_R;
};
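For completeness, a minimal lifecycle sketch for the renamed class, assuming a bLSM table opened elsewhere; only the constructor, start(), and shutdown() come from the declarations above, the rest is illustrative.

    #include "merger.h"

    void run_merges(bLSM *ltable) {
      mergeScheduler sched(ltable);
      sched.start();      // spawns memMergeThread and diskMergeThread
      // ... serve reads and writes while background merges run ...
      sched.shutdown();   // stops the table and joins both merge threads
    }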
View
10 regionAllocator.h
@@ -24,12 +24,12 @@
#include <stasis/transactional.h>
-class RegionAllocator
+class regionAllocator
{
public:
// Open an existing region allocator.
- RegionAllocator(int xid, recordid rid) :
+ regionAllocator(int xid, recordid rid) :
nextPage_(INVALID_PAGE),
endOfRegion_(INVALID_PAGE),
bm_((stasis_buffer_manager_t*)stasis_runtime_buffer_manager()),
@@ -39,7 +39,7 @@ class RegionAllocator
regionCount_ = TarrayListLength(xid, header_.region_list);
}
// Create a