Add new Immix GC as the mature GC

Commit 45be183889b9daac5e1115c1db3a697ca26a2e55 (1 parent: 0649d9d). Evan Phoenix committed Mar 9, 2009.
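This commit introduces an Immix-style mark-region collector as the mature generation. The generic block, line, and chunk machinery lives in a standalone immix namespace (vm/util/immix.hpp) and is driven through a policy "describer" object; vm/gc_immix.cpp and vm/gc_immix.hpp adapt it to Rubinius objects. Supporting changes add an ObjectVisitor interface with per-type auto_visit methods (hand-written or generated by field_extract.rb), route mature allocation and promotion in ObjectMemory through the new collector, and add a compile-time object watchpoint (vm/object_watch.hpp) for debugging the GC.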
vm/builtin/tuple.cpp (8 changed lines)
@@ -178,6 +178,14 @@ namespace rubinius {
}
}
+ void Tuple::Info::visit(Object* obj, ObjectVisitor& visit) {
+ Tuple* tup = as<Tuple>(obj);
+
+ for(size_t i = 0; i < tup->num_fields(); i++) {
+ visit.call(tup->field[i]);
+ }
+ }
+
void Tuple::Info::show(STATE, Object* self, int level) {
Tuple* tup = as<Tuple>(self);
size_t size = tup->num_fields();
vm/builtin/tuple.hpp (1 changed line)
@@ -58,6 +58,7 @@ namespace rubinius {
virtual void mark(Object* t, ObjectMark& mark);
virtual void show(STATE, Object* self, int level);
virtual void show_simple(STATE, Object* self, int level);
+ virtual void visit(Object*, ObjectVisitor& visit);
};
};
};
vm/builtin/variable_scope.hpp (1 changed line)
@@ -96,6 +96,7 @@ namespace rubinius {
virtual void set_field(STATE, Object*, size_t, Object*);
virtual Object* get_field(STATE, Object*, size_t);
virtual void auto_mark(Object*, ObjectMark&);
+ virtual void auto_visit(Object*, ObjectVisitor&);
};
};
}
vm/codegen/field_extract.rb (33 changed lines)
@@ -359,6 +359,21 @@ def generate_marks(cpp)
return str
end
+ def generate_visits(cpp)
+ str = ''
+
+ str << generate_visits(cpp.super) if cpp.super
+
+ cpp.fields.each do |name, type, idx|
+ str << <<-EOF
+ visit.call(target->#{name}());
+ EOF
+ end
+
+ return str
+
+ end
+
def generate_mark
marks = generate_marks(self).rstrip
@@ -376,6 +391,23 @@ def generate_mark
str
end
+ def generate_visit
+ marks = generate_visits(self).rstrip
+
+ str = ''
+
+ str << <<-EOF unless marks.empty?
+void #{@name}::Info::auto_visit(Object* _t, ObjectVisitor& visit) {
+ #{@name}* target = as<#{@name}>(_t);
+
+#{marks}
+}
+
+ EOF
+
+ str
+ end
+
def kind_of_code(what)
case @name
when "Fixnum"
@@ -654,6 +686,7 @@ def write_if_new(path)
parser.classes.each do |n, cpp|
f.puts cpp.generate_typechecks
f.puts cpp.generate_mark
+ f.puts cpp.generate_visit
end
end
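For illustration, here is roughly what the generator emits for a hypothetical class Pair with two fields, first and second (Pair is not a real Rubinius type; the shape follows the templates above):

    void Pair::Info::auto_visit(Object* _t, ObjectVisitor& visit) {
      Pair* target = as<Pair>(_t);

      // One visit.call per generated field accessor; the return
      // value is ignored, so visiting never moves the object.
      visit.call(target->first());
      visit.call(target->second());
    }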
vm/gc.cpp (169 changed lines)
@@ -51,15 +51,8 @@ namespace rubinius {
void GarbageCollector::scan_object(Object* obj) {
Object* slot;
- // If this object's refs are weak, then add it to the weak_refs
- // vector and don't look at it otherwise.
- if(obj->RefsAreWeak) {
- if(!weak_refs) {
- weak_refs = new ObjectArray(0);
- }
-
- weak_refs->push_back(obj);
- return;
+ if(watched_p(obj)) {
+ std::cout << "detected " << obj << " during scan_object.\n";
}
if(obj->klass() && obj->klass()->reference_p()) {
@@ -72,6 +65,17 @@ namespace rubinius {
if(slot) obj->ivars(object_memory->state, slot);
}
+ // If this object's refs are weak, then add it to the weak_refs
+ // vector and don't look at it otherwise.
+ if(obj->RefsAreWeak) {
+ if(!weak_refs) {
+ weak_refs = new ObjectArray(0);
+ }
+
+ weak_refs->push_back(obj);
+ return;
+ }
+
TypeInfo* ti = object_memory->type_info[obj->obj_type];
assert(ti);
@@ -147,6 +151,153 @@ namespace rubinius {
call_frame = call_frame->previous;
}
+ }
+
+ void GarbageCollector::visit_variable_scope(VariableScope* scope, ObjectVisitor& visit) {
+ scope->update(visit.call(scope->self()),
+ visit.call(scope->module()),
+ visit.call(scope->block()));
+
+ for(int i = 0; i < scope->number_of_locals(); i++) {
+ Object* local = scope->get_local(i);
+ if(local->reference_p()) {
+ scope->set_local(i, visit.call(local));
+ }
+ }
+
+ VariableScope* parent = scope->parent();
+ if(parent && parent->reference_p()) {
+ if(parent->stack_allocated_p()) {
+ saw_variable_scope(parent);
+ } else {
+ scope->update_parent((VariableScope*)visit.call(parent));
+ }
+ }
+ }
+
+ void GarbageCollector::visit_call_frame(CallFrame* top_call_frame, ObjectVisitor& visit) {
+ CallFrame* call_frame = top_call_frame;
+ while(call_frame) {
+ if(call_frame->name && call_frame->name->reference_p()) {
+ call_frame->name = (Symbol*)visit.call(call_frame->name);
+ }
+
+ if(call_frame->cm && call_frame->cm->reference_p()) {
+ call_frame->cm = (CompiledMethod*)visit.call(call_frame->cm);
+ }
+
+ for(int i = 0; i < call_frame->stack_size; i++) {
+ Object* obj = call_frame->stk[i];
+ if(obj && obj->reference_p()) {
+ call_frame->stk[i] = visit.call(obj);
+ }
+ }
+
+ if(call_frame->top_scope) {
+ if(call_frame->top_scope->stack_allocated_p()) {
+ visit_variable_scope(call_frame->top_scope, visit);
+ } else {
+ call_frame->top_scope = (VariableScope*)visit.call(call_frame->top_scope);
+ }
+ }
+
+ if(call_frame->scope) {
+ if(call_frame->scope->stack_allocated_p()) {
+ visit_variable_scope(call_frame->scope, visit);
+ } else {
+ call_frame->scope = (VariableScope*)visit.call(call_frame->scope);
+ }
+ }
+
+ call_frame = call_frame->previous;
+ }
+ }
+
+ void GarbageCollector::visit_roots(Roots& roots, ObjectVisitor& visit) {
+ Root* root = static_cast<Root*>(roots.head());
+ while(root) {
+ Object* tmp = root->get();
+ if(tmp->reference_p()) {
+ visit.call(tmp);
+ }
+
+ root = static_cast<Root*>(root->next());
+ }
+ }
+
+ void GarbageCollector::visit_call_frames_list(CallFrameLocationList& call_frames,
+ ObjectVisitor& visit) {
+
+ // Walk all the call frames
+ for(CallFrameLocationList::const_iterator i = call_frames.begin();
+ i != call_frames.end();
+ i++) {
+ CallFrame** loc = *i;
+ visit_call_frame(*loc, visit);
+ }
+ }
+
+ class UnmarkVisitor : public ObjectVisitor {
+ std::vector<Object*> stack_;
+ ObjectMemory* object_memory_;
+
+ public:
+
+ UnmarkVisitor(ObjectMemory* om)
+ : object_memory_(om)
+ {}
+
+ Object* call(Object* obj) {
+ if(watched_p(obj)) {
+ std::cout << "detected " << obj << " during unmarking.\n";
+ }
+
+ if(obj->reference_p() && obj->marked_p()) {
+ obj->clear_mark();
+ stack_.push_back(obj);
+ }
+
+ return obj;
+ }
+
+ /* Understands how to read the inside of an object and find all references
+ * located within. It visits the objects pointed to, but does not follow
+ * into them further (i.e., not recursive) */
+ void visit_object(Object* obj) {
+ if(obj->klass() && obj->klass()->reference_p()) {
+ call(obj->klass());
+ }
+
+ if(obj->ivars() && obj->ivars()->reference_p()) {
+ call(obj->ivars());
+ }
+
+ TypeInfo* ti = object_memory_->type_info[obj->obj_type];
+ assert(ti);
+
+ ti->visit(obj, *this);
+ }
+
+ void drain_stack() {
+ while(!stack_.empty()) {
+ Object* obj = stack_.back();
+ stack_.pop_back();
+
+ if(watched_p(obj)) {
+ std::cout << "detected " << obj << " in unmarking stack.\n";
+ }
+
+ visit_object(obj);
+ }
+ }
+ };
+
+ void GarbageCollector::unmark_all(Roots &roots, CallFrameLocationList& call_frames) {
+ UnmarkVisitor visit(object_memory);
+
+ visit_roots(roots, visit);
+ visit_call_frames_list(call_frames, visit);
+ visit.drain_stack();
}
}
vm/gc.hpp (14 changed lines)
@@ -13,9 +13,14 @@ namespace rubinius {
typedef std::vector<Object*> ObjectArray;
+ class ObjectVisitor {
+ public:
+ virtual ~ObjectVisitor() { }
+ virtual Object* call(Object*) = 0;
+ };
class GarbageCollector {
- public:
+ public:
ObjectMemory* object_memory;
ObjectArray* weak_refs;
@@ -28,12 +33,19 @@ namespace rubinius {
void walk_call_frame(CallFrame* top_call_frame);
void saw_variable_scope(VariableScope* scope);
+ void visit_variable_scope(VariableScope* scope, ObjectVisitor& visit);
+ void visit_call_frame(CallFrame* top, ObjectVisitor& visit);
+
Object* mark_object(Object* obj) {
if(!obj || !obj->reference_p()) return obj;
Object* tmp = saw_object(obj);
if(tmp) return tmp;
return obj;
}
+
+ void visit_roots(Roots& roots, ObjectVisitor& visit);
+ void visit_call_frames_list(CallFrameLocationList& call_frames, ObjectVisitor& visit);
+ void unmark_all(Roots &roots, CallFrameLocationList& call_frames);
};
}
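To show how the pieces fit together, a minimal hypothetical visitor (not part of the commit) only has to implement call(); it can then be driven over the roots and call frames:

    // Counts reference objects reachable from the roots. call() must
    // return the object's (possibly new) location; this visitor never
    // moves anything, so it returns the object unchanged.
    class CountingVisitor : public ObjectVisitor {
    public:
      size_t seen;

      CountingVisitor() : seen(0) {}

      virtual Object* call(Object* obj) {
        if(obj && obj->reference_p()) seen++;
        return obj;
      }
    };

    // Inside a GarbageCollector subclass:
    //   CountingVisitor counter;
    //   visit_roots(roots, counter);
    //   visit_call_frames_list(call_frames, counter);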
vm/gc_baker.cpp (13 changed lines)
@@ -29,6 +29,10 @@ namespace rubinius {
Object* BakerGC::saw_object(Object* obj) {
Object* copy;
+ if(watched_p(obj)) {
+ std::cout << "detected " << obj << " during baker collection\n";
+ }
+
if(!obj->reference_p()) return obj;
if(obj->zone != YoungObjectZone) return obj;
@@ -56,6 +60,10 @@ namespace rubinius {
ctx->post_copy(as<MethodContext>(obj));
}
+ if(watched_p(copy)) {
+ std::cout << "detected " << copy << " during baker collection (2)\n";
+ }
+
obj->set_forward(object_memory->state, copy);
return copy;
}
@@ -102,7 +110,7 @@ namespace rubinius {
assert(tmp->zone == MatureObjectZone);
assert(!tmp->forwarded_p());
- /* Remove the Remember bit, since we're clearing the set. */
+ // Remove the Remember bit, since we're clearing the set.
tmp->Remember = 0;
scan_object(tmp);
}
@@ -153,6 +161,9 @@ namespace rubinius {
tmp = *oi;
assert(tmp->zone == MatureObjectZone);
scan_object(tmp);
+ if(watched_p(tmp)) {
+ std::cout << "detected " << tmp << " during scan of promoted objects.\n";
+ }
}
delete cur;
vm/gc_baker.hpp (8 changed lines)
@@ -15,12 +15,14 @@
#include "call_frame_list.hpp"
+#include "object_watch.hpp"
+
namespace rubinius {
class ObjectMemory;
class BakerGC : public GarbageCollector {
- public:
+ public:
/* Fields */
Heap heap_a;
@@ -60,6 +62,10 @@ namespace rubinius {
obj = (Object*)current->allocate(bytes);
}
+ if(watched_p(obj)) {
+ std::cout << "detected " << obj << " during baker allocation.\n";
+ }
+
obj->init_header(YoungObjectZone, bytes);
#ifdef RBX_GC_STATS
vm/gc_immix.cpp (105 changed lines)
@@ -0,0 +1,105 @@
+#include "gc_immix.hpp"
+#include "objectmemory.hpp"
+
+namespace rubinius {
+ void ImmixGC::ObjectDescriber::added_chunk(int count) {
+#ifdef IMMIX_DEBUG
+ std::cout << "Added a chunk: " << count << "\n";
+#endif
+
+ if(object_memory_) {
+ object_memory_->collect_mature_now = true;
+ }
+ }
+
+ void ImmixGC::ObjectDescriber::set_forwarding_pointer(immix::Address from, immix::Address to) {
+ from.as<Object>()->set_forward(object_memory_->state, to.as<Object>());
+ }
+
+ ImmixGC::ImmixGC(ObjectMemory* om)
+ : GarbageCollector(om)
+ , allocator_(gc_.block_allocator())
+ {
+ gc_.describer().set_object_memory(om, this);
+ }
+
+ ImmixGC::~ImmixGC() {
+ // TODO free data
+ }
+
+ Object* ImmixGC::allocate(int bytes) {
+ Object* obj = allocator_.allocate(bytes).as<Object>();
+ obj->init_header(MatureObjectZone, bytes);
+ obj->InImmix = 1;
+ return obj;
+ }
+
+ Object* ImmixGC::saw_object(Object* obj) {
+ if(watched_p(obj)) {
+ std::cout << "detected " << obj << " during immix scanning.\n";
+ }
+
+ immix::Address fwd = gc_.mark_address(immix::Address(obj), allocator_);
+ return fwd.as<Object>();
+ }
+
+ void ImmixGC::collect(Roots &roots, CallFrameLocationList& call_frames) {
+ Object* tmp;
+
+ Root* root = static_cast<Root*>(roots.head());
+ while(root) {
+ tmp = root->get();
+ if(tmp->reference_p()) {
+ saw_object(tmp);
+ }
+
+ root = static_cast<Root*>(root->next());
+ }
+
+ // Walk all the call frames
+ for(CallFrameLocationList::const_iterator i = call_frames.begin();
+ i != call_frames.end();
+ i++) {
+ CallFrame** loc = *i;
+ walk_call_frame(*loc);
+ }
+
+ gc_.process_mark_stack(allocator_);
+
+ // Cleanup all weakrefs seen
+ // TODO support weakrefs!
+
+ // Sweep up the garbage
+ gc_.sweep_blocks();
+
+ // This resets the allocator state to sync it up with the BlockAllocator
+ // properly.
+ allocator_.get_new_block();
+
+ ObjectArray *current_rs = object_memory->remember_set;
+
+ int cleared = 0;
+
+ for(ObjectArray::iterator oi = current_rs->begin();
+ oi != current_rs->end();
+ oi++) {
+ tmp = *oi;
+ // unremember_object stores a NULL to remove an object,
+ // so we don't have to compact the set in unremember
+ if(tmp) {
+ assert(tmp->zone == MatureObjectZone);
+ assert(!tmp->forwarded_p());
+
+ if(!tmp->Marked) {
+ cleared++;
+ *oi = NULL;
+ }
+ }
+ }
+
+#ifdef IMMIX_DEBUG
+ std::cout << "Immix: RS size cleared: " << cleared << "\n";
+#endif
+ }
+
+}
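The collection cycle above: mark everything reachable from the roots and the call frames, drain the mark stack (which transitively marks objects and copies those living in blocks slated for evacuation), sweep the blocks to reclaim empty ones, resync the allocator with the BlockAllocator, and finally clear unmarked entries out of the remember set. Weak references are explicitly not handled yet (see the TODO above).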
vm/gc_immix.hpp (92 changed lines)
@@ -0,0 +1,92 @@
+#ifndef RBX_GC_IMMIX
+#define RBX_GC_IMMIX
+
+#include "util/immix.hpp"
+#include "gc.hpp"
+
+namespace rubinius {
+ class ObjectMemory;
+ class ImmixGC;
+
+ class ImmixGC : public GarbageCollector {
+ class ObjectDescriber {
+ ObjectMemory* object_memory_;
+ ImmixGC* gc_;
+
+ public:
+ ObjectDescriber()
+ : object_memory_(0)
+ {}
+
+ void set_object_memory(ObjectMemory* om, ImmixGC* gc) {
+ object_memory_ = om;
+ gc_ = gc;
+ }
+
+ void added_chunk(int count);
+
+ void set_forwarding_pointer(immix::Address from, immix::Address to);
+
+ immix::Address forwarding_pointer(immix::Address cur) {
+ Object* obj = cur.as<Object>();
+
+ if(obj->forwarded_p()) return obj->forward();
+
+ return immix::Address::null();
+ }
+
+ immix::Address copy(immix::Address original, immix::Allocator& alloc) {
+ Object* orig = original.as<Object>();
+
+ immix::Address copy_addr = alloc.allocate(orig->size_in_bytes());
+ Object* copy = copy_addr.as<Object>();
+
+ copy->initialize_copy(orig, 0);
+ copy->copy_body(orig);
+
+ copy->zone = MatureObjectZone;
+ copy->InImmix = 1;
+
+ return copy_addr;
+ }
+
+ void walk_pointers(immix::Address addr, immix::Marker<ObjectDescriber>& mark) {
+ gc_->scan_object(addr.as<Object>());
+ }
+
+ int size(immix::Address addr) {
+ return addr.as<Object>()->size_in_bytes();
+ }
+
+ bool mark_address(immix::Address addr, immix::MarkStack& ms) {
+ Object* obj = addr.as<Object>();
+
+ if(obj->marked_p()) return false;
+ obj->mark();
+
+ ms.push_back(addr);
+
+ // If this is a young object, let the GC know not to try and mark
+ // the block it's in.
+ if(obj->young_object_p() || !obj->InImmix) {
+ return false;
+ }
+ return true;
+ }
+ };
+
+ immix::GC<ObjectDescriber> gc_;
+ immix::ExpandingAllocator allocator_;
+
+ public:
+ ImmixGC(ObjectMemory* om);
+ virtual ~ImmixGC();
+
+ Object* allocate(int bytes);
+
+ virtual Object* saw_object(Object*);
+ void collect(Roots &roots, CallFrameLocationList& call_frame);
+ };
+}
+
+#endif
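A note on the design: immix::GC is a template parameterized by a describer, so the generic collector in vm/util/immix.hpp needs no Rubinius headers. ObjectDescriber supplies every object-specific operation the collector needs: reading and setting forwarding pointers, copying an object into a new block, reporting an object's size, marking it, and walking its pointers. The test suite later in this commit drives the same template with a trivial SimpleObjectDescriber.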
vm/gc_marksweep.cpp (8 changed lines)
@@ -113,12 +113,11 @@ namespace rubinius {
}
Object* MarkSweepGC::saw_object(Object* obj) {
-
#ifdef RBX_GC_STATS
stats::GCStats::get()->objects_seen++;
#endif
- if(obj->young_object_p()) {
+ if(obj->young_object_p() || obj->InImmix) {
if(obj->marked_p()) return NULL;
obj->mark();
@@ -182,12 +181,13 @@ namespace rubinius {
std::list<Entry*>::iterator i;
for(i = entries.begin(); i != entries.end();) {
- if((*i)->unmarked_p()) {
+ Entry* ent = *i;
+ if(ent->unmarked_p() && !ent->header->to_object()->marked_p()) {
free_object(*i);
if(free_entries) delete *i;
i = entries.erase(i);
} else {
- (*i)->clear();
+ ent->clear();
i++;
}
}
vm/object_watch.hpp (23 changed lines)
@@ -0,0 +1,23 @@
+#ifndef RBX_OBJECT_WATCH
+#define RBX_OBJECT_WATCH
+
+// #define ENABLE_OBJECT_WATCH
+
+namespace rubinius {
+ class Object;
+
+ extern Object* object_watch;
+
+#ifdef ENABLE_OBJECT_WATCH
+ static inline bool watched_p(Object* obj) {
+ return obj == object_watch;
+ }
+#else
+ static inline bool watched_p(Object* obj) {
+ return false;
+ }
+#endif
+}
+
+#endif
+
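To use the watchpoint, uncomment the ENABLE_OBJECT_WATCH define and rebuild. watched_p() then compares each object against the global object_watch pointer, which ObjectMemory's constructor (below) fills in from the WATCH environment variable, parsed as a decimal address. With the define left commented out, watched_p() is a constant false and all of the checks compile away.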
vm/objectmemory.cpp (45 changed lines)
@@ -10,12 +10,22 @@
namespace rubinius {
+ Object* object_watch = 0;
+
/* ObjectMemory methods */
- ObjectMemory::ObjectMemory(STATE, size_t young_bytes):
- state(state),
- young(this, young_bytes),
- mature(this),
- contexts(cContextHeapSize) {
+ ObjectMemory::ObjectMemory(STATE, size_t young_bytes)
+ : state(state)
+ , young(this, young_bytes)
+ , mature(this)
+ , immix_(this)
+ , contexts(cContextHeapSize)
+ {
+
+ // TODO Not sure where this code should be...
+ if(char* num = getenv("WATCH")) {
+ object_watch = (Object*)strtol(num, NULL, 10);
+ std::cout << "Watching for " << object_watch << "\n";
+ }
remember_set = new ObjectArray(0);
@@ -41,6 +51,8 @@ namespace rubinius {
young.free_objects();
mature.free_objects();
+ // TODO free immix data
+
delete remember_set;
for(size_t i = 0; i < LastObjectType; i++) {
@@ -77,8 +89,16 @@ namespace rubinius {
stats::GCStats::get()->objects_promoted++;
#endif
- Object* copy = mature.copy_object(obj);
+ Object* copy = immix_.allocate(obj->size_in_bytes());
+ copy->initialize_copy(obj, 0);
+ copy->copy_body(obj);
+
copy->zone = MatureObjectZone;
+
+ if(watched_p(obj)) {
+ std::cout << "detected object " << obj << " during promotion.\n";
+ }
+
return copy;
}
@@ -91,7 +111,10 @@ namespace rubinius {
}
void ObjectMemory::collect_mature(Roots &roots, CallFrameLocationList& call_frames) {
+ immix_.collect(roots, call_frames);
mature.collect(roots, call_frames);
+
+ immix_.unmark_all(roots, call_frames);
young.clear_marks();
clear_context_marks();
}
@@ -145,10 +168,18 @@ namespace rubinius {
if(obj == NULL) {
collect_young_now = true;
state->interrupts.check = true;
- obj = mature.allocate(bytes, &collect_mature_now);
+
+ obj = immix_.allocate(bytes);
+ if(collect_mature_now) {
+ state->interrupts.check = true;
+ }
}
}
+ if(watched_p(obj)) {
+ std::cout << "detected " << obj << " during allocation\n";
+ }
+
obj->clear_fields();
return obj;
}
vm/objectmemory.hpp (7 changed lines)
@@ -3,6 +3,8 @@
#include "gc_marksweep.hpp"
#include "gc_baker.hpp"
+#include "gc_immix.hpp"
+
#include "prelude.hpp"
#include "type_info.hpp"
@@ -47,6 +49,9 @@ namespace rubinius {
ObjectArray *remember_set;
BakerGC young;
MarkSweepGC mature;
+
+ ImmixGC immix_;
+
Heap contexts;
size_t last_object_id;
TypeInfo* type_info[(int)LastObjectType];
@@ -159,7 +164,7 @@ namespace rubinius {
void write_barrier(Object* target, Object* val) {
if(target->Remember) return;
if(!REFERENCE_P(val)) return;
- if(target->zone != MatureObjectZone) return;
+ if(target->zone == YoungObjectZone) return;
if(val->zone != YoungObjectZone) return;
remember_object(target);
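This widens the write barrier: previously only stores into mature objects were remembered, whereas now any store of a young reference into a non-young target (mature or large) lands in the remember set.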
vm/oop.hpp (8 changed lines)
@@ -110,9 +110,9 @@ const int cUndef = 0x22L;
typedef enum
{
UnspecifiedZone = 0,
- MatureObjectZone = 1,
- YoungObjectZone = 2,
- LargeObjectZone = 3,
+ LargeObjectZone = 1,
+ MatureObjectZone = 2,
+ YoungObjectZone = 3,
} gc_zone;
/* the sizeof(class ObjectHeader) must be an increment of the platform
@@ -151,6 +151,8 @@ const int cUndef = 0x22L;
unsigned int IsTainted : 1;
unsigned int IsFrozen : 1;
unsigned int RefsAreWeak : 1;
+
+ unsigned int InImmix : 1;
};
uint32_t all_flags;
};
vm/test/test_gc_immix.hpp (531 changed lines)
@@ -0,0 +1,531 @@
+
+#include <cxxtest/TestSuite.h>
+#include "util/immix.hpp"
+
+struct SimpleObject {
+ bool marked, body_checked;
+ immix::Address fwd;
+ int magic, size;
+ SimpleObject* sub;
+};
+
+class SimpleObjectDescriber {
+public:
+ typedef SimpleObject Object;
+
+ void set_forwarding_pointer(immix::Address from, immix::Address to) {
+ from.as<SimpleObject>()->fwd = to;
+ }
+
+ immix::Address forwarding_pointer(immix::Address cur) {
+ SimpleObject* obj = cur.as<SimpleObject>();
+ if(obj->fwd.is_null()) return 0;
+ return obj->fwd;
+ }
+
+ immix::Address copy(immix::Address original, immix::Allocator& alloc) {
+ immix::Address copy_addr = alloc.allocate(sizeof(SimpleObject));
+ SimpleObject* copy = copy_addr.as<SimpleObject>();
+ SimpleObject* orig = original.as<SimpleObject>();
+
+ copy->fwd = 0;
+ copy->magic = orig->magic;
+ copy->sub = orig->sub;
+
+ return copy;
+ }
+
+ bool mark_address(immix::Address addr, immix::MarkStack& ms) {
+ SimpleObject* obj = addr.as<SimpleObject>();
+ if(obj->marked) return false;
+
+ obj->marked = true;
+ ms.push_back(obj);
+ return true;
+ }
+
+ void walk_pointers(immix::Address addr, immix::Marker<SimpleObjectDescriber>& mark) {
+ SimpleObject* obj = addr.as<SimpleObject>();
+ obj->body_checked = true;
+ if(obj->sub) {
+ mark.mark_address(obj->sub);
+ }
+ }
+
+ int size(immix::Address addr) {
+ SimpleObject* obj = addr.as<SimpleObject>();
+
+ if(obj->size == 0) return sizeof(SimpleObject);
+
+ return obj->size;
+ }
+
+ void added_chunk(int size) { }
+};
+
+class TestImmixGC : public CxxTest::TestSuite {
+public:
+
+ typedef immix::GC<SimpleObjectDescriber> GC;
+
+ GC* gc;
+
+ void setUp() {
+ gc = new GC;
+ }
+
+ void tearDown() {
+ delete gc;
+ }
+
+ void test_get_block() {
+ immix::Block& block = gc->get_block();
+ TS_ASSERT_EQUALS(block.size(), immix::cBlockSize);
+ TS_ASSERT(block.address() != 0);
+ TS_ASSERT_EQUALS(block.status(), immix::cFree);
+ TS_ASSERT_EQUALS(block.lines_used(), 0);
+ }
+
+ void test_Block_is_line_free() {
+ immix::Block& block = gc->get_block();
+ TS_ASSERT(block.is_line_free(0));
+ block.mark_line(0);
+ TS_ASSERT(!block.is_line_free(0));
+ }
+
+ void test_Block_address_of_line() {
+ immix::Block& block = gc->get_block();
+ immix::Address top = block.address();
+ TS_ASSERT_EQUALS(block.address_of_line(0), top);
+ TS_ASSERT_EQUALS(block.address_of_line(1), top + immix::cLineSize);
+ }
+
+ void test_SingleBlockAllocator_allocate() {
+ immix::Block& block = gc->get_block();
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address addr = alloc.allocate(24);
+ immix::Address top = block.first_address();
+ TS_ASSERT_EQUALS(addr, top);
+
+ immix::Address another = alloc.allocate(24);
+ TS_ASSERT_EQUALS(another, top + 24);
+ }
+
+ void test_SingleBlockAllocator_allocate_checks_mark_on_spill() {
+ immix::Block& block = gc->get_block();
+ immix::Address top = block.address();
+
+ block.mark_line(1);
+ immix::SingleBlockAllocator alloc(block);
+ alloc.allocate(96);
+
+ immix::Address addr = alloc.allocate(64);
+ TS_ASSERT_EQUALS(addr, top + (immix::cLineSize * 2));
+ }
+
+ void test_SingleBlockAllocator_allocate_spans_next_line() {
+ immix::Block& block = gc->get_block();
+ immix::Address top = block.first_address();
+
+ immix::SingleBlockAllocator alloc(block);
+ int size = immix::cLineSize - sizeof(immix::BlockHeader) - 4;
+ alloc.allocate(size);
+ TS_ASSERT(sizeof(SimpleObject) > 4);
+ immix::Address addr = alloc.allocate(sizeof(SimpleObject));
+
+ TS_ASSERT_EQUALS(addr, top + size);
+
+ immix::Address addr2 = alloc.allocate(immix::cLineSize + 4);
+ immix::Address addr3 = alloc.allocate(4);
+ TS_ASSERT_EQUALS(addr2, addr + sizeof(SimpleObject));
+ TS_ASSERT_EQUALS(addr3, addr2 + (immix::cLineSize + 4));
+ }
+
+ void test_SingleBlockAllocator_allocate_spans_lines() {
+ immix::Block& block = gc->get_block();
+ immix::Address top = block.first_address();
+
+ immix::SingleBlockAllocator alloc(block);
+ alloc.allocate(24);
+ int size = (immix::cLineSize * 2) + 32;
+ immix::Address big = alloc.allocate(size);
+ TS_ASSERT_EQUALS(big, top + 24);
+
+ immix::Address addr2 = alloc.allocate(24);
+ TS_ASSERT_EQUALS(addr2, big + size);
+ }
+
+ void test_SingleBlockAllocator_allocate_skips_marked_lines() {
+ immix::Block& block = gc->get_block();
+ immix::Address top = block.address();
+ block.mark_line(0);
+ block.mark_line(2);
+ block.mark_line(4);
+ block.mark_line(5);
+ block.mark_line(7);
+
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address addr = alloc.allocate(24);
+ TS_ASSERT_EQUALS(addr, top + immix::cLineSize);
+
+ immix::Address addr2 = alloc.allocate(24);
+ TS_ASSERT_EQUALS(addr2, addr + 24);
+
+ immix::Address addr3 = alloc.allocate(128);
+ TS_ASSERT_EQUALS(addr3, top + (immix::cLineSize * 3));
+
+ immix::Address addr4 = alloc.allocate(156);
+ TS_ASSERT_EQUALS(addr4, top + (immix::cLineSize * 8));
+ }
+
+ void test_SingleBlockAllocator_allocate_indicates_failure() {
+ immix::Block& block = gc->get_block();
+ immix::Address top = block.address();
+
+ for(int i = 0; i < immix::cLineTableSize; i++) {
+ block.mark_line(i);
+ }
+
+ block.free_line(1);
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address small = alloc.allocate(24);
+ TS_ASSERT(!small.is_null());
+
+ immix::Address addr = alloc.allocate(156);
+ TS_ASSERT(addr.is_null());
+ }
+
+ void test_sweep_blocks_frees_empty_blocks() {
+ immix::Block& block = gc->get_block();
+
+ gc->sweep_blocks();
+ immix::Block& block2 = gc->get_block();
+ TS_ASSERT_EQUALS(&block, &block2);
+ }
+
+ void test_sweep_blocks_sorts_blocks() {
+ immix::Block& block = gc->get_block();
+ immix::Block& block2 = gc->get_block();
+ immix::Block& block3 = gc->get_block();
+
+ block.set_status(immix::cUnavailable);
+ block2.set_status(immix::cRecyclable);
+ gc->evacuate_block(block3);
+ TS_ASSERT_EQUALS(block3.status(), immix::cEvacuate);
+
+ gc->sweep_blocks();
+
+ TS_ASSERT_EQUALS(block3.status(), immix::cFree);
+ }
+
+ void test_Block_update_stats() {
+ immix::Block& block = gc->get_block();
+
+ block.mark_line(0);
+ block.mark_line(1);
+ block.mark_line(3);
+ block.mark_line(5);
+ block.mark_line(7);
+ block.mark_line(10);
+
+ block.update_stats();
+ TS_ASSERT_EQUALS(block.status(), immix::cRecyclable);
+ TS_ASSERT_EQUALS(block.holes(), 5);
+ TS_ASSERT_EQUALS(block.lines_used(), 6);
+ }
+
+ void test_Block_update_stats_finds_empty_blocks() {
+ immix::Block& block = gc->get_block();
+
+ block.set_status(immix::cRecyclable);
+ block.update_stats();
+ TS_ASSERT_EQUALS(block.status(), immix::cFree);
+ TS_ASSERT_EQUALS(block.holes(), 1);
+ TS_ASSERT_EQUALS(block.lines_used(), 0);
+ }
+
+ void test_Block_update_stats_finds_unavailable_blocks() {
+ immix::Block& block = gc->get_block();
+
+ for(int i = 0; i < immix::cLineTableSize; i++) {
+ block.mark_line(i);
+ }
+
+ block.update_stats();
+ TS_ASSERT_EQUALS(block.status(), immix::cUnavailable);
+ TS_ASSERT_EQUALS(block.holes(), 0);
+ TS_ASSERT_EQUALS(block.lines_used(), immix::cLineTableSize);
+ }
+
+ void test_get_block_returns_recyclable_blocks() {
+ immix::Block& block = gc->get_block();
+ block.set_status(immix::cRecyclable);
+
+ gc->sweep_blocks();
+
+ immix::Block& block2 = gc->get_block();
+
+ TS_ASSERT_EQUALS(&block2, &block);
+ }
+
+ void test_mark_address_updates_block() {
+ immix::Block& block = gc->get_block();
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address addr = alloc.allocate(24);
+
+ TS_ASSERT(block.is_line_free(0));
+ gc->mark_address(addr, alloc);
+ TS_ASSERT(!block.is_line_free(0));
+ }
+
+ void test_mark_address_ignores_already_marked_objects() {
+ immix::Block& block = gc->get_block();
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address addr = alloc.allocate(24);
+
+ addr.as<SimpleObject>()->marked = true;
+
+ TS_ASSERT(block.is_line_free(0));
+ gc->mark_address(addr, alloc);
+ TS_ASSERT(block.is_line_free(0));
+ }
+
+ void test_mark_address_returns_forwarding_pointer() {
+ immix::Block& block = gc->get_block();
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address addr = alloc.allocate(24);
+
+ // Clear out fwd to be sure it's not set
+ addr.as<SimpleObject>()->fwd = 0;
+
+ immix::Block& block2 = gc->get_block();
+ immix::SingleBlockAllocator alloc2(block2);
+ immix::Address addr2 = alloc2.allocate(24);
+
+ gc->describer().set_forwarding_pointer(addr, addr2);
+
+ immix::Address out = gc->mark_address(addr, alloc);
+
+ TS_ASSERT_EQUALS(addr.as<SimpleObject>()->fwd, addr2);
+ TS_ASSERT_EQUALS(out, addr2);
+ }
+
+ void test_mark_address_can_move_objects() {
+ immix::Block& block = gc->get_block();
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address addr = alloc.allocate(sizeof(SimpleObject));
+
+ addr.as<SimpleObject>()->magic = 0xdecafbad;
+
+ immix::Block& dest = gc->get_block();
+ immix::SingleBlockAllocator dest_alloc(dest);
+
+ block.set_status(immix::cEvacuate);
+
+ immix::Address redirect = gc->mark_address(addr, dest_alloc);
+
+ immix::Address fwd = gc->describer().forwarding_pointer(addr);
+ TS_ASSERT_EQUALS(fwd, dest.first_address());
+ TS_ASSERT_EQUALS(fwd, redirect);
+
+ TS_ASSERT_EQUALS(fwd.as<SimpleObject>()->magic, 0xdecafbad);
+ }
+
+ void test_mark_address_calls_describer() {
+ immix::Block& block = gc->get_block();
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address addr = alloc.allocate(sizeof(SimpleObject));
+
+ SimpleObject* obj = addr.as<SimpleObject>();
+ immix::Address addr2 = alloc.allocate(sizeof(SimpleObject));
+ obj->sub = addr2.as<SimpleObject>();
+
+ obj->marked = false;
+
+ gc->mark_address(addr, alloc);
+
+ TS_ASSERT_EQUALS(obj->marked, true);
+ TS_ASSERT_EQUALS(gc->mark_stack().size(), 1);
+ TS_ASSERT_EQUALS(gc->mark_stack()[0], addr);
+ }
+
+ void test_mark_address_marks_all_lines_for_object() {
+ immix::Block& block = gc->get_block();
+ immix::SingleBlockAllocator alloc(block);
+ int size = immix::cLineSize - sizeof(immix::BlockHeader) - 4;
+ alloc.allocate(size);
+ TS_ASSERT(sizeof(SimpleObject) > 4);
+ immix::Address addr = alloc.allocate(sizeof(SimpleObject));
+
+ gc->mark_address(addr, alloc);
+ TS_ASSERT(!block.is_line_free(0));
+ TS_ASSERT(!block.is_line_free(1));
+
+ int big_size = immix::cLineSize * 3;
+ immix::Address addr2 = alloc.allocate(big_size);
+ addr2.as<SimpleObject>()->size = big_size;
+
+ gc->mark_address(addr2, alloc);
+ TS_ASSERT(!block.is_line_free(1));
+ TS_ASSERT(!block.is_line_free(2));
+ TS_ASSERT(!block.is_line_free(3));
+ TS_ASSERT(!block.is_line_free(4));
+ }
+
+ void test_process_mark_stack() {
+ immix::Block& block = gc->get_block();
+ immix::SingleBlockAllocator alloc(block);
+ immix::Address addr = alloc.allocate(sizeof(SimpleObject));
+ immix::Address addr2 = alloc.allocate(sizeof(SimpleObject));
+
+ SimpleObject* obj = addr.as<SimpleObject>();
+ SimpleObject* sub = addr2.as<SimpleObject>();
+
+ obj->marked = false;
+ obj->sub = sub;
+ obj->body_checked = false;
+
+ sub->marked = false;
+ sub->sub = 0;
+ sub->body_checked = false;
+
+ gc->mark_address(addr, alloc);
+ TS_ASSERT_EQUALS(obj->marked, true);
+
+ gc->process_mark_stack(alloc);
+ TS_ASSERT_EQUALS(obj->body_checked, true);
+ TS_ASSERT_EQUALS(sub->marked, true);
+ TS_ASSERT_EQUALS(sub->body_checked, true);
+ }
+
+ void test_BlockAllocator_reset_updates_block_stats() {
+ immix::Block& block = gc->get_block();
+
+ block.mark_line(1);
+ block.mark_line(3);
+
+ TS_ASSERT_EQUALS(block.lines_used(), 0);
+ immix::BlockAllocator& ba = gc->block_allocator();
+ ba.reset();
+
+ TS_ASSERT_EQUALS(block.lines_used(), 2);
+ }
+
+ void test_BlockAllocator_get_free_block() {
+ immix::BlockAllocator& ba = gc->block_allocator();
+ ba.add_chunk();
+ ba.current_chunk().get_block(0).set_status(immix::cRecyclable);
+ immix::Block& block = ba.get_free_block();
+ TS_ASSERT_EQUALS(block.status(), immix::cFree);
+ }
+
+ void test_BlockAllocator_uses_all_available_chunks_first() {
+ immix::BlockAllocator& ba = gc->block_allocator();
+ immix::Chunk *c1 = ba.add_chunk();
+ immix::Chunk* c2 = ba.add_chunk();
+ ba.reset();
+
+ TS_ASSERT_EQUALS(&ba.current_chunk(), c1);
+
+ for(int i = 0; i < immix::cBlocksPerChunk + 1; i++) {
+ ba.get_block();
+ }
+
+ TS_ASSERT_EQUALS(&ba.current_chunk(), c2);
+ }
+
+ void test_BlockAllocator_skips_unavailable_blocks() {
+ immix::BlockAllocator& ba = gc->block_allocator();
+ immix::Chunk* chunk = ba.add_chunk();
+ ba.reset();
+ chunk->get_block(0).set_status(immix::cUnavailable);
+
+ immix::Block& block = ba.get_block();
+
+ TS_ASSERT_EQUALS(&block, &chunk->get_block(1));
+ }
+
+ void test_BlockAllocator_skips_evacuating_blocks() {
+ immix::BlockAllocator& ba = gc->block_allocator();
+ immix::Chunk* chunk = ba.add_chunk();
+ ba.reset();
+ chunk->get_block(0).set_status(immix::cEvacuate);
+
+ immix::Block& block = ba.get_block();
+
+ TS_ASSERT_EQUALS(&block, &chunk->get_block(1));
+ }
+
+ void test_ExpandingAllocator_allocate_pulls_new_blocks() {
+ immix::ExpandingAllocator ea(gc->block_allocator());
+ immix::Block& block = ea.current_block();
+
+ for(int i = 0; i < immix::cLineTableSize; i++) {
+ block.mark_line(i);
+ }
+
+ block.free_line(1);
+
+ ea.resync_position();
+
+ immix::Address small = ea.allocate(24);
+ TS_ASSERT(!small.is_null());
+
+ immix::Address addr = ea.allocate(156);
+ TS_ASSERT(!addr.is_null());
+
+ immix::Block& block2 = ea.current_block();
+
+ TS_ASSERT(&block != &block2);
+ }
+
+ void test_ExpandingAllocator_allocate_searches_for_new_holes() {
+ immix::ExpandingAllocator ea(gc->block_allocator());
+ immix::Block& block = ea.current_block();
+
+ block.mark_line(1);
+
+ ea.resync_position();
+
+ immix::Address small = ea.allocate(24);
+ TS_ASSERT(!small.is_null());
+
+ immix::Address addr = ea.allocate(156);
+ TS_ASSERT(!addr.is_null());
+
+ immix::Block& block2 = ea.current_block();
+
+ TS_ASSERT(&block == &block2);
+ }
+
+ void test_HoleFinder_find_hole_on_empty_block() {
+ immix::Block& block = gc->get_block();
+ immix::HoleFinder alloc;
+ alloc.reset(&block);
+
+ alloc.find_hole();
+
+ TS_ASSERT_EQUALS(alloc.cursor(), block.first_address());
+ TS_ASSERT_EQUALS(alloc.limit(), block.address() + immix::cBlockSize);
+ TS_ASSERT_EQUALS(alloc.hole_start_line(), immix::cLineTableSize);
+ }
+
+ void test_HoleFinder_find_hole_with_hole_in_middle() {
+ immix::Block& block = gc->get_block();
+ block.mark_line(11);
+
+ immix::HoleFinder alloc;
+
+ // .reset calls find_hole
+ alloc.reset(&block);
+
+ TS_ASSERT_EQUALS(alloc.cursor(), block.first_address());
+ TS_ASSERT_EQUALS(alloc.limit(), block.address_of_line(11));
+ TS_ASSERT_EQUALS(alloc.hole_start_line(), 11);
+
+ alloc.find_hole();
+ TS_ASSERT_EQUALS(alloc.cursor(), block.address_of_line(12));
+ TS_ASSERT_EQUALS(alloc.limit(), block.address() + immix::cBlockSize);
+ TS_ASSERT_EQUALS(alloc.hole_start_line(), immix::cLineTableSize);
+ }
+};
vm/type_info.cpp (11 changed lines)
@@ -90,6 +90,17 @@ namespace rubinius {
}
}
+ /* By default, just call auto_visit(). This exists so that
+ * other types can overload this to perform work before or
+ * after auto_visit is done. */
+ void TypeInfo::visit(Object* obj, ObjectVisitor& visit) {
+ auto_visit(obj, visit);
+ }
+
+ void TypeInfo::auto_visit(Object* obj, ObjectVisitor& visit) {
+ // Must be implemented in subclasses!
+ }
+
void TypeInfo::class_info(STATE, const Object* self, bool newline) {
std::cout << const_cast<Object*>(self)->to_s(state, true)->c_str();
if(newline) std::cout << std::endl;
vm/type_info.hpp (6 changed lines)
@@ -13,6 +13,7 @@ namespace rubinius {
class Class;
class Object;
class ObjectMark;
+ class ObjectVisitor;
class ObjectMemory;
/**
@@ -50,6 +51,7 @@ namespace rubinius {
static void auto_init(ObjectMemory* om);
static void auto_learn_fields(STATE);
virtual void auto_mark(Object* obj, ObjectMark& mark);
+ virtual void auto_visit(Object* obj, ObjectVisitor& visit);
public: /* Ctors */
@@ -92,6 +94,8 @@ namespace rubinius {
virtual void cleanup(Object* obj);
virtual void mark(Object* obj, ObjectMark& mark);
+ virtual void visit(Object* obj, ObjectVisitor& visit);
+
virtual void set_field(STATE, Object* target, size_t index, Object* val);
virtual Object* get_field(STATE, Object* target, size_t index);
@@ -157,6 +161,7 @@ namespace rubinius {
#define BASIC_TYPEINFO(super) \
Info(object_type type, bool cleanup = false) : super(type, cleanup) { } \
virtual void auto_mark(Object* obj, ObjectMark& mark); \
+ virtual void auto_visit(Object* obj, ObjectVisitor& visit); \
virtual void set_field(STATE, Object* target, size_t index, Object* val); \
virtual Object* get_field(STATE, Object* target, size_t index);
@@ -172,6 +177,7 @@ namespace rubinius {
#define BASIC_TYPEINFO_WITH_CLEANUP(super) \
Info(object_type type, bool cleanup = true) : super(type, true) { } \
virtual void auto_mark(Object* obj, ObjectMark& mark); \
+ virtual void auto_visit(Object* obj, ObjectVisitor& visit); \
virtual void cleanup(Object* obj); \
virtual void set_field(STATE, Object* target, size_t index, Object* val); \
virtual Object* get_field(STATE, Object* target, size_t index);
vm/util/immix.hpp (652 changed lines)
@@ -0,0 +1,652 @@
+#ifndef RBX_UTIL_IMMIX
+#define RBX_UTIL_IMMIX
+
+#include <list>
+#include <stdint.h>
+#include <vector>
+
+#include <sys/mman.h>
+
+namespace immix {
+
+ /* A wonderful little class that is always used as a value, never
+ * a reference or pointer. It allows us to implement our own
+ * pointer logic. */
+ struct Address {
+ uintptr_t address_;
+
+ Address(void* addr)
+ : address_(reinterpret_cast<intptr_t>(addr))
+ {}
+
+ operator void*() {
+ return reinterpret_cast<void*>(address_);
+ }
+
+ Address operator+(int change) {
+ return Address(reinterpret_cast<void*>(address_ + change));
+ }
+
+ Address operator+=(int change) {
+ address_ += change;
+ return *this;
+ }
+
+ Address operator-(Address change) {
+ return Address(reinterpret_cast<void*>(address_ - change.address_));
+ }
+
+ Address operator-(int change) {
+ return Address(reinterpret_cast<void*>(address_ - change));
+ }
+
+ Address operator&(uintptr_t mask) {
+ return Address(reinterpret_cast<void*>(address_ & mask));
+ }
+
+ bool is_null() {
+ return address_ == 0;
+ }
+
+ static Address null() {
+ return Address(0);
+ }
+
+ intptr_t as_int() {
+ return address_;
+ }
+
+ template <typename T>
+ T* as() {
+ return reinterpret_cast<T*>(reinterpret_cast<void*>(address_));
+ }
+ };
+
+ const int cBlockSize = 32768;
+ const int cBlockMask = cBlockSize - 1;
+ const int cLineSize = 128;
+ const int cLineMask = cLineSize - 1;
+ const int cLineTableSize = cBlockSize / cLineSize;
+ const int cLineBits = 7;
+ const int cChunkSize = 1024 * 1024;
+ const int cBlocksPerChunk = cChunkSize / cBlockSize;
+ const int cMediumObjectLimit = cLineSize * 4; // TODO calculate this
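+ // Derived values, for reference: a 32 KB block of 128-byte lines has
+ // 256 line-mark entries (cLineTableSize), a 1 MB chunk holds 32 blocks
+ // (cBlocksPerChunk), and the medium object limit works out to 512 bytes.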
+
+ enum BlockStatus {
+ cFree,
+ cRecyclable,
+ cUnavailable,
+ cEvacuate
+ };
+
+ typedef uint8_t LineEntry;
+
+ class Block;
+
+ struct BlockHeader {
+ Block* block;
+ };
+
+ class Block {
+ Address address_;
+ BlockStatus status_;
+ int holes_;
+ int lines_used_;
+ LineEntry lines_[cLineTableSize];
+
+ public:
+ Block()
+ : address_(0)
+ , status_(cFree)
+ , holes_(0)
+ , lines_used_(0)
+ {
+ memset(lines_, 0, sizeof(lines_));
+ }
+
+ void set_address(Address addr) {
+ address_ = addr;
+ }
+
+ static int size() {
+ return immix::cBlockSize;
+ }
+
+ int holes() const {
+ return holes_;
+ }
+
+ Address address() const {
+ return address_;
+ }
+
+ Address first_address() {
+ return address_ + sizeof(BlockHeader);
+ }
+
+ BlockStatus status() const {
+ return status_;
+ }
+
+ void set_status(BlockStatus status) {
+ status_ = status;
+ }
+
+ int lines_used() const {
+ return lines_used_;
+ }
+
+ void mark_line(int line) {
+ lines_[line] = 1;
+ }
+
+ void free_line(int line) {
+ lines_[line] = 0;
+ }
+
+ bool is_line_free(int line) {
+ return lines_[line] == 0;
+ }
+
+ int offset_of_line(int line) {
+ return line * cLineSize;
+ }
+
+ Address address_of_line(int line) {
+ return address_ + (line * cLineSize);
+ }
+
+ static Block* from_address(Address addr) {
+ Address base = addr & ~cBlockMask;
+ BlockHeader* header = base.as<BlockHeader>();
+ return header->block;
+ }
+
+ static Address align(Address addr) {
+ return addr & ~cBlockMask;
+ }
+
+ void mark_address(Address addr, int size) {
+ Address offset = addr - address_;
+ int line = offset.as_int() / cLineSize;
+ mark_line(line);
+
+ // the immix paper talks about doing conservative line marking
+ // here. We're going to do accurate for now.
+
+ int line_offset = (addr & cLineMask).as_int();
+ int additional_lines = ((line_offset + size - 1) >> cLineBits);
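+ // For example, a 200-byte object starting 100 bytes into its line
+ // spans two more lines: (100 + 200 - 1) >> 7 == 2.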
+
+ for(int i = 1; i <= additional_lines; i++) {
+ mark_line(line + i);
+ }
+ }
+
+ void update_stats() {
+ holes_ = 0;
+ lines_used_ = 0;
+ bool in_hole = false;
+ for(int i = 0; i < cLineTableSize; i++) {
+ if(lines_[i] == 0) {
+ if(!in_hole) holes_++;
+ in_hole = true;
+ } else {
+ in_hole = false;
+ lines_used_++;
+ }
+ }
+
+ if(lines_used_ == 0) {
+ status_ = cFree;
+ } else if(holes_ > 0) {
+ status_ = cRecyclable;
+ } else {
+ status_ = cUnavailable;
+ }
+ }
+
+ bool usable() {
+ return status_ == cFree || status_ == cRecyclable;
+ }
+ };
+
+ typedef std::vector<Block*> Blocks;
+
+ class Chunk {
+ Address system_base_;
+ int system_size_;
+
+ Address base_;
+ Block blocks_[cBlocksPerChunk];
+
+ public:
+ Chunk()
+ : system_base_(0)
+ , base_(0)
+ {
+ base_ = mmap(0, cChunkSize, PROT_EXEC | PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE, 0, 0);
+
+ if(base_.as_int() == -1) {
+ perror("mmap");
+ abort();
+ }
+
+ // Best case scenario
+ if(base_ == Block::align(base_)) {
+ system_base_ = base_;
+ system_size_ = cChunkSize;
+ } else {
+ assert(munmap(base_, cChunkSize) == 0);
+
+ system_size_ = cChunkSize + cBlockSize;
+ system_base_ = mmap(0, system_size_, PROT_EXEC | PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE, 0, 0);
+
+ base_ = Block::align(system_base_ + cBlockSize);
+ }
+
+ add_blocks();
+ }
+
+ void free() {
+ if(munmap(system_base_, system_size_) != 0) {
+ perror("munmap");
+ abort();
+ }
+ }
+
+ Address base() {
+ return base_;
+ }
+
+ static int size() {
+ return cChunkSize;
+ }
+
+ void add_blocks() {
+ assert(base_ == Block::align(base_));
+
+ Address current = base_;
+
+ for(int index = 0; index < cBlocksPerChunk; index++) {
+ Block& block = blocks_[index];
+ block.set_address(current);
+ BlockHeader* header = current.as<BlockHeader>();
+ header->block = &block;
+ current += cBlockSize;
+ }
+ }
+
+ Block& get_block(int index) {
+ return blocks_[index];
+ }
+
+ void update_stats() {
+ for(int i = 0; i < cBlocksPerChunk; i++) {
+ blocks_[i].update_stats();
+ }
+ }
+ };
+
+ typedef std::vector<Chunk*> Chunks;
+
+ class Triggers {
+ public:
+ virtual ~Triggers() { }
+ virtual void added_chunk(int count) = 0;
+ };
+
+ class BlockAllocator {
+ Triggers& triggers_;
+ Chunks chunks_;
+ Chunk* current_chunk_;
+ size_t chunk_cursor_;
+ size_t block_cursor_;
+
+ // Used when allocating free-only blocks.
+ size_t free_chunk_cursor_;
+ size_t free_block_cursor_;
+ public:
+
+ BlockAllocator(Triggers& trig)
+ : triggers_(trig)
+ , current_chunk_(0)
+ , chunk_cursor_(0)
+ , block_cursor_(0)
+ , free_chunk_cursor_(0)
+ , free_block_cursor_(0)
+ {}
+
+ ~BlockAllocator() {
+ for(Chunks::iterator i = chunks_.begin();
+ i != chunks_.end();
+ i++) {
+ Chunk* chunk = *i;
+ chunk->free();
+ }
+ }
+
+ Chunks& chunks() {
+ return chunks_;
+ }
+
+ Chunk& current_chunk() {
+ return *current_chunk_;
+ }
+
+ Block& get_block() {
+ if(current_chunk_ == 0) {
+ add_chunk();
+ return current_chunk_->get_block(0);
+ }
+
+ for(;;) {
+ if(block_cursor_ >= (size_t)cBlocksPerChunk) {
+ chunk_cursor_++;
+ if(chunk_cursor_ >= chunks_.size()) {
+ add_chunk();
+ } else {
+ block_cursor_ = 0;
+ }
+
+ current_chunk_ = chunks_[chunk_cursor_];
+ }
+
+ Block& block = current_chunk_->get_block(block_cursor_++);
+ if(block.usable()) return block;
+ }
+ }
+
+ Block& get_free_block() {
+ if(current_chunk_ == 0) {
+ add_chunk();
+ return current_chunk_->get_block(0);
+ }
+
+ for(int i = block_cursor_; i < cBlocksPerChunk; i++) {
+ Block& block = current_chunk_->get_block(i);
+ if(block.status() == cFree) return block;
+ }
+
+ add_chunk();
+ return current_chunk_->get_block(0);
+ }
+
+ Chunk* add_chunk() {
+ chunks_.push_back(new Chunk);
+ Chunk* chunk = chunks_.back();
+ chunk_cursor_ = chunks_.size() - 1;
+ block_cursor_ = 1;
+ current_chunk_ = chunk;
+
+ triggers_.added_chunk(chunks_.size());
+ return chunk;
+ }
+
+ void reset() {
+ for(Chunks::iterator i = chunks_.begin();
+ i != chunks_.end();
+ i++) {
+ Chunk* chunk = *i;
+ chunk->update_stats();
+ }
+ chunk_cursor_ = 0;
+ block_cursor_ = 0;
+ current_chunk_ = chunks_[chunk_cursor_];
+ }
+
+ };
+
+ class Allocator {
+ public:
+ virtual ~Allocator() {}
+ virtual Address allocate(int bytes) = 0;
+ };
+
+ class HoleFinder {
+ protected:
+ Address cursor_;
+ Address limit_;
+ int hole_start_line_;
+ Block* block_;
+
+ public:
+ HoleFinder()
+ : cursor_(0)
+ , limit_(0)
+ , hole_start_line_(0)
+ , block_(0)
+ {}
+
+ Address cursor() {
+ return cursor_;
+ }
+
+ Address limit() {
+ return limit_;
+ }
+
+ int hole_start_line() {
+ return hole_start_line_;
+ }
+
+ Block& block() {
+ return *block_;
+ }
+
+ bool reset(Block* block = 0) {
+ if(block) block_ = block;
+ hole_start_line_ = 0;
+ return find_hole();
+ }
+
+ bool find_hole() {
+ for(; hole_start_line_ < cLineTableSize; hole_start_line_++) {
+ if(block_->is_line_free(hole_start_line_)) {
+ cursor_ = block_->address_of_line(hole_start_line_);
+
+ // Compensate for the header
+ if(hole_start_line_ == 0) {
+ cursor_ = block_->first_address();
+ }
+
+ while(hole_start_line_ < cLineTableSize &&
+ block_->is_line_free(hole_start_line_)) {
+ hole_start_line_++;
+ }
+
+ limit_ = block_->address_of_line(hole_start_line_);
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ Address bump(int size) {
+ Address alloc = cursor_;
+ cursor_ += size;
+ return alloc;
+ }
+
+ };
+
+ class SingleBlockAllocator : public HoleFinder, public Allocator {
+ public:
+
+ SingleBlockAllocator(Block& block) {
+ assert(reset(&block));
+ }
+
+ Address allocate(int size) {
+ while(cursor_ + size > limit_) {
+ if(!find_hole()) {
+ return Address::null();
+ }
+ }
+
+ return bump(size);
+ }
+ };
+
+ class ExpandingAllocator : public HoleFinder, public Allocator {
+ BlockAllocator& block_allocator_;
+
+ public:
+ ExpandingAllocator(BlockAllocator& ba)
+ : block_allocator_(ba)
+ {
+ get_new_block();
+ }
+
+ Block& current_block() {
+ return block();
+ }
+
+ void get_new_block(bool free_only = false) {
+ for(;;) {
+ Block* block = &block_allocator_.get_block();
+ /*
+ *
+ * DISABLED FOR NOW.
+ if(free_only) {
+ block = &block_allocator_.get_free_block();
+ } else {
+ block = &block_allocator_.get_block();
+ }
+ */
+
+ if(reset(block)) break;
+ }
+ }
+
+ void resync_position() {
+ if(!reset()) get_new_block();
+ }
+
+ Address allocate(int size) {
+ while(cursor_ + size > limit_) {
+ if(!find_hole()) {
+ get_new_block(size >= cMediumObjectLimit);
+ }
+ }
+
+ return bump(size);
+ }
+ };
+
+ typedef std::vector<Address> MarkStack;
+
+ template <typename Describer> class GC;
+
+ template <typename Describer>
+ class Marker {
+ GC<Describer>* gc;
+ Allocator& alloc;
+
+ public:
+ Marker(GC<Describer>* gc, Allocator& alloc)
+ : gc(gc)
+ , alloc(alloc)
+ {}
+
+ void mark_address(Address addr) {
+ gc->mark_address(addr, alloc);
+ }
+ };
+
+ typedef std::list<Block*> BlockList;
+
+ template <typename Describer>
+ class GC : public Triggers {
+ BlockList evacuate_;
+ BlockAllocator block_allocator_;
+
+ Describer desc;
+ MarkStack mark_stack_;
+
+ public:
+ GC()
+ : block_allocator_(*this)
+ {}
+
+ Describer& describer() {
+ return desc;
+ }
+
+ MarkStack& mark_stack() {
+ return mark_stack_;
+ }
+
+ BlockAllocator& block_allocator() {
+ return block_allocator_;
+ }
+
+ Block& get_block() {
+ return block_allocator_.get_block();
+ }
+
+ void evacuate_block(Block& block) {
+ block.set_status(cEvacuate);
+ evacuate_.push_back(&block);
+ }
+
+ void sweep_blocks() {
+ for(BlockList::const_iterator i = evacuate_.begin();
+ i != evacuate_.end();
+ i++) {
+ Block* block = *i;
+ if(block->status() == cEvacuate) {
+ block->set_status(cFree);
+ }
+ }
+
+ block_allocator_.reset();
+ }
+
+ Address mark_address(Address addr, Allocator& alloc) {
+ Address fwd = desc.forwarding_pointer(addr);
+
+ if(!fwd.is_null()) {
+ addr = fwd;
+ }
+
+ // Returns false if addr is already marked; if so, we don't
+ // do the block marking logic again.
+ if(!desc.mark_address(addr, mark_stack_)) {
+ return addr;
+ }
+
+ Block* block = Block::from_address(addr);
+ if(block->status() == cEvacuate) {
+ fwd = desc.copy(addr, alloc);
+ desc.set_forwarding_pointer(addr, fwd);
+
+ addr = fwd;
+ block = Block::from_address(addr);
+ }
+
+ block->mark_address(addr, desc.size(addr));
+
+ return addr;
+ }
+
+ void process_mark_stack(Allocator& alloc) {
+ Marker<Describer> mark(this, alloc);
+
+ // Use while() since mark_stack_ is modified as we walk it.
+ while(!mark_stack_.empty()) {
+ Address addr = mark_stack_.back();
+ mark_stack_.pop_back();
+ desc.walk_pointers(addr, mark);
+ }
+ }
+
+ void added_chunk(int count) {
+ desc.added_chunk(count);
+ }
+ };
+}
+
+#endif
