Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Add/update Doxygen class comments for several classes

  • Loading branch information...
commit 2749c19dd87fa0909af4125a8eb9a0eb3c026f68 1 parent d3e44c5
@agardiner agardiner authored
View
23 vm/drivers/cli.cpp
@@ -16,18 +16,19 @@ using namespace rubinius;
static void check_directory(std::string root);
-/* The main function here handles the CL arguments passed to it.
- * It then boots the VM, runs the appropriate file (`loader`),
- * and returns 0. If there is an Assertion raised or an Exception,
- * it prints the backtrace supplied. This function is the wrapper for
- * the entire VM, as it deals with anything that could possibly happen
- * to the VM. It's like the person playing whack-a-mole, in that if
- * something tries to come out of the VM that's evil (such as a failed
- * assertion or exception), it catches it and skins it and shows it to
- * the user.
+/**
+ * Main rbx entry point.
*
- * Note: Although Rubinius is gathering support for multiple VMs, this
- * function does not deal with that subject.
+ * The main function here handles the environment settings and command-line
+ * arguments passed to it. It then boots the VM, runs the appropriate file
+ * (`loader`), and returns 0 if no errors occur along the way.
+ *
+ * If there is an Assertion raised or an Exception, it prints the backtrace
+ * supplied. This function is the wrapper for the entire VM, as it deals with
+ * anything that could possibly happen to the VM. It's like the person
+ * playing whack-a-mole, in that if something tries to come out of the VM
+ * that's evil (such as a failed assertion or exception), it catches it and
+ * skins it and shows it to the user.
*/
int main(int argc, char** argv) {
Environment env(argc, argv);
View
38 vm/environment.cpp
@@ -1,5 +1,3 @@
-/* An Environment is the toplevel class for Rubinius. It manages multiple
- * VMs, as well as imports C data from the process into Rubyland. */
#include "config.h"
#include "prelude.hpp"
#include "environment.hpp"
@@ -495,6 +493,9 @@ namespace rubinius {
state->shared.stop_the_world(state);
}
+ /**
+ * Returns the exit code to use when exiting the rbx process.
+ */
int Environment::exit_code() {
#ifdef ENABLE_LLVM
@@ -528,16 +529,19 @@ namespace rubinius {
agent->run();
}
- /* Loads the runtime kernel files. They're stored in /kernel.
- * These files consist of classes needed to bootstrap the kernel
- * and just get things started in general.
+ /**
+ * Loads the runtime kernel files stored in /runtime.
+ * These files consist of the compiled Ruby /kernel code in .rbc files, which
+ * are needed to bootstrap the Ruby kernel.
+ * This method is called after the VM has completed bootstrapping, and is
+ * ready to load Ruby code.
*
- * @param root [String] The file root for /kernel. This expects to find
- * alpha.rbc (will compile if not there).
- * @param env [Environment&] The environment for Rubinius. It is the uber
- * manager for multiple VMs and process-Ruby interaction.
+ * @param root The path to the /runtime directory. All kernel loading is
+ * relative to this path.
*/
void Environment::load_kernel(std::string root) {
+ // Check that the index file exists; this tells us which sub-directories to
+ // load, and the order in which to load them
std::string index = root + "/index";
std::ifstream stream(index.c_str());
if(!stream) {
@@ -545,12 +549,9 @@ namespace rubinius {
exit(1);
}
- // Load the ruby file to prepare for bootstrapping Ruby!
- // The bootstrapping for the VM is already done by the time we're here.
-
- // First, pull in the signature file. This helps control when .rbc files need
- // to be discarded.
-
+ // Pull in the signature file; this helps control when .rbc files need to
+ // be discarded and recompiled due to changes to the compiler since the
+ // .rbc files were created.
std::string sig_path = root + "/signature";
std::ifstream sig_stream(sig_path.c_str());
if(sig_stream) {
@@ -587,6 +588,13 @@ namespace rubinius {
}
}
+ /**
+ * Runs rbx from the filesystem, loading the Ruby kernel files relative to
+ * the supplied root directory.
+ *
+ * @param root The path to the Rubinius /runtime directory, which contains
+ * the loader.rbc and kernel files.
+ */
void Environment::run_from_filesystem(std::string root) {
int i = 0;
state->set_stack_start(&i);
View
16 vm/environment.hpp
@@ -13,7 +13,9 @@ namespace rubinius {
class ConfigParser;
class QueryAgent;
- // throw when there is a bad signature on a kernel .rbc file.
+ /**
+ * Thrown when there is a bad signature on a kernel .rbc file.
+ */
class BadKernelFile : public std::runtime_error {
public:
BadKernelFile(const std::string& str)
@@ -21,10 +23,22 @@ namespace rubinius {
{}
};
+
+ /**
+ * The environment context under which Rubinius virtual machines are executed.
+ *
+ * Environment and configuration data is processed and stored in an Environment
+ * instance, which uses this information to bootstrap the VM. It also stores
+ * all runtime shared state.
+ */
+
class Environment {
int argc_;
char** argv_;
+ /// Signature to be used to verify the validity of .rbc files.
+ /// If the signature in a .rbc file does not match this value, the file
+ /// needs to be recompiled.
uint64_t signature_;
public:
View
47 vm/gc/baker.cpp
@@ -19,6 +19,16 @@
#include "capi/tag.hpp"
namespace rubinius {
+
+ /**
+ * Creates a BakerGC of the specified size.
+ *
+ * The requested size is allocated as a contiguous heap, which is then split
+ * into three spaces:
+ * - Eden, which gets half of the heap
+ * - Heap A and Heap B, which get one quarter of the heap each. Heaps A and B
+ * alternate between being the Current and Next space on each collection.
+ */
BakerGC::BakerGC(ObjectMemory *om, size_t bytes)
: GarbageCollector(om)
, full(bytes * 2)
@@ -38,6 +48,15 @@ namespace rubinius {
BakerGC::~BakerGC() { }
+ /**
+ * Called for each object in the young generation that is seen during garbage
+ * collection. An object is seen by scanning from the root objects to all
+ * reachable objects. Therefore, only reachable objects will be seen, and
+ * reachable objects may be seen more than once.
+ *
+ * Returns the new address for the object, so that the source reference can
+ * be updated when the object has been moved.
+ */
Object* BakerGC::saw_object(Object* obj) {
Object* copy;
@@ -76,6 +95,10 @@ namespace rubinius {
return copy;
}
+
+ /**
+ * Scans the remaining unscanned portion of the Next heap.
+ */
void BakerGC::copy_unscanned() {
Object* iobj = next->next_unscanned(object_memory_->state());
@@ -86,7 +109,14 @@ namespace rubinius {
}
}
+
+ /**
+ * Returns true if the young generation has been fully scanned in the
+ * current collection.
+ */
bool BakerGC::fully_scanned_p() {
+ // Note: The spaces are swapped at the start of collection, which is why we
+ // check the Next heap
return next->fully_scanned_p();
}
@@ -98,7 +128,9 @@ namespace rubinius {
const static int cUnderFullTimes = -3;
const static size_t cMaximumLifetime = 6;
- /* Perform garbage collection on the young objects. */
+ /**
+ * Perform garbage collection on the young objects.
+ */
void BakerGC::collect(GCData& data, YoungCollectStats* stats) {
#ifdef RBX_GC_STATS
stats::GCStats::get()->bytes_copied.start();
@@ -115,6 +147,7 @@ namespace rubinius {
copy_spills_ = 0;
reset_promoted();
+ // Start by copying objects in the remember set
for(ObjectArray::iterator oi = current_rs->begin();
oi != current_rs->end();
++oi) {
@@ -227,11 +260,10 @@ namespace rubinius {
assert(fully_scanned_p());
// We're now done seeing the entire object graph of normal, live references.
// Now we get to handle the unusual references, like finalizers and such.
- //
- /* Update finalizers. Doing so can cause objects that would have just died
- * to continue life until we can get around to running the finalizer. That
- * more promoted objects, etc. */
+ // Update finalizers. Doing so can cause objects that would have just died
+ // to continue life until we can get around to running the finalizer. That
+ // means more promoted objects, etc.
check_finalize();
// Run promotions again, because checking finalizers can keep more objects
@@ -246,10 +278,10 @@ namespace rubinius {
// find_lost_souls();
#endif
- /* Check any weakrefs and replace dead objects with nil*/
+ // Check any weakrefs and replace dead objects with nil
clean_weakrefs(true);
- /* Swap the 2 halves */
+ // Swap the 2 halves
Heap *x = next;
next = current;
current = x;
@@ -265,6 +297,7 @@ namespace rubinius {
stats->excess_objects = copy_spills_;
}
+ // Tune the age at which promotion occurs
if(autotune_) {
double used = current->percentage_used();
if(used > cOverFullThreshold) {
View
136 vm/gc/baker.hpp
@@ -25,17 +25,62 @@ namespace rubinius {
class GCData;
struct YoungCollectStats;
+
+ /**
+ * The BakerGC garbage collector is used for managing memory in the young
+ * generation.
+ *
+ * The BakerGC takes advantage of the fact that most newly created objects
+ * will die young. As a result, most objects encountered when collecting the
+ * young generation will be garbage; this makes it efficient to copy the
+ * surviving objects to a new location, and then reclaim the memory from the
+ * collected area in one step.
+ *
+ * Memory for the young generation is thus split into three spaces:
+ * - the Eden space, where new objects are initially allocated;
+ * - the Current space, where young objects that have survived at least one
+ * collection (but not enough for promotion to the mature generation) are
+ * located; and
+ * - the Next space, where surviving objects from the Eden and Current spaces
+ * will be copied on the next collection.
+ *
+ * When a young generation collection is required, the garbage collector
+ * scans the Eden and Current spaces. When it finds a live object, it either
+ * copies it to the Next space, or promotes it to the mature generation,
+ * depending on how many collections it has survived.
+ *
+ * On the rare occasions that there is insufficient space in the Next space to
+ * hold all surviving young objects during a collection, then all remaining
+ * live young objects are promoted directly to the mature generation.
+ *
+ * At completion, the allocation pointer is reset in both the Eden and
+ * Current spaces, and then the Current and Next spaces are swapped.
+ */
+
class BakerGC : public GarbageCollector {
+ /// The total memory heap allocated to the BakerGC
Heap full;
+ /// The Eden space heap, which is a subset of the full heap
Heap eden;
+ /// The A heap, which is a subset of the full heap
Heap heap_a;
+ /// The B heap, which is a subset of the full heap
Heap heap_b;
+ /// Pointer to the Current space heap (i.e. Heap A or B)
Heap *current;
+ /// Pointer to the Next space heap (i.e. Heap B or A)
Heap *next;
public:
+ /// Total number of objects currently allocated in the young generation
+ /// Note: This does not include objects allocated in the Eden space in
+ /// thread-local slabs.
size_t total_objects;
+ /**
+ * Allocates a slab of memory from the Eden space for use as a thread-local
+ * allocation area that can be used without locking.
+ */
void* allocate_for_slab(size_t bytes) {
if(!eden.enough_space_p(bytes)) {
return NULL;
@@ -45,12 +90,21 @@ namespace rubinius {
addr = eden.allocate(bytes);
+ // @todo Return the bytes we just grabbed
if(eden.over_limit_p(addr)) return NULL;
return addr;
}
/* Inline methods */
+
+ /**
+ * Attempts to allocate an object of the specified size from the Eden heap.
+ * Unlike allocate, the header of the returned object is not initialized.
+ *
+ * If there is insufficient space remaining, NULL is returned and the
+ * limit_hit parameter is set to true.
+ */
Object* raw_allocate(size_t bytes, bool* limit_hit) {
Object* obj;
@@ -86,6 +140,16 @@ namespace rubinius {
return obj;
}
+
+ /**
+ * Attempts to allocate an object of the specified size from the Eden heap.
+ *
+ * If successful, the returned object's header is initialized to the young
+ * generation.
+ *
+ * If there is insufficient space remaining, NULL is returned and the
+ * limit_hit parameter is set to true.
+ */
Object* allocate(size_t bytes, bool* limit_hit) {
Object* obj;
@@ -124,46 +188,92 @@ namespace rubinius {
}
private:
+ /// Count of the number of objects that were promoted to the mature
+ /// generation on the current collection due to lack of space in the Next
+ /// space.
int copy_spills_;
+ /// True if the young to mature promotion threshold should be auto-tuned
bool autotune_;
+
+ /// Count of the successive number of times a collection has gone over
+ /// the cOverFullThreshold (positive value) or under the
+ /// cUnderFullThreshold (negative value). Used to auto-tune the age
+ /// at which objects are promoted to the mature generation.
int tune_threshold_;
+
+ /// The original configured lifetime objects had to survive before being
+ /// promoted to the mature generation
size_t original_lifetime_;
+
+ /// The current lifetime objects have to survive before being promoted
size_t lifetime_;
+ /// Count of the number of objects promoted on the current collection
int promoted_objects_;
+
+ /// List of objects that have been promoted to the mature generation, but
+ /// not yet scanned
ObjectArray promoted_stack_;
+ /**
+ * Adds the specified object to the promoted objects stack.
+ * Objects on the promoted_objects_ stack must be scanned after the
+ * remaining young objects have been copied, since only then can we know
+ * which references from the promoted object need to be added to the
+ * remember set.
+ */
void promoted_push(Object* obj) {
promoted_objects_++;
promoted_stack_.push_back(obj);
}
+ /**
+ * Reset the list of promoted objects.
+ */
void reset_promoted() {
promoted_objects_ = 0;
}
public:
+
+ /**
+ * Returns the number of bytes used in the Eden space.
+ */
size_t bytes_used() {
return eden.used();
}
+ /**
+ * Sets the age at which objects will be promoted from the young generation
+ * to the mature generation.
+ */
void set_lifetime(int val) {
lifetime_ = original_lifetime_ = val;
}
+ /**
+ * Controls whether the BakerGC is self-tuning.
+ *
+ * This involves tweaking the lifetime threshold at which objects are
+ * promoted to the mature generation, based on how full the Next space is
+ * following a collection.
+ */
void set_autotune(bool val = true) {
autotune_ = val;
}
+ /// Returns true if the BakerGC is auto-tuning
bool autotune() {
return autotune_;
}
+ /// Returns the start address of the young generation
void* start_address() {
return full.start();
}
+ /// Returns the last address in the young generation
void* last_address() {
return full.last();
}
@@ -172,22 +282,48 @@ namespace rubinius {
/* Prototypes */
BakerGC(ObjectMemory *om, size_t size);
virtual ~BakerGC();
+
+ /// Called for each object "seen" during the collection of the young
+ /// generation.
virtual Object* saw_object(Object* obj);
+
+ /// Scans the remaining unscanned portion of what was the Current space.
void copy_unscanned();
+
+ /// Returns true if the young generation has been fully scanned in the
+ /// current collection.
bool fully_scanned_p();
+
+ /// Performs a collection of the young generation.
void collect(GCData& data, YoungCollectStats* stats = 0);
+
+ /// Clears the marks on all young generation objects.
void clear_marks();
+
+ /// Gathers statistics on the age of objects at the time of their death.
void find_lost_souls();
+
+ /// Scans the finalizers for young generation objects that need to be kept
+ /// alive.
void check_finalize();
+
+ /// Scans any references held by objects that have been promoted to the
+ /// mature generation.
void handle_promotions();
+ /// Validates the specified object, and returns an ObjectPosition value
+ /// indicating in which space in the young generation the object lies.
ObjectPosition validate_object(Object* obj);
+
+ /// Returns true if the specified object is in the Current space.
bool in_current_p(Object* obj);
private:
/* Private for inlining */
+ /// Given an object, finds the next object following it.
Object* next_object(Object* obj);
};
+
};
#endif
View
22 vm/gc/code_manager.hpp
@@ -7,9 +7,31 @@ namespace rubinius {
class CodeResource;
class SharedState;
+
+ /**
+ * Manages memory for code-based resources that are owned by Ruby objects,
+ * such as VMMethod instances, JIT code, FFI resources etc.
+ *
+ * These objects are not directly accessible via Ruby code, but are used by
+ * the VM to support the running of Ruby code. As such, these objects also
+ * need to be garbage collected, since references to these VM internal
+ * objects are only held by Ruby objects (e.g. methods); when the associated
+ * Ruby object is no longer reachable, these objects can also be collected.
+ *
+ * Unlike the other memory managers that manage memory for Ruby objects,
+ * objects managed by this class are created and destroyed using new and
+ * delete.
+ */
+
class CodeManager {
const static int cDefaultChunkSize = 64;
+ /**
+ * A chunk of memory used to store an array of references to CodeResource
+ * instances. Chunks also maintain a next pointer, so that a linked list
+ * of Chunks can be created for handling an arbitrarily large list of
+ * CodeResource references.
+ */
struct Chunk {
CodeResource** resources;
Chunk* next;
View
16 vm/gc/gc.cpp
@@ -40,9 +40,11 @@ namespace rubinius {
return object_memory_->state();
}
- /* Understands how to read the inside of an object and find all references
+ /**
+ * Understands how to read the inside of an object and find all references
* located within. It copies the objects pointed to, but does not follow into
- * those further (ie, not recursive) */
+ * those further (i.e. not recursive)
+ */
void GarbageCollector::scan_object(Object* obj) {
Object* slot;
@@ -85,6 +87,11 @@ namespace rubinius {
}
}
+
+ /**
+ * Removes an object from the remembered set, ensuring it will be collected
+ * if no other live references to the object exist.
+ */
void GarbageCollector::delete_object(Object* obj) {
if(obj->remembered_p()) {
object_memory_->unremember_object(obj);
@@ -121,6 +128,10 @@ namespace rubinius {
}
}
+
+ /**
+ * Walks the chain of objects accessible from the specified CallFrame.
+ */
void GarbageCollector::walk_call_frame(CallFrame* top_call_frame) {
CallFrame* call_frame = top_call_frame;
while(call_frame) {
@@ -273,4 +284,5 @@ namespace rubinius {
delete weak_refs_;
weak_refs_ = NULL;
}
+
}
View
17 vm/gc/gc.hpp
@@ -74,11 +74,17 @@ namespace rubinius {
}
};
+
+ /**
+ * Base class for the various garbage collectors.
+ */
class GarbageCollector {
protected:
+ /// Reference to the ObjectMemory we are collecting
ObjectMemory* object_memory_;
private:
+ /// Array of weak references
ObjectArray* weak_refs_;
public:
@@ -88,6 +94,8 @@ namespace rubinius {
if(weak_refs_) delete weak_refs_;
}
+ /// Subclasses implement appropriate behaviour for handling a live object
+ /// encountered during garbage collection.
virtual Object* saw_object(Object*) = 0;
void scan_object(Object* obj);
void delete_object(Object* obj);
@@ -109,6 +117,15 @@ namespace rubinius {
VM* state();
+ /**
+ * Adds a weak reference to the specified object.
+ *
+ * A weak reference provides a way to hold a reference to an object without
+ * that reference being sufficient to keep the object alive. If no other
+ * reference to the weak-referenced object exists, it can be collected by
+ * the garbage collector, with the weak-reference subsequently returning
+ * null.
+ */
void add_weak_ref(Object* obj) {
if(!weak_refs_) {
weak_refs_ = new ObjectArray;
View
81 vm/gc/heap.hpp
@@ -3,34 +3,78 @@
#include "builtin/object.hpp"
namespace rubinius {
+ // @todo Replace this with the Address class from Immix
typedef void *address;
class Object;
class VM;
+
+ /**
+ * A Heap is a chunk of memory used by the Baker GC.
+ *
+ * Memory allocations are made out of the Heap using bump pointer allocation,
+ * until the heap free space is exhausted. To assist in garbage collection,
+ * the heap also manages a scan pointer, which identifies the last object
+ * visited during a linear scan of the heap.
+ */
+
class Heap {
+ /// Address at which the heap starts
address start_;
+
+ /// Next free memory in the heap
address current_;
+
+ /// Address at which the heap ends
address last_;
+
+ /// Next object to visit in a linear scan of the heap
address scan_;
+
+ /// A red-line near the end of the heap; once the heap reaches this limit,
+ /// a garbage collection will be scheduled.
address limit_;
+ /// Size of the heap in bytes
size_t size_;
+
+ /// True if this heap is the owner of the underlying memory.
+ /// Heaps can be shared; the BakerGC uses a single large heap to allocate
+ /// memory, and then creates three smaller heaps within the large heap for
+ /// the nursery, heap A and heap B spaces.
bool owner_;
public:
/* Inline methods */
+
+ /**
+ * Returns the address at which the next allocation will take place.
+ */
address current() {
return current_;
}
+ /**
+ * Returns the address of the last byte in the heap.
+ */
address last() {
return last_;
}
+ /**
+ * Returns the address of the start of the heap.
+ */
address start() {
return start_;
}
+ /**
+ * Allocates the requested number of +size+ bytes from the heap.
+ * Note: This method does not check if the allocation exceeds the heap
+ * size, so the caller should have previously checked this.
+ *
+ * @todo Check that callers use this method in a thread-safe way
+ */
address allocate(size_t size) {
address addr;
addr = current_;
@@ -39,31 +83,51 @@ namespace rubinius {
return addr;
}
+ /**
+ * Returns +size+ bytes back to the heap.
+ */
void put_back(size_t size) {
current_ = (address)((uintptr_t)current_ - size);
}
+ /**
+ * True if this heap contains the object at the specified address.
+ */
bool contains_p(address addr) {
if(addr < start_) return false;
if(addr >= last_) return false;
return true;
}
+ // @todo Why do we have two functions that do exactly the same thing?
bool in_current_p(address addr) {
if(addr < start_) return false;
if(addr >= current_) return false;
return true;
}
+ /**
+ * True if the current usage of the heap is at or over the red-line of the
+ * limit_ marker.
+ */
bool over_limit_p(void* ptr) {
return (address)ptr >= limit_;
}
+ /**
+ * Returns true if the heap contains enough free space to allocate the
+ * specified number of bytes.
+ */
bool enough_space_p(size_t size) {
if((uintptr_t)current_ + size > (uintptr_t)last_) return false;
return true;
}
+ /**
+ * Attempts to allocate +size+ bytes. If enough free space exists, the
+ * allocation succeeds. If insufficient free space is available, the
+ * allocation fails and 0 is returned.
+ */
address try_allocate(size_t size) {
address addr = current_;
address next = (address)((uintptr_t)current_ + size);
@@ -73,10 +137,17 @@ namespace rubinius {
return addr;
}
+ /**
+ * True if a linear scan of the heap has now visited every object.
+ */
bool fully_scanned_p() {
return scan_ == current_;
}
+ /**
+ * Returns the next object from the current scan location, advancing the
+ * scan_ pointer in the process.
+ */
Object* next_unscanned(VM* state) {
Object* obj;
if(fully_scanned_p()) return NULL;
@@ -86,15 +157,23 @@ namespace rubinius {
return obj;
}
+ /**
+ * Returns the first Object in the heap.
+ */
Object* first_object() {
return (Object*)start_;
}
- // Set the scan pointer to +addr+
+ /**
+ * Set the scan pointer to +addr+
+ */
void set_scan(address addr) {
scan_ = addr;
}
+ /**
+ * Returns the percentage of the heap that is currently used.
+ */
double percentage_used() {
size_t used = (size_t)current_ - (size_t)start_;
return ((double)used/ (double)size_) * 100.0;
View
19 vm/gc/inflated_headers.cpp
@@ -13,6 +13,12 @@ namespace rubinius {
}
}
+ /**
+ * Allocates a new InflatedHeader object for the specified obj ObjectHeader.
+ *
+ * \param obj The ObjectHeader that is to be inflated.
+ * \return The InflatedHeader representing the new inflated object header.
+ */
InflatedHeader* InflatedHeaders::allocate(ObjectHeader* obj) {
if(!free_list_) allocate_chunk();
InflatedHeader* header = free_list_;
@@ -24,6 +30,10 @@ namespace rubinius {
return header;
}
+ /**
+ * Allocates a new chunk of storage for InflatedHeader objects, and then
+ * adds each InflatedHeader slot to the free list.
+ */
void InflatedHeaders::allocate_chunk() {
InflatedHeader* chunk = new InflatedHeader[cChunkSize];
for(size_t i = 0; i < cChunkSize; i++) {
@@ -35,6 +45,15 @@ namespace rubinius {
chunks_.push_back(chunk);
}
+ /**
+ * Scans the list of InflatedHeader objects checking to see which are in use.
+ * Those that do not have the appropriate mark value set are cleared and
+ * added back to the free list. Chunks that are completely unused are removed
+ * from the linked list.
+ *
+ * \param mark The current value of the mark; only InflatedHeaders that bear
+ * this mark will be retained.
+ */
void InflatedHeaders::deallocate_headers(int mark) {
// Detect and free any full chunks first!
for(Chunks::iterator i = chunks_.begin();
View
21 vm/gc/inflated_headers.hpp
@@ -4,14 +4,35 @@ namespace rubinius {
class ObjectHeader;
class InflatedHeader;
+
+ /**
+ * Manages a list of InflatedHeader instances.
+ *
+ * Storage for InflatedHeader objects are allocated in chunks, and these are
+ * in turn stored in a linked list. As ObjectHeader instances are inflated,
+ * they are added to the next free spot, or if no slots exist, a new chunk
+ * is allocated.
+ *
+ * As InflatedHeader instances are deflated, they are added to free_list_,
+ * which re-uses the storage of the InflatedHeader to link to the next free
+ * InflatedHeader slot.
+ */
+
class InflatedHeaders {
public:
typedef std::list<InflatedHeader*> Chunks;
+
+ /// Storage for InflatedHeader references is allocated in chunks.
static const size_t cChunkSize = 1024;
private:
+ /// Linked list of InflatedHeader pointers.
Chunks chunks_;
+
+ /// Pointer to the first free InflatedHeader slot in the list.
InflatedHeader* free_list_;
+
+ /// Number of in-use slots
int in_use_;
public:
View
23 vm/gc/object_mark.cpp
@@ -10,6 +10,14 @@ namespace rubinius {
return gc->state();
}
+ /**
+ * Checks the reference to the target obj, and if it is valid, notifies the GC
+ * that the object is still alive.
+ *
+ * \param obj The object reference to be marked as still reachable/alive.
+ * \return The location of the object after GC has processed the object,
+ * which may involve moving it.
+ */
Object* ObjectMark::call(Object* obj) {
if(!obj->reference_p()) return NULL;
#ifdef RBX_DEBUG
@@ -22,6 +30,14 @@ namespace rubinius {
return gc->saw_object(obj);
}
+ /**
+ * Updates the reference pos in target with the new location of val, and
+ * then runs the write-barrier.
+ *
+ * \param target The object containing the reference to val.
+ * \param pos A pointer to the location in target that references val.
+ * \param val The object being referenced.
+ */
void ObjectMark::set(Object* target, Object** pos, Object* val) {
*pos = val;
if(val->reference_p()) {
@@ -29,9 +45,16 @@ namespace rubinius {
}
}
+ /**
+ * Runs the write-barrier for the reference from target to val.
+ *
+ * /param target The object containing the reference to val.
+ * /param val The object being referenced.
+ */
void ObjectMark::just_set(Object* target, Object* val) {
if(val->reference_p()) {
gc->object_memory_->write_barrier(target, val);
}
}
}
+
View
6 vm/gc/object_mark.hpp
@@ -7,9 +7,13 @@ namespace rubinius {
class GarbageCollector;
class Object;
+
/**
- * Implementation in gc.cpp for now..
+ * Used by GarbageCollector implementations to mark objects that are still
+ * reachable. ObjectMark carries a GarbageCollector instance with it as it
+ * visits an object and its contents.
*/
+
class ObjectMark {
public:
GarbageCollector* gc;
View
43 vm/gc/write_barrier.hpp
@@ -5,13 +5,43 @@
namespace rubinius {
namespace gc {
+
+ /**
+ * A write barrier keeps track of references from older generations to the
+ * young generation.
+ *
+ * A write barrier allows the young generation to be garbage collected without
+ * inspecting every object in the older generations (mature and large object)
+ * to determine if a young object is still referenced (i.e. alive).
+ * As most cross-generation references are from young objects to mature ones,
+ * remembering the rarer instances when a mature object has a reference to a
+ * young object results in a smaller set of objects to scan when collecting
+ * the young generation.
+ *
+ * The set of locations containing pointers to young generation objects is
+ * known as the remembered set. At every store, the system must ensure that
+ * the updated location is added to the remembered set if the store creates
+ * a reference from an older generation object to a young generation object.
+ *
+ * The remembered set is cleared on each collection of the young generation,
+ * as scanning objects during the collection will re-remember objects that
+ * continue to hold a reference to young generation objects.
+ */
+
class WriteBarrier {
thread::SpinLock lock_;
protected:
+
+ /// Holds a list of remembered Object instances in the mature generation
+ /// that contain one or more references to young generation objects.
ObjectArray* remember_set_;
public:
+
+ /**
+ * Returns the current remembered set of mature objects.
+ */
ObjectArray* remember_set() {
return remember_set_;
}
@@ -19,6 +49,14 @@ namespace gc {
WriteBarrier();
~WriteBarrier();
+ /**
+ * Checks if the store is creating a reference from a mature generation
+ * object (target) to a young generation object (val). If it is, the mature
+ * object is added to the remember set.
+ *
+ * \param target The object holding the reference (i.e. the referer).
+ * \param val The object being referenced (i.e. the referee).
+ */
void write_barrier(Object* target, Object* val) {
if(target->remembered_p()) return;
if(!REFERENCE_P(val)) return;
@@ -28,11 +66,16 @@ namespace gc {
remember_object(target);
}
+ /// Adds the target object directly to the remembered set.
void remember_object(Object* target);
+
+ /// Removes the target object from the remembered set.
void unremember_object(Object* target);
+ /// Returns the current remember set, and replaces it with a new, empty one.
ObjectArray* swap_remember_set();
};
+
}}
#endif
View
16 vm/objectmemory.cpp
@@ -1346,7 +1346,9 @@ namespace rubinius {
}
};
-// Used in gdb
+
+// Memory utility functions for use in gdb
+
void x_memstat() {
rubinius::VM::current()->om->memstats();
}
@@ -1363,6 +1365,18 @@ void x_print_snapshot() {
rubinius::VM::current()->om->print_new_since_snapshot();
}
+
+// The following memory functions are defined in ruby.h for use by C-API
+// extensions, and also used by library code lifted from MRI (e.g. Oniguruma).
+// They provide some book-keeping around memory usage for non-VM code, so that
+// the garbage collector is run periodically in response to memory allocations
+// in non-VM code.
+// Without these checks, memory can become exhausted without the VM being aware
+// there is a problem. As this memory may only be being used by Ruby objects
+// that have become garbage, performing a garbage collection periodically after
+// a significant amount of memory has been malloc-ed should keep non-VM memory
+// usage from growing uncontrollably.
+
#define DEFAULT_MALLOC_THRESHOLD 10000000
static long bytes_until_collection = DEFAULT_MALLOC_THRESHOLD;
View
72 vm/objectmemory.hpp
@@ -22,24 +22,6 @@ namespace rubinius {
class Object;
- /* ObjectMemory is the primary API that the rest of the VM uses to interact
- * with actions such as allocating objects, storing data in objects, and
- * perform garbage collection.
- *
- * It is current split between 2 generations, the BakerGC, which handles
- * the young objects, and the MarkSweepGC, which handles the mature.
- *
- * Basic tasks:
- *
- * Allocate an object of a given class and number of fields.
- * If the object is large, it's put to start in the mature space,
- * otherwise in the young space.
- *
- * Detection of memory condition requiring collection of both generations
- * independently.
- *
- */
-
struct CallFrame;
class GCData;
class Configuration;
@@ -70,21 +52,58 @@ namespace rubinius {
{}
};
+
+ /**
+ * ObjectMemory is the primary API that the rest of the VM uses to interact
+ * with actions such as allocating objects, storing data in objects, and
+ * performing garbage collection.
+ *
+ * It is currently split between 3 generations, the BakerGC, which handles
+ * the young objects, the ImmixGC which handles mature objects, and the
+ * MarkSweepGC, which handles large objects.
+ *
+ * ObjectMemory also manages the memory used for CodeResources, which are
+ * internal objects used for executing Ruby code. This includes VMMethod,
+ * various JIT classes, and FFI data.
+ *
+ * Basic tasks:
+ * - Allocate an object of a given class and number of fields. If the object
+ * is large, it's allocated in the large object space, otherwise in the
+ * young space.
+ * - Detection of memory condition requiring collection of the young and
+ * mautre generations independently.
+ */
+
class ObjectMemory : public gc::WriteBarrier, public Lockable {
+ /// BakerGC used for the young generation
BakerGC* young_;
+
+ /// MarkSweepGC used for the large object store
MarkSweepGC* mark_sweep_;
+ /// ImmixGC used for the mature generation
ImmixGC* immix_;
+
+ /// Storage for all InflatedHeader instances.
InflatedHeaders* inflated_headers_;
unsigned int mark_;
+
+ /// Garbage collector for CodeResource objects.
CodeManager code_manager_;
std::list<FinalizeObject> finalize_;
std::list<FinalizeObject*> to_finalize_;
bool allow_gc_;
+ /// List of additional write-barriers that may hold references to young
+ /// objects.
std::list<gc::WriteBarrier*> aux_barriers_;
+
+ /// Size of slabs to be allocated to threads for lockless thread-local
+ /// allocations.
size_t slab_size_;
+
+ /// True if finalizers are currently being run.
bool running_finalizers_;
thread::Condition contention_var_;
@@ -99,16 +118,27 @@ namespace rubinius {
TypeInfo* type_info[(int)LastObjectType];
/* Config variables */
+ /// Threshhold size at which an object is considered a large object, and
+ /// therefore allocated in the large object space.
size_t large_object_threshold;
/* Stats */
+ /// Total number of objects allocated to date.
size_t objects_allocated;
+
+ /// Total number of bytes allocated to date.
size_t bytes_allocated;
+ /// Total number of young generation collections to date.
size_t young_collections;
+
+ /// Total number of full collections to date.
size_t full_collections;
+ /// Total amount of time spent collecting the young generation to date.
size_t young_collection_time;
+
+ /// Total amount of time spent performing full collections to date.
size_t full_collection_time;
public:
@@ -144,11 +174,17 @@ namespace rubinius {
allow_gc_ = false;
}
+ /**
+ * Adds an additional write-barrier to the auxilliary write-barriers list.
+ */
void add_aux_barrier(STATE, gc::WriteBarrier* wb) {
SYNC(state);
aux_barriers_.push_back(wb);
}
+ /**
+ * Removes a write-barrier from the auxilliary wirte-barriers list.
+ */
void del_aux_barrier(STATE, gc::WriteBarrier* wb) {
SYNC(state);
aux_barriers_.remove(wb);
View
7 vm/oop.hpp
@@ -221,7 +221,14 @@ const int cUndef = 0x22L;
class Handle;
}
+
+ /**
+ * An InflatedHeader is used on the infrequent occasions when an Object needs
+ * to store more metadata than can fit in the ObjectHeader HeaderWord struct.
+ */
class InflatedHeader {
+ // Treat the header as either storage for the ObjectFlags, or as a pointer
+ // to the next free InflatedHeader in the InflatedHeaders free list.
union {
ObjectFlags flags_;
InflatedHeader* next_;
View
6 vm/shared_state.cpp
@@ -97,7 +97,7 @@ namespace rubinius {
this->ref();
- // If there is no root vm, then the first on created becomes it.
+ // If there is no root vm, then the first one created becomes it.
if(!root_vm_) root_vm_ = vm;
return vm;
}
@@ -161,7 +161,9 @@ namespace rubinius {
}
}
- // Create the preemption thread and call scheduler_loop() in the new thread
+ /**
+ * Create the preemption thread and call scheduler_loop() in the new thread.
+ */
void SharedState::enable_preemption() {
interrupts.enable_preempt = true;
}
View
11 vm/shared_state.hpp
@@ -67,6 +67,17 @@ namespace rubinius {
}
};
+
+ /**
+ * SharedState represents the global shared state that needs to be shared
+ * across all VM instances.
+ *
+ * Rubinius makes no use of global variables; instead, all shared state is
+ * stored in a reference counted instance of this class. This makes it
+ * possible in theory to have multiple independent Rubinius runtimes in a
+ * single process.
+ */
+
class SharedState : public RefCount, public Lockable {
private:
bool initialized_;
View
14 vm/vm.cpp
@@ -51,9 +51,11 @@ namespace rubinius {
unsigned long VM::cStackDepthMax = 655300;
#ifndef RBX_WINDOWS
- // getrlimit can report there is 4G of stack (ie, unlimited).
- // Even when there is unlimited stack, we clamp the max to
- // this value (currently 128M)
+ /**
+ * Maximum amount of stack space to use.
+ * getrlimit can report there is 4G of stack (ie, unlimited). Even when
+ * there is unlimited stack, we clamp the max to this value (currently 128M).
+ */
static rlim_t cMaxStack = (1024 * 1024 * 128);
#endif
@@ -159,10 +161,16 @@ namespace rubinius {
#endif
}
+ /**
+ * Returns the current VM executing on this pthread.
+ */
VM* VM::current() {
return ManagedThread::current()->as_vm();
}
+ /**
+ * Sets this VM instance as the current VM on this pthread.
+ */
void VM::set_current(VM* vm) {
ManagedThread::set_current(vm);
}
View
42 vm/vm.hpp
@@ -71,6 +71,16 @@ namespace rubinius {
eNone, ePrivate, eProtected, eSuper, eVCall, eNormal
};
+
+ /**
+ * Represents an execution context for running Ruby code.
+ *
+ * Each Ruby thread is backed by an instance of this class, as well as an
+ * instance of the Thread class. Thread manages the (Ruby visible) thread-
+ * related state, while this class manages the execution machinery for
+ * running Ruby code.
+ */
+
class VM : public ManagedThread {
private:
CallFrame* saved_call_frame_;
@@ -105,13 +115,13 @@ namespace rubinius {
ThreadState thread_state_;
- // The Thread object for this VM state
+ /// The Thread object for this VM state
TypedRoot<Thread*> thread;
- // The current fiber running on this thread
+ /// The current fiber running on this thread
TypedRoot<Fiber*> current_fiber;
- // Root fiber, if any (lazily initialized)
+ /// Root fiber, if any (lazily initialized)
TypedRoot<Fiber*> root_fiber;
static unsigned long cStackDepthMax;
@@ -237,7 +247,6 @@ namespace rubinius {
public:
static void init_stack_size();
- // Better than current_state, uses a NativeThread local.
static VM* current();
static void set_current(VM* vm);
@@ -250,8 +259,6 @@ namespace rubinius {
void check_exception(CallFrame* call_frame);
- // Used to create ObjectMemory and such. Only run for the
- // root vm.
void initialize_as_root();
void bootstrap_class();
@@ -289,20 +296,20 @@ namespace rubinius {
return reinterpret_cast<T*>(new_object_typed_mature(cls, sizeof(T), T::type));
}
- // Create an uninitialized Class object
+ /// Create an uninitialized Class object
Class* new_basic_class(Class* sup);
- // Create a Class of name +name+ as an Object subclass
+ /// Create a Class of name +name+ as an Object subclass
Class* new_class(const char* name);
- // Create a Class of name +name+ as a subclass of +super_class+
+ /// Create a Class of name +name+ as a subclass of +super_class+
Class* new_class(const char* name, Class* super_class);
- // Create a Class of name +name+ as a subclass of +sup+
- // under Module +under+
+ /// Create a Class of name +name+ as a subclass of +sup+
+ /// under Module +under+
Class* new_class(const char* name, Class* sup, Module* under);
- // Create a Class of name +name+ under +under+
+ /// Create a Class of name +name+ under +under+
Class* new_class_under(const char* name, Module* under);
Module* new_module(const char* name, Module* under = NULL);
@@ -318,7 +325,7 @@ namespace rubinius {
Thread* current_thread();
void collect(CallFrame* call_frame);
- // Check the flags in ObjectMemory and collect if we need to.
+ /// Check the GC flags in ObjectMemory and collect if we need to.
void collect_maybe(CallFrame* call_frame);
void raise_from_errno(const char* reason);
@@ -337,7 +344,7 @@ namespace rubinius {
void print_backtrace();
- // Run the garbage collectors as soon as you can
+ /// Run the garbage collectors as soon as you can
void run_gc_soon();
void wait_on_channel(Channel* channel);
@@ -374,6 +381,13 @@ namespace rubinius {
static std::list<Roots*>* roots;
};
+
+ /**
+ * Instantiation of an instance of this class causes Ruby execution on all
+ * threads to be susepended. Upon destruction of the instance, Ruby execution
+ * is resumed.
+ */
+
class StopTheWorld {
VM* vm_;
View
13 vm/world_state.hpp
@@ -23,8 +23,10 @@ namespace rubinius {
return time_waiting_;
}
- // Called after a fork(), when we know we're alone again, to get
- // everything back in the proper order.
+ /**
+ * Called after a fork(), when we know we're alone again, to get
+ * everything back in the proper order.
+ */
void reinit() {
mutex_.init();
waiting_to_stop_.init();
@@ -33,9 +35,10 @@ namespace rubinius {
should_stop_ = false;
}
- // If called when the GC is waiting to run,
- // wait until the GC tells us it's ok to continue.
- // always increments pending_threads_ at the end.
+ /**
+ * If called when the GC is waiting to run, wait until the GC tells us its
+ * OK to continue. Always decrements pending_threads_ at the end.
+ */
void become_independent(THREAD) {
thread::Mutex::LockGuard guard(mutex_);
Please sign in to comment.
Something went wrong with that request. Please try again.