Index: debug.c
===================================================================
--- debug.c (revision 19067)
+++ debug.c (working copy)
@@ -31,7 +31,7 @@
RUBY_ENC_CODERANGE_7BIT = ENC_CODERANGE_7BIT,
RUBY_ENC_CODERANGE_VALID = ENC_CODERANGE_VALID,
RUBY_ENC_CODERANGE_BROKEN = ENC_CODERANGE_BROKEN,
- RUBY_FL_MARK = FL_MARK,
+ RUBY_FL_ALLOCATED = FL_ALLOCATED,
RUBY_FL_RESERVED = FL_RESERVED,
RUBY_FL_FINALIZE = FL_FINALIZE,
RUBY_FL_TAINT = FL_TAINT,
Index: include/ruby/ruby.h
===================================================================
--- include/ruby/ruby.h (revision 19067)
+++ include/ruby/ruby.h (working copy)
@@ -758,7 +758,7 @@
#define RCOMPLEX(obj) (R_CAST(RComplex)(obj))
#define FL_SINGLETON FL_USER0
-#define FL_MARK (((VALUE)1)<<5)
+#define FL_ALLOCATED (((VALUE)1)<<5)
#define FL_RESERVED (((VALUE)1)<<6) /* will be used in the future GC */
#define FL_FINALIZE (((VALUE)1)<<7)
#define FL_TAINT (((VALUE)1)<<8)
@@ -854,6 +854,7 @@
void rb_register_mark_object(VALUE);
void rb_gc_register_address(VALUE*);
void rb_gc_unregister_address(VALUE*);
+int rb_gc_is_thread_marked(VALUE);
ID rb_intern(const char*);
ID rb_intern2(const char*, long);
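
Bit 5 of an object's flags word, previously FL_MARK, is renamed FL_ALLOCATED above. The mark phase no longer records reachability by writing into each object header: mark state moves into out-of-band structures (a per-heap bitmap plus a pointer set, added in gc.c below). Because marking an otherwise untouched object no longer dirties its page, fork()ed child processes can keep sharing heap pages with the parent, which is what makes this patch copy-on-write friendly.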
Index: common.mk
===================================================================
--- common.mk (revision 19067)
+++ common.mk (working copy)
@@ -48,6 +48,7 @@
object.$(OBJEXT) \
pack.$(OBJEXT) \
parse.$(OBJEXT) \
+ pointerset.$(OBJEXT) \
process.$(OBJEXT) \
prec.$(OBJEXT) \
random.$(OBJEXT) \
@@ -486,7 +487,7 @@
{$(VPATH)}encoding.h {$(VPATH)}vm_core.h {$(VPATH)}debug.h \
{$(VPATH)}vm_opts.h $(ID_H_INCLUDES) \
{$(VPATH)}thread_$(THREAD_MODEL).h \
- {$(VPATH)}gc.h {$(VPATH)}eval_intern.h
+ {$(VPATH)}gc.h {$(VPATH)}pointerset.h {$(VPATH)}eval_intern.h
hash.$(OBJEXT): {$(VPATH)}hash.c $(RUBY_H_INCLUDES) \
$(ID_H_INCLUDES) \
{$(VPATH)}st.h {$(VPATH)}util.h {$(VPATH)}signal.h
@@ -515,6 +516,7 @@
{$(VPATH)}regenc.h {$(VPATH)}regex.h {$(VPATH)}util.h \
{$(VPATH)}lex.c {$(VPATH)}keywords {$(VPATH)}debug.h
prec.$(OBJEXT): {$(VPATH)}prec.c $(RUBY_H_INCLUDES) {$(VPATH)}st.h
+pointerset.$(OBJEXT): {$(VPATH)}pointerset.c
proc.$(OBJEXT): {$(VPATH)}proc.c {$(VPATH)}eval_intern.h \
$(RUBY_H_INCLUDES) {$(VPATH)}st.h {$(VPATH)}node.h \
{$(VPATH)}util.h {$(VPATH)}signal.h {$(VPATH)}vm_core.h \
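
common.mk gains a new pointerset translation unit whose source is not part of this diff. Judging from the call sites in gc.c below, pointerset.h presumably declares an interface along these lines (an inferred sketch, not the actual header; the return types are assumptions):

    /* pointerset.h -- inferred sketch, reconstructed from usage in gc.c */
    typedef struct _PointerSet PointerSet;

    PointerSet  *pointer_set_new(void);
    void         pointer_set_insert(PointerSet *set, void *pointer);
    void         pointer_set_delete(PointerSet *set, void *pointer);
    int          pointer_set_contains(PointerSet *set, void *pointer);
    void         pointer_set_reset(PointerSet *set);
    unsigned int pointer_set_get_size(PointerSet *set);

gc.c uses it as the mark table for pointers that do not live on the object heap, such as source filenames.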
Index: gc.c
===================================================================
--- gc.c (revision 19067)
+++ gc.c (working copy)
@@ -23,8 +23,14 @@
#include "gc.h"
#include <stdio.h>
#include <setjmp.h>
+#include <math.h>
#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
@@ -285,6 +291,8 @@
void *membase;
RVALUE *slot;
int limit;
+ int *marks;
+ int marks_size;
};
#define HEAP_MIN_SLOTS 10000
@@ -674,7 +682,83 @@
vm_xfree(&rb_objspace, x);
}
+static int debugging = 0;
+#define DEBUG_POINT(message) \
+ do { \
+ if (debugging) { \
+ printf("%s\n", message); \
+ getchar(); \
+ } \
+ } while (0)
+
+#define OPTION_ENABLED(name) (getenv((name)) && *getenv((name)) && *getenv((name)) != '0')
+
+typedef struct {
+ int fd;
+ size_t size;
+} FileHeapAllocatorMetaData;
+
+static void *
+alloc_ruby_heap_with_file(size_t size)
+{
+ FileHeapAllocatorMetaData meta;
+ meta.fd = open("/dev/zero", O_RDONLY);
+ meta.size = size;
+ if (meta.fd == -1) {
+ return NULL;
+ }
+ else {
+ void *memory = mmap(NULL, size + sizeof(meta), PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, meta.fd, 0);
+ if (memory == MAP_FAILED) { /* mmap reports failure as MAP_FAILED, not NULL */
+ close(meta.fd);
+ return NULL;
+ }
+ else {
+ FileHeapAllocatorMetaData *p = (FileHeapAllocatorMetaData *) memory;
+ *p = meta;
+ return p + 1;
+ }
+ }
+}
+
+static void *
+alloc_ruby_heap(size_t size)
+{
+ if (debugging || OPTION_ENABLED("RUBY_GC_ALLOC_HEAP_WITH_FILE")) {
+ return alloc_ruby_heap_with_file(size);
+ }
+ else {
+ return malloc(size);
+ }
+}
+
+static void
+free_ruby_heap_with_file(void *heap)
+{
+ FileHeapAllocatorMetaData *p = (FileHeapAllocatorMetaData *) heap;
+ close(p->fd);
+ munmap(p, p->size + sizeof(FileHeapAllocatorMetaData)); /* header was mapped too */
+}
+
+static void
+free_ruby_heap(void *heap)
+{
+ if (debugging || OPTION_ENABLED("RUBY_GC_ALLOC_HEAP_WITH_FILE")) {
+ free_ruby_heap_with_file(heap);
+ }
+ else {
+ free(heap);
+ }
+}
+
+static void
+init_debugging(void)
+{
+ debugging = OPTION_ENABLED("RUBY_GC_DEBUG");
+}
+
+
/*
* call-seq:
* GC.enable => true or false
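
The helpers above let the Ruby heap be backed by a private mmap() of /dev/zero instead of malloc() whenever RUBY_GC_DEBUG or RUBY_GC_ALLOC_HEAP_WITH_FILE is set to a non-empty value other than "0" (presumably so heap pages stand out in the process's memory maps while debugging; the diff itself only shows the env-var switch). A minimal standalone sketch of the same mapping pattern, including the MAP_FAILED check that mmap() calls for:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t size = 1 << 20;  /* 1 MB, for illustration */
        int fd = open("/dev/zero", O_RDONLY);
        if (fd == -1) return 1;

        /* MAP_PRIVATE: copy-on-write pages backed by /dev/zero. */
        void *heap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE, fd, 0);
        if (heap == MAP_FAILED) {  /* not NULL on error */
            close(fd);
            return 1;
        }
        printf("heap at %p\n", heap);
        munmap(heap, size);
        close(fd);
        return 0;
    }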
@@ -764,8 +848,158 @@
}
}
+#include "pointerset.h"
+/* A mark table for filenames and objects that are not on the heap. */
+static PointerSet *mark_table = NULL;
+static struct heaps_slot *last_heap = NULL;
+
+
+static inline struct heaps_slot *
+find_heap_slot_for_object(rb_objspace_t *objspace, RVALUE *object)
+{
+ int i;
+
+ /* Look in the cache first. */
+ if (last_heap != NULL && object >= last_heap->slot
+ && object < last_heap->slot + last_heap->limit) {
+ return last_heap;
+ }
+ for (i = 0; i < heaps_used; i++) {
+ struct heaps_slot *heap = &heaps[i];
+ if (object >= heap->slot
+ && object < heap->slot + heap->limit) {
+ /* Cache this result: empirically, the next lookup is very likely
+ * to hit the same heap slot.
+ */
+ last_heap = heap;
+ return heap;
+ }
+ }
+ return NULL;
+}
+
+static inline void
+find_position_in_bitfield(struct heaps_slot *hs, RVALUE *object,
+ unsigned int *bitfield_index, unsigned int *bitfield_offset)
+{
+ unsigned int index;
+
+ index = object - hs->slot;
+ *bitfield_index = index / (sizeof(int) * 8);
+ *bitfield_offset = index % (sizeof(int) * 8);
+}
+
+
+
static void
+rb_mark_table_init(void)
+{
+ mark_table = pointer_set_new();
+}
+
+static void
+rb_mark_table_prepare(void)
+{
+ last_heap = NULL;
+}
+
+static void
+rb_mark_table_finalize(rb_objspace_t *objspace)
+{
+ int i;
+
+ if (pointer_set_get_size(mark_table) > 0) {
+ pointer_set_reset(mark_table);
+ }
+ for (i = 0; i < heaps_used; i++) {
+ memset(heaps[i].marks, 0, heaps[i].marks_size * sizeof(int));
+ }
+}
+
+static void
+rb_mark_table_add(rb_objspace_t *objspace, RVALUE *object)
+{
+ struct heaps_slot *hs;
+ unsigned int bitfield_index, bitfield_offset;
+
+ hs = find_heap_slot_for_object(objspace, object);
+ if (hs != NULL) {
+ find_position_in_bitfield(hs, object, &bitfield_index, &bitfield_offset);
+ hs->marks[bitfield_index] |= (1 << bitfield_offset);
+ }
+ else {
+ pointer_set_insert(mark_table, (void *) object);
+ }
+}
+
+static int
+rb_mark_table_contains(rb_objspace_t *objspace, RVALUE *object)
+{
+ struct heaps_slot *hs;
+ unsigned int bitfield_index, bitfield_offset;
+
+ hs = find_heap_slot_for_object(objspace, object);
+ if (hs != NULL) {
+ find_position_in_bitfield(hs, object, &bitfield_index, &bitfield_offset);
+ return hs->marks[bitfield_index] & (1 << bitfield_offset);
+ }
+ else {
+ return pointer_set_contains(mark_table, (void *) object);
+ }
+}
+
+static int
+rb_mark_table_heap_contains(struct heaps_slot *hs, RVALUE *object)
+{
+ unsigned int bitfield_index, bitfield_offset;
+ find_position_in_bitfield(hs, object, &bitfield_index, &bitfield_offset);
+ return hs->marks[bitfield_index] & (1 << bitfield_offset);
+}
+
+static void
+rb_mark_table_remove(rb_objspace_t *objspace, RVALUE *object)
+{
+ struct heaps_slot *hs;
+ unsigned int bitfield_index, bitfield_offset;
+
+ hs = find_heap_slot_for_object(objspace, object);
+ if (hs != NULL) {
+ find_position_in_bitfield(hs, object, &bitfield_index, &bitfield_offset);
+ hs->marks[bitfield_index] &= ~(1 << bitfield_offset);
+ }
+ else {
+ pointer_set_delete(mark_table, (void *) object);
+ }
+}
+
+static void
+rb_mark_table_heap_remove(struct heaps_slot *hs, RVALUE *object)
+{
+ unsigned int bitfield_index, bitfield_offset;
+ find_position_in_bitfield(hs, object, &bitfield_index, &bitfield_offset);
+ hs->marks[bitfield_index] &= ~(1 << bitfield_offset);
+}
+
+static void
+rb_mark_table_add_filename(const char *filename)
+{
+ pointer_set_insert(mark_table, (void *) filename);
+}
+
+static int
+rb_mark_table_contains_filename(const char *filename)
+{
+ return pointer_set_contains(mark_table, (void *) filename);
+}
+
+static void
+rb_mark_table_remove_filename(const char *filename)
+{
+ pointer_set_delete(mark_table, (void *) filename);
+}
+
+static void
allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
struct heaps_slot *p;
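
find_position_in_bitfield() maps an object to a single bit in the per-heap mark bitmap: the object's index within the slot array is split into a word index and a bit offset. Worked example, assuming 32-bit ints: the object at index 69 (object - hs->slot == 69) lands in marks[2], bit 5, because 69 / 32 == 2 and 69 % 32 == 5. A standalone sketch of the same addressing (the marks array here is hypothetical):

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int bits = sizeof(int) * 8;  /* 32 on most platforms */
        unsigned int index = 69;              /* object - hs->slot */
        unsigned int word = index / bits;     /* -> 2 */
        unsigned int bit = index % bits;      /* -> 5 */
        int marks[3] = {0};

        marks[word] |= 1 << bit;              /* as in rb_mark_table_add */
        printf("marked: %d\n", (marks[word] >> bit) & 1);
        return 0;
    }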
@@ -832,6 +1066,8 @@
heaps[hi].membase = membase;
heaps[hi].slot = p;
heaps[hi].limit = objs;
+ heaps[hi].marks_size = (int) (ceil(objs / (sizeof(int) * 8.0)));
+ heaps[hi].marks = (int *) calloc(heaps[hi].marks_size, sizeof(int));
pend = p + objs;
if (lomem == 0 || lomem > p) lomem = p;
if (himem < pend) himem = pend;
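
Worked numbers for the two added lines above: with the default HEAP_MIN_SLOTS of 10000 objects per heap and 32-bit ints, marks_size = ceil(10000 / 32) = 313 words, about 1.2 KB of mark bits per heap. The calloc()ed bitmap lives outside the object slots, so setting and clearing mark bits never touches (and never dirties) the heap pages themselves.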
@@ -904,6 +1140,7 @@
freelist = freelist->as.free.next;
MEMZERO((void*)obj, RVALUE, 1);
+ RANY(obj)->as.free.flags = FL_ALLOCATED;
#ifdef GC_DEBUG
RANY(obj)->file = rb_sourcefile();
RANY(obj)->line = rb_sourceline();
@@ -1079,6 +1316,28 @@
static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev);
+void
+rb_mark_source_filename(char *f)
+{
+ if (f) {
+ rb_mark_table_add_filename(f);
+ }
+}
+
+static int
+sweep_source_filename(char *key, char *value)
+{
+ if (rb_mark_table_contains_filename(value + 1)) {
+ rb_mark_table_remove_filename(value + 1);
+ return ST_CONTINUE;
+ }
+ else {
+ rb_mark_table_remove_filename(value + 1);
+ free(value);
+ return ST_DELETE;
+ }
+}
+
static void
gc_mark_all(rb_objspace_t *objspace)
{
@@ -1089,8 +1348,8 @@
for (i = 0; i < heaps_used; i++) {
p = heaps[i].slot; pend = p + heaps[i].limit;
while (p < pend) {
- if ((p->as.basic.flags & FL_MARK) &&
- (p->as.basic.flags != FL_MARK)) {
+ if (rb_mark_table_contains(objspace, p) &&
+ (p->as.basic.flags != FL_ALLOCATED)) {
gc_mark_children(objspace, (VALUE)p, 0);
}
p++;
@@ -1270,8 +1529,8 @@
obj = RANY(ptr);
if (rb_special_const_p(ptr)) return; /* special const not marked */
if (obj->as.basic.flags == 0) return; /* free cell */
- if (obj->as.basic.flags & FL_MARK) return; /* already marked */
- obj->as.basic.flags |= FL_MARK;
+ if (rb_mark_table_contains(objspace, obj)) return; /* already marked */
+ rb_mark_table_add(objspace, obj);
if (lev > GC_LEVEL_MAX || (lev == 0 && ruby_stack_check())) {
if (!mark_stack_overflow) {
@@ -1305,8 +1564,8 @@
obj = RANY(ptr);
if (rb_special_const_p(ptr)) return; /* special const not marked */
if (obj->as.basic.flags == 0) return; /* free cell */
- if (obj->as.basic.flags & FL_MARK) return; /* already marked */
- obj->as.basic.flags |= FL_MARK;
+ if (rb_mark_table_contains(objspace, obj)) return; /* already marked */
+ rb_mark_table_add(objspace, obj);
marking:
if (FL_TEST(obj, FL_EXIVAR)) {
@@ -1565,6 +1824,7 @@
{
VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
p->as.free.flags = 0;
+ rb_mark_table_remove(objspace, (RVALUE *) p);
p->as.free.next = freelist;
freelist = p;
}
@@ -1575,7 +1835,11 @@
while (p) {
RVALUE *tmp = p->as.free.next;
run_final(objspace, (VALUE)p);
- if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
+ /* Don't free singleton objects, and skip objects that are already
+ * freed: re-freeing would needlessly dirty their memory pages and
+ * defeat copy-on-write sharing.
+ */
+ if (!FL_TEST(p, FL_SINGLETON) && p->as.free.flags != 0) {
add_freelist(objspace, p);
}
else {
@@ -1590,16 +1854,11 @@
free_unused_heaps(rb_objspace_t *objspace)
{
size_t i, j;
- RVALUE *last = 0;
for (i = j = 1; j < heaps_used; i++) {
if (heaps[i].limit == 0) {
- if (!last) {
- last = heaps[i].membase;
- }
- else {
- free(heaps[i].membase);
- }
+ free_ruby_heap(heaps[i].membase);
+ free(heaps[i].marks);
heaps_used--;
}
else {
@@ -1609,15 +1868,6 @@
j++;
}
}
- if (last) {
- if (last < heaps_freed) {
- free(heaps_freed);
- heaps_freed = last;
- }
- else {
- free(last);
- }
- }
}
static void
@@ -1647,7 +1897,7 @@
p = heaps[i].slot; pend = p + heaps[i].limit;
while (p < pend) {
- if (!(p->as.basic.flags & FL_MARK)) {
+ if (!rb_mark_table_contains(objspace, p)) {
if (p->as.basic.flags &&
((deferred = obj_free(objspace, (VALUE)p)) ||
((FL_TEST(p, FL_FINALIZE)) && need_call_final))) {
@@ -1655,13 +1905,21 @@
p->as.free.flags = T_DEFERRED;
RDATA(p)->dfree = 0;
}
- p->as.free.flags |= FL_MARK;
+ rb_mark_table_add(objspace, p); /* remain marked */
p->as.free.next = final_list;
final_list = p;
final_num++;
}
else {
- add_freelist(objspace, p);
+ VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
+ /* Only write to these fields when the value actually changes,
+ * so untouched pages stay clean and copy-on-write sharing survives.
+ */
+ if (p->as.free.flags != 0)
+ p->as.free.flags = 0;
+ if (p->as.free.next != freelist)
+ p->as.free.next = freelist;
+ freelist = p;
free_num++;
}
}
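
The branch above replaces a plain add_freelist() call with guarded stores: flags and next are written only when the stored value actually differs. Under copy-on-write, even a store of an unchanged value forces the kernel to copy the shared page, so the guard is what keeps clean pages shared after fork(). The pattern in isolation (a sketch, not the patch's code):

    /* Avoid dirtying a possibly-shared page with a redundant store:
     * after fork(), pages are shared until the first write, and a
     * same-value write still triggers the copy. */
    static void
    set_if_changed(int *field, int value)
    {
        if (*field != value) {
            *field = value;
        }
    }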
@@ -1670,7 +1928,7 @@
/* do nothing remain marked */
}
else {
- RBASIC(p)->flags &= ~FL_MARK;
+ rb_mark_table_heap_remove(&heaps[i], p);
live++;
}
p++;
@@ -1709,6 +1967,7 @@
RUBY_VM_SET_FINALIZER_INTERRUPT(GET_THREAD());
}
else{
+ rb_mark_table_finalize(objspace);
free_unused_heaps(objspace);
GC_PROF_SET_HEAP_INFO;
}
@@ -1916,6 +2175,7 @@
FLUSH_REGISTER_WINDOWS;
/* This assumes that all registers are saved into the jmp_buf (and stack) */
+ memset(save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
rb_setjmp(save_regs_gc_mark);
mark_locations_array(objspace,
(VALUE*)save_regs_gc_mark,
@@ -1962,6 +2222,7 @@
GC_PROF_MARK_TIMER_START;
SET_STACK_END;
+ rb_mark_table_prepare();
init_mark_stack(objspace);
th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
@@ -2058,6 +2319,20 @@
#undef Init_stack
+int
+rb_gc_is_thread_marked(VALUE the_thread)
+{
+ rb_objspace_t *objspace = &rb_objspace;
+
+ if (FL_ABLE(the_thread)) {
+ return rb_mark_table_contains(objspace, (RVALUE *) the_thread);
+ }
+ else {
+ return 0;
+ }
+}
+
void
Init_stack(VALUE *addr)
{
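
rb_gc_is_thread_marked() above is the implementation behind the prototype added to include/ruby/ruby.h earlier, so C code embedding or extending Ruby can ask whether the most recent mark phase reached a given thread object. A hedged usage sketch (the extension function itself is hypothetical):

    #include <ruby/ruby.h>

    /* Returns Qtrue if `thread` was reached by the last mark phase. */
    static VALUE
    thread_marked_p(VALUE self, VALUE thread)
    {
        return rb_gc_is_thread_marked(thread) ? Qtrue : Qfalse;
    }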
@@ -2098,6 +2373,7 @@
void
Init_heap(void)
{
+ rb_mark_table_init();
init_heap(&rb_objspace);
}
@@ -2325,10 +2601,13 @@
static int
chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg)
{
+ rb_objspace_t *objspace = &rb_objspace;
+
RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg;
if (p->as.basic.flags & FL_FINALIZE) {
if (BUILTIN_TYPE(p) != T_DEFERRED) {
- p->as.free.flags = FL_MARK | T_DEFERRED; /* remain marked */
+ p->as.free.flags = FL_ALLOCATED | T_DEFERRED; /* remain marked */
+ rb_mark_table_add(objspace, p);
RDATA(p)->dfree = 0;
}
p->as.free.next = *final_list;
@@ -2375,6 +2654,7 @@
DATA_PTR(p) && RANY(p)->as.data.dfree &&
RANY(p)->as.basic.klass != rb_cThread) {
p->as.free.flags = 0;
+ rb_mark_table_remove(objspace, p);
if ((long)RANY(p)->as.data.dfree == -1) {
xfree(DATA_PTR(p));
}
@@ -2386,6 +2666,7 @@
else if (BUILTIN_TYPE(p) == T_FILE) {
if (rb_io_fptr_finalize(RANY(p)->as.file.fptr)) {
p->as.free.flags = 0;
+ rb_mark_table_remove(objspace, p);
VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
}
}
@@ -2633,6 +2914,61 @@
return hash;
}
+static VALUE
+os_statistics(void)
+{
+ int i;
+ unsigned int objects = 0;
+ unsigned int total_heap_size = 0;
+ unsigned int ast_nodes = 0;
+ char message[1024];
+ rb_objspace_t *objspace = &rb_objspace;
+
+ for (i = 0; i < heaps_used; i++) {
+ RVALUE *p, *pend;
+
+ p = heaps[i].slot;
+ pend = p + heaps[i].limit;
+ for (;p < pend; p++) {
+ if (p->as.basic.flags) {
+ int isAST = 0;
+ switch (TYPE(p)) {
+ case T_ICLASS:
+ case T_NODE:
+ isAST = 1;
+ break;
+ case T_CLASS:
+ if (FL_TEST(p, FL_SINGLETON)) {
+ isAST = 1;
+ break;
+ }
+ default:
+ break;
+ }
+ objects++;
+ if (isAST) {
+ ast_nodes++;
+ }
+ }
+ }
+ total_heap_size += (char *) pend - (char *) heaps[i].membase;
+ }
+
+ snprintf(message, sizeof(message),
+ "Number of objects: %u (%u AST nodes, %.2f%%)\n"
+ "Heap slot size: %u\n"
+ "Number of heaps: %d\n"
+ "Total size of objects: %.2f KB\n"
+ "Total size of heaps: %.2f KB\n",
+ objects, ast_nodes,
+ objects ? ast_nodes * 100 / (double) objects : 0.0,
+ (unsigned int) sizeof(RVALUE),
+ (int) heaps_used,
+ objects * sizeof(RVALUE) / 1024.0,
+ total_heap_size / 1024.0
+ );
+ return rb_str_new2(message);
+}
+
/*
* call-seq:
* GC.count -> Integer
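
os_statistics() is exposed to Ruby as ObjectSpace.statistics by the rb_define_module_function() call in one of the final hunks, and it returns its report as a String. From Ruby, puts ObjectSpace.statistics would print the object count, the share of AST nodes, and the heap totals computed above.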
@@ -2834,6 +3170,8 @@
rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);
+ rb_define_module_function(rb_mObSpace, "statistics", os_statistics, 0);
+
nomem_error = rb_exc_new3(rb_eNoMemError,
rb_obj_freeze(rb_str_new2("failed to allocate memory")));
OBJ_TAINT(nomem_error);
@@ -2844,6 +3182,7 @@
rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);
+ init_debugging();
#if CALC_EXACT_MALLOC_SIZE
rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);