diff --git a/patches/ruby/1.8.7/head/railsexpress/01-ignore-generated-files.patch b/patches/ruby/1.8.7/head/railsexpress/01-ignore-generated-files.patch new file mode 100644 index 0000000000..b669ad44bd --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/01-ignore-generated-files.patch @@ -0,0 +1,101 @@ +diff --git a/.gitignore b/.gitignore +new file mode 100644 +index 0000000..00c347a +--- /dev/null ++++ b/.gitignore +@@ -0,0 +1,95 @@ ++.ext ++.installed.list ++.rbconfig.time ++Makefile ++autom4te.cache/ ++config.h ++config.status ++configure ++ext/Win32API/Makefile ++ext/bigdecimal/Makefile ++ext/curses/Makefile ++ext/dbm/Makefile ++ext/digest/Makefile ++ext/digest/bubblebabble/Makefile ++ext/digest/md5/Makefile ++ext/digest/rmd160/Makefile ++ext/digest/sha1/Makefile ++ext/digest/sha2/Makefile ++ext/dl/Makefile ++ext/dl/call.func ++ext/dl/callback.func ++ext/dl/cbtable.func ++ext/dl/dlconfig.h ++ext/dl/dlconfig.rb ++ext/enumerator/Makefile ++ext/etc/Makefile ++ext/fcntl/Makefile ++ext/gdbm/Makefile ++ext/iconv/Makefile ++ext/io/wait/Makefile ++ext/nkf/Makefile ++ext/openssl/Makefile ++ext/openssl/extconf.h ++ext/pty/Makefile ++ext/racc/cparse/Makefile ++ext/readline/Makefile ++ext/sdbm/Makefile ++ext/socket/Makefile ++ext/stringio/Makefile ++ext/strscan/Makefile ++ext/syck/Makefile ++ext/syslog/Makefile ++ext/thread/Makefile ++ext/tk/Makefile ++ext/tk/tkutil/Makefile ++ext/win32ole/Makefile ++ext/win32ole/.document ++ext/zlib/Makefile ++largefile.h ++miniruby ++parse.c ++rbconfig.rb ++ruby ++enc.mk ++ext/bigdecimal/extconf.h ++ext/continuation/ ++ext/coverage/ ++ext/curses/extconf.h ++ext/dbm/extconf.h ++ext/digest/bubblebabble/extconf.h ++ext/digest/extconf.h ++ext/digest/md5/extconf.h ++ext/digest/rmd160/extconf.h ++ext/digest/sha1/extconf.h ++ext/digest/sha2/extconf.h ++ext/dl/callback.h ++ext/dl/extconf.h ++ext/etc/extconf.h ++ext/fcntl/extconf.h ++ext/fiber/ ++ext/iconv/extconf.h ++ext/io/wait/extconf.h ++ext/json/ ++ext/nkf/extconf.h ++ext/pty/extconf.h ++ext/racc/cparse/extconf.h ++ext/readline/extconf.h ++ext/ripper/ ++ext/sdbm/extconf.h ++ext/socket/constants.h ++ext/socket/extconf.h ++ext/stringio/extconf.h ++ext/strscan/extconf.h ++ext/syck/extconf.h ++ext/syslog/extconf.h ++ext/tk/extconf.h ++ext/tk/tkutil/extconf.h ++ext/zlib/extconf.h ++miniprelude.c ++prelude.c ++revision.h ++*.dylib ++*.log ++*.dSYM ++patches-ruby* diff --git a/patches/ruby/1.8.7/head/railsexpress/02-fix-tests-for-osx.patch b/patches/ruby/1.8.7/head/railsexpress/02-fix-tests-for-osx.patch new file mode 100644 index 0000000000..6250e97f36 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/02-fix-tests-for-osx.patch @@ -0,0 +1,139 @@ +diff --git a/test/drb/drbtest.rb b/test/drb/drbtest.rb +index bc16ab1..c12b168 100644 +--- a/test/drb/drbtest.rb ++++ b/test/drb/drbtest.rb +@@ -22,7 +22,7 @@ class DRbService + %w(ut_drb.rb ut_array.rb ut_port.rb ut_large.rb ut_safe1.rb ut_eval.rb).each do |nm| + add_service_command(nm) + end +- @server = @@server = DRb::DRbServer.new('druby://localhost:0', @@manager, {}) ++ @server = @@server = DRb::DRbServer.new('druby://127.0.0.1:0', @@manager, {}) + @@manager.uri = @@server.uri + def self.manager + @@manager +@@ -79,16 +79,16 @@ module DRbCore + end + + def test_00_DRbObject +- ro = DRbObject.new(nil, 'druby://localhost:12345') +- assert_equal('druby://localhost:12345', ro.__drburi) ++ ro = DRbObject.new(nil, 'druby://127.0.0.1:12345') ++ assert_equal('druby://127.0.0.1:12345', ro.__drburi) + assert_equal(nil, ro.__drbref) + +- ro = 
DRbObject.new_with_uri('druby://localhost:12345') +- assert_equal('druby://localhost:12345', ro.__drburi) ++ ro = DRbObject.new_with_uri('druby://127.0.0.1:12345') ++ assert_equal('druby://127.0.0.1:12345', ro.__drburi) + assert_equal(nil, ro.__drbref) + +- ro = DRbObject.new_with_uri('druby://localhost:12345?foobar') +- assert_equal('druby://localhost:12345', ro.__drburi) ++ ro = DRbObject.new_with_uri('druby://127.0.0.1:12345?foobar') ++ assert_equal('druby://127.0.0.1:12345', ro.__drburi) + assert_equal(DRb::DRbURIOption.new('foobar'), ro.__drbref) + end + +diff --git a/test/drb/ut_drb.rb b/test/drb/ut_drb.rb +index f5720cf..265713d 100644 +--- a/test/drb/ut_drb.rb ++++ b/test/drb/ut_drb.rb +@@ -154,7 +154,7 @@ if __FILE__ == $0 + + DRb::DRbServer.default_argc_limit(8) + DRb::DRbServer.default_load_limit(4096) +- DRb.start_service('druby://localhost:0', DRbEx.new) ++ DRb.start_service('druby://127.0.0.1:0', DRbEx.new) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join + end +diff --git a/test/drb/ut_drb_drbssl.rb b/test/drb/ut_drb_drbssl.rb +index 0a2191e..bca3012 100644 +--- a/test/drb/ut_drb_drbssl.rb ++++ b/test/drb/ut_drb_drbssl.rb +@@ -18,7 +18,7 @@ if __FILE__ == $0 + + DRb::DRbServer.default_argc_limit(8) + DRb::DRbServer.default_load_limit(4096) +- DRb.start_service('drbssl://localhost:0', DRbEx.new, config) ++ DRb.start_service('drbssl://127.0.0.1:0', DRbEx.new, config) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join + end +diff --git a/test/drb/ut_eval.rb b/test/drb/ut_eval.rb +index 4df963e..9127939 100644 +--- a/test/drb/ut_eval.rb ++++ b/test/drb/ut_eval.rb +@@ -3,7 +3,7 @@ require 'drb/extserv' + + class EvalAttack + def initialize +- @four = DRb::DRbServer.new('druby://localhost:0', self, {:safe_level => 4}) ++ @four = DRb::DRbServer.new('druby://127.0.0.1:0', self, {:safe_level => 4}) + end + + def four +@@ -25,7 +25,7 @@ if __FILE__ == $0 + + $SAFE = 1 + +- DRb.start_service('druby://localhost:0', EvalAttack.new, {:safe_level => 2}) ++ DRb.start_service('druby://127.0.0.1:0', EvalAttack.new, {:safe_level => 2}) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join + end +diff --git a/test/drb/ut_large.rb b/test/drb/ut_large.rb +index d6717c5..0aefd1b 100644 +--- a/test/drb/ut_large.rb ++++ b/test/drb/ut_large.rb +@@ -31,7 +31,7 @@ if __FILE__ == $0 + + DRb::DRbServer.default_argc_limit(3) + DRb::DRbServer.default_load_limit(100000) +- DRb.start_service('druby://localhost:0', DRbLarge.new) ++ DRb.start_service('druby://127.0.0.1:0', DRbLarge.new) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join + end +diff --git a/test/drb/ut_safe1.rb b/test/drb/ut_safe1.rb +index 4df8e1e..ee097f6 100644 +--- a/test/drb/ut_safe1.rb ++++ b/test/drb/ut_safe1.rb +@@ -8,7 +8,7 @@ if __FILE__ == $0 + it + end + +- DRb.start_service('druby://localhost:0', [1, 2, 'III', 4, "five", 6], ++ DRb.start_service('druby://127.0.0.1:0', [1, 2, 'III', 4, "five", 6], + {:safe_level => 1}) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join +diff --git a/test/net/pop/test_pop.rb b/test/net/pop/test_pop.rb +index c8aa9a8..129ca08 100644 +--- a/test/net/pop/test_pop.rb ++++ b/test/net/pop/test_pop.rb +@@ -3,10 +3,14 @@ require 'test/unit' + require 'digest/md5' + + class TestPOP < Test::Unit::TestCase ++ def localhost ++ '127.0.0.1' ++ end ++ + def setup + @users = {'user' => 'pass' } + @ok_user = 'user' +- @stamp_base = "#{$$}.#{Time.now.to_i}@localhost" ++ @stamp_base = "#{$$}.#{Time.now.to_i}@#{localhost}" + end + + def 
test_pop_auth_ok +@@ -64,7 +68,7 @@ class TestPOP < Test::Unit::TestCase + end + + def pop_test(apop=false) +- host = 'localhost' ++ host = localhost + server = TCPServer.new(host, 0) + port = server.addr[1] + thread = Thread.start do diff --git a/patches/ruby/1.8.7/head/railsexpress/03-sigvtalrm-fix.patch b/patches/ruby/1.8.7/head/railsexpress/03-sigvtalrm-fix.patch new file mode 100644 index 0000000000..fe99f6eed6 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/03-sigvtalrm-fix.patch @@ -0,0 +1,27 @@ +diff --git a/eval.c b/eval.c +index 7886e17..6ff2560 100644 +--- a/eval.c ++++ b/eval.c +@@ -12461,6 +12461,11 @@ rb_thread_start_0(fn, arg, th) + curr_thread->next = th; + th->priority = curr_thread->priority; + th->thgroup = curr_thread->thgroup; ++#if defined(HAVE_SETITIMER) || defined(_THREAD_SAFE) ++ if (!thread_init) { ++ rb_thread_start_timer(); ++ } ++#endif + } + START_TIMER(); + +@@ -13189,7 +13194,9 @@ rb_thread_atfork() + main_thread = curr_thread; + curr_thread->next = curr_thread; + curr_thread->prev = curr_thread; +- STOP_TIMER(); ++#if defined(HAVE_SETITIMER) || defined(_THREAD_SAFE) ++ rb_thread_stop_timer(); ++#endif + } + + diff --git a/patches/ruby/1.8.7/head/railsexpress/04-railsbench-gc-patch.patch b/patches/ruby/1.8.7/head/railsexpress/04-railsbench-gc-patch.patch new file mode 100644 index 0000000000..7b6c0e251a --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/04-railsbench-gc-patch.patch @@ -0,0 +1,1876 @@ +diff --git a/gc.c b/gc.c +index fa45cd1..ab71d22 100644 +--- a/gc.c ++++ b/gc.c +@@ -22,8 +22,16 @@ + #include + #include + ++#ifdef _WIN32 ++#include ++#else ++#include ++#endif ++ + #ifdef HAVE_SYS_TIME_H + #include ++#elif defined(_WIN32) ++#include + #endif + + #ifdef HAVE_SYS_RESOURCE_H +@@ -42,7 +50,6 @@ void rb_io_fptr_finalize _((struct rb_io_t*)); + #ifdef __CYGWIN__ + int _setjmp(), _longjmp(); + #endif +- + /* Make alloca work the best possible way. 
*/ + #ifdef __GNUC__ + # ifndef atarist +@@ -86,12 +93,12 @@ rb_memerror() + rb_thread_t th = rb_curr_thread; + + if (!nomem_error || +- (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) { +- fprintf(stderr, "[FATAL] failed to allocate memory\n"); +- exit(1); ++ (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) { ++ fprintf(stderr, "[FATAL] failed to allocate memory\n"); ++ exit(1); + } + if (rb_thread_raised_p(th, RAISED_NOMEMORY)) { +- rb_exc_jump(nomem_error); ++ rb_exc_jump(nomem_error); + } + rb_thread_raised_set(th, RAISED_NOMEMORY); + rb_exc_raise(nomem_error); +@@ -139,7 +146,7 @@ ruby_xmalloc(size) + void *mem; + + if (size < 0) { +- rb_raise(rb_eNoMemError, "negative allocation size (or too big)"); ++ rb_raise(rb_eNoMemError, "negative allocation size (or too big)"); + } + if (size == 0) size = 1; + +@@ -148,11 +155,11 @@ ruby_xmalloc(size) + } + RUBY_CRITICAL(mem = malloc(size)); + if (!mem) { +- garbage_collect(); +- RUBY_CRITICAL(mem = malloc(size)); +- if (!mem) { +- rb_memerror(); +- } ++ garbage_collect(); ++ RUBY_CRITICAL(mem = malloc(size)); ++ if (!mem) { ++ rb_memerror(); ++ } + } + malloc_increase += size; + +@@ -179,17 +186,17 @@ ruby_xrealloc(ptr, size) + void *mem; + + if (size < 0) { +- rb_raise(rb_eArgError, "negative re-allocation size"); ++ rb_raise(rb_eArgError, "negative re-allocation size"); + } + if (!ptr) return xmalloc(size); + if (size == 0) size = 1; + if (ruby_gc_stress) garbage_collect(); + RUBY_CRITICAL(mem = realloc(ptr, size)); + if (!mem) { +- garbage_collect(); +- RUBY_CRITICAL(mem = realloc(ptr, size)); +- if (!mem) { +- rb_memerror(); ++ garbage_collect(); ++ RUBY_CRITICAL(mem = realloc(ptr, size)); ++ if (!mem) { ++ rb_memerror(); + } + } + malloc_increase += size; +@@ -202,11 +209,20 @@ ruby_xfree(x) + void *x; + { + if (x) +- RUBY_CRITICAL(free(x)); ++ RUBY_CRITICAL(free(x)); + } + ++#if HAVE_LONG_LONG ++#define GC_TIME_TYPE LONG_LONG ++#else ++#define GC_TIME_TYPE long ++#endif ++ + extern int ruby_in_compile; + static int dont_gc; ++static int gc_statistics = 0; ++static GC_TIME_TYPE gc_time = 0; ++static int gc_collections = 0; + static int during_gc; + static int need_call_final = 0; + static st_table *finalizer_table = 0; +@@ -241,7 +257,7 @@ rb_gc_enable() + * Disables garbage collection, returning true if garbage + * collection was already disabled. + * +- * GC.disable #=> false ++ * GC.disable #=> false or true + * GC.disable #=> true + * + */ +@@ -255,6 +271,104 @@ rb_gc_disable() + return old; + } + ++/* ++ * call-seq: ++ * GC.enable_stats => true or false ++ * ++ * Enables garbage collection statistics, returning true if garbage ++ * collection statistics was already enabled. ++ * ++ * GC.enable_stats #=> false or true ++ * GC.enable_stats #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_enable_stats() ++{ ++ int old = gc_statistics; ++ gc_statistics = Qtrue; ++ return old; ++} ++ ++/* ++ * call-seq: ++ * GC.disable_stats => true or false ++ * ++ * Disables garbage collection statistics, returning true if garbage ++ * collection statistics was already disabled. ++ * ++ * GC.disable_stats #=> false or true ++ * GC.disable_stats #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_disable_stats() ++{ ++ int old = gc_statistics; ++ gc_statistics = Qfalse; ++ return old; ++} ++ ++/* ++ * call-seq: ++ * GC.clear_stats => nil ++ * ++ * Clears garbage collection statistics, returning nil. This resets the number ++ * of collections (GC.collections) and the time used (GC.time) to 0. 
++ * ++ * GC.clear_stats #=> nil ++ * ++ */ ++ ++VALUE ++rb_gc_clear_stats() ++{ ++ gc_collections = 0; ++ gc_time = 0; ++ return Qnil; ++} ++ ++/* ++ * call-seq: ++ * GC.collections => Integer ++ * ++ * Returns the number of garbage collections performed while GC statistics collection ++ * was enabled. ++ * ++ * GC.collections #=> 35 ++ * ++ */ ++ ++VALUE ++rb_gc_collections() ++{ ++ return INT2NUM(gc_collections); ++} ++ ++/* ++ * call-seq: ++ * GC.time => Integer ++ * ++ * Returns the time spent during garbage collection while GC statistics collection ++ * was enabled (in micro seconds). ++ * ++ * GC.time #=> 20000 ++ * ++ */ ++ ++VALUE ++rb_gc_time() ++{ ++#if HAVE_LONG_LONG ++ return LL2NUM(gc_time); ++#else ++ return LONG2NUM(gc_time); ++#endif ++} ++ ++ + VALUE rb_mGC; + + static struct gc_list { +@@ -281,19 +395,19 @@ rb_gc_unregister_address(addr) + struct gc_list *tmp = global_List; + + if (tmp->varptr == addr) { +- global_List = tmp->next; +- RUBY_CRITICAL(free(tmp)); +- return; ++ global_List = tmp->next; ++ RUBY_CRITICAL(free(tmp)); ++ return; + } + while (tmp->next) { +- if (tmp->next->varptr == addr) { +- struct gc_list *t = tmp->next; ++ if (tmp->next->varptr == addr) { ++ struct gc_list *t = tmp->next; + +- tmp->next = tmp->next->next; +- RUBY_CRITICAL(free(t)); +- break; +- } +- tmp = tmp->next; ++ tmp->next = tmp->next->next; ++ RUBY_CRITICAL(free(t)); ++ break; ++ } ++ tmp = tmp->next; + } + } + +@@ -312,26 +426,26 @@ rb_global_variable(var) + + typedef struct RVALUE { + union { +- struct { +- unsigned long flags; /* always 0 for freed obj */ +- struct RVALUE *next; +- } free; +- struct RBasic basic; +- struct RObject object; +- struct RClass klass; +- struct RFloat flonum; +- struct RString string; +- struct RArray array; +- struct RRegexp regexp; +- struct RHash hash; +- struct RData data; +- struct RStruct rstruct; +- struct RBignum bignum; +- struct RFile file; +- struct RNode node; +- struct RMatch match; +- struct RVarmap varmap; +- struct SCOPE scope; ++ struct { ++ unsigned long flags; /* always 0 for freed obj */ ++ struct RVALUE *next; ++ } free; ++ struct RBasic basic; ++ struct RObject object; ++ struct RClass klass; ++ struct RFloat flonum; ++ struct RString string; ++ struct RArray array; ++ struct RRegexp regexp; ++ struct RHash hash; ++ struct RData data; ++ struct RStruct rstruct; ++ struct RBignum bignum; ++ struct RFile file; ++ struct RNode node; ++ struct RMatch match; ++ struct RVarmap varmap; ++ struct SCOPE scope; + } as; + #ifdef GC_DEBUG + char *file; +@@ -346,7 +460,7 @@ typedef struct RVALUE { + static RVALUE *freelist = 0; + static RVALUE *deferred_final_list = 0; + +-#define HEAPS_INCREMENT 10 ++static int heaps_increment = 10; + static struct heaps_slot { + void *membase; + RVALUE *slot; +@@ -355,45 +469,197 @@ static struct heaps_slot { + static int heaps_length = 0; + static int heaps_used = 0; + +-#define HEAP_MIN_SLOTS 10000 +-static int heap_slots = HEAP_MIN_SLOTS; ++static int heap_min_slots = 10000; ++static int heap_slots = 10000; + +-#define FREE_MIN 4096 ++static int heap_free_min = 4096; ++static int heap_slots_increment = 10000; ++static double heap_slots_growth_factor = 1.8; ++ ++static long initial_malloc_limit = GC_MALLOC_LIMIT; ++ ++static int verbose_gc_stats = Qfalse; ++ ++static FILE* gc_data_file = NULL; + + static RVALUE *himem, *lomem; + ++static void set_gc_parameters() ++{ ++ char *gc_stats_ptr, *min_slots_ptr, *free_min_ptr, *heap_slots_incr_ptr, ++ *heap_incr_ptr, *malloc_limit_ptr, *gc_heap_file_ptr, 
*heap_slots_growth_factor_ptr; ++ ++ gc_data_file = stderr; ++ ++ gc_stats_ptr = getenv("RUBY_GC_STATS"); ++ if (gc_stats_ptr != NULL) { ++ int gc_stats_i = atoi(gc_stats_ptr); ++ if (gc_stats_i > 0) { ++ verbose_gc_stats = Qtrue; ++ } ++ } ++ ++ gc_heap_file_ptr = getenv("RUBY_GC_DATA_FILE"); ++ if (gc_heap_file_ptr != NULL) { ++ FILE* data_file = fopen(gc_heap_file_ptr, "w"); ++ if (data_file != NULL) { ++ gc_data_file = data_file; ++ } ++ else { ++ fprintf(stderr, ++ "can't open gc log file %s for writing, using default\n", gc_heap_file_ptr); ++ } ++ } ++ ++ min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS"); ++ if (min_slots_ptr != NULL) { ++ int min_slots_i = atoi(min_slots_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_MIN_SLOTS=%s\n", min_slots_ptr); ++ } ++ if (min_slots_i > 0) { ++ heap_slots = min_slots_i; ++ heap_min_slots = min_slots_i; ++ } ++ } ++ ++ free_min_ptr = getenv("RUBY_HEAP_FREE_MIN"); ++ if (free_min_ptr != NULL) { ++ int free_min_i = atoi(free_min_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_FREE_MIN=%s\n", free_min_ptr); ++ } ++ if (free_min_i > 0) { ++ heap_free_min = free_min_i; ++ } ++ } ++ ++ heap_incr_ptr = getenv("RUBY_HEAP_INCREMENT"); ++ if (heap_incr_ptr != NULL) { ++ int heap_incr_i = atoi(heap_incr_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_INCREMENT=%s\n", heap_incr_ptr); ++ } ++ if (heap_incr_i > 0) { ++ heaps_increment = heap_incr_i; ++ } ++ } ++ ++ heap_slots_incr_ptr = getenv("RUBY_HEAP_SLOTS_INCREMENT"); ++ if (heap_slots_incr_ptr != NULL) { ++ int heap_slots_incr_i = atoi(heap_slots_incr_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_SLOTS_INCREMENT=%s\n", heap_slots_incr_ptr); ++ } ++ if (heap_slots_incr_i > 0) { ++ heap_slots_increment = heap_slots_incr_i; ++ } ++ } ++ ++ heap_slots_growth_factor_ptr = getenv("RUBY_HEAP_SLOTS_GROWTH_FACTOR"); ++ if (heap_slots_growth_factor_ptr != NULL) { ++ double heap_slots_growth_factor_d = atoi(heap_slots_growth_factor_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_SLOTS_GROWTH_FACTOR=%s\n", heap_slots_growth_factor_ptr); ++ } ++ if (heap_slots_growth_factor_d > 0) { ++ heap_slots_growth_factor = heap_slots_growth_factor_d; ++ } ++ } ++ ++ malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT"); ++ if (malloc_limit_ptr != NULL) { ++ int malloc_limit_i = atol(malloc_limit_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_GC_MALLOC_LIMIT=%s\n", malloc_limit_ptr); ++ } ++ if (malloc_limit_i > 0) { ++ initial_malloc_limit = malloc_limit_i; ++ } ++ } ++} ++ ++/* ++ * call-seq: ++ * GC.dump => nil ++ * ++ * dumps information about the current GC data structures to the GC log file ++ * ++ * GC.dump #=> nil ++ * ++ */ ++ ++VALUE ++rb_gc_dump() ++{ ++ int i; ++ ++ for (i = 0; i < heaps_used; i++) { ++ int heap_size = heaps[i].limit; ++ fprintf(gc_data_file, "HEAP[%2d]: size=%7d\n", i, heap_size); ++ } ++ ++ return Qnil; ++} ++ ++/* ++ * call-seq: ++ * GC.log String => String ++ * ++ * Logs string to the GC data file and returns it. 
++ * ++ * GC.log "manual GC call" #=> "manual GC call" ++ * ++ */ ++ ++VALUE ++rb_gc_log(self, original_str) ++ VALUE self, original_str; ++{ ++ if (original_str == Qnil) { ++ fprintf(gc_data_file, "\n"); ++ } ++ else { ++ VALUE str = StringValue(original_str); ++ char *p = RSTRING(str)->ptr; ++ fprintf(gc_data_file, "%s\n", p); ++ } ++ return original_str; ++} ++ ++ + static void + add_heap() + { + RVALUE *p, *pend; + + if (heaps_used == heaps_length) { +- /* Realloc heaps */ +- struct heaps_slot *p; +- int length; +- +- heaps_length += HEAPS_INCREMENT; +- length = heaps_length*sizeof(struct heaps_slot); +- RUBY_CRITICAL( +- if (heaps_used > 0) { +- p = (struct heaps_slot *)realloc(heaps, length); +- if (p) heaps = p; +- } +- else { +- p = heaps = (struct heaps_slot *)malloc(length); +- }); +- if (p == 0) rb_memerror(); ++ /* Realloc heaps */ ++ struct heaps_slot *p; ++ int length; ++ ++ heaps_length += heaps_increment; ++ length = heaps_length*sizeof(struct heaps_slot); ++ RUBY_CRITICAL( ++ if (heaps_used > 0) { ++ p = (struct heaps_slot *)realloc(heaps, length); ++ if (p) heaps = p; ++ } ++ else { ++ p = heaps = (struct heaps_slot *)malloc(length); ++ }); ++ if (p == 0) rb_memerror(); + } + + for (;;) { +- RUBY_CRITICAL(p = (RVALUE*)malloc(sizeof(RVALUE)*(heap_slots+1))); +- if (p == 0) { +- if (heap_slots == HEAP_MIN_SLOTS) { +- rb_memerror(); +- } +- heap_slots = HEAP_MIN_SLOTS; +- continue; +- } ++ RUBY_CRITICAL(p = (RVALUE*)malloc(sizeof(RVALUE)*(heap_slots+1))); ++ if (p == 0) { ++ if (heap_slots == heap_min_slots) { ++ rb_memerror(); ++ } ++ heap_slots = heap_min_slots; ++ continue; ++ } + heaps[heaps_used].membase = p; + if ((VALUE)p % sizeof(RVALUE) == 0) + heap_slots += 1; +@@ -401,25 +667,26 @@ add_heap() + p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE))); + heaps[heaps_used].slot = p; + heaps[heaps_used].limit = heap_slots; +- break; ++ break; + } + pend = p + heap_slots; + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; + heaps_used++; +- heap_slots *= 1.8; +- if (heap_slots <= 0) heap_slots = HEAP_MIN_SLOTS; ++ heap_slots += heap_slots_increment; ++ heap_slots_increment *= heap_slots_growth_factor; ++ if (heap_slots <= 0) heap_slots = heap_min_slots; + + while (p < pend) { +- p->as.free.flags = 0; +- p->as.free.next = freelist; +- freelist = p; +- p++; ++ p->as.free.flags = 0; ++ p->as.free.next = freelist; ++ freelist = p; ++ p++; + } + } + #define RANY(o) ((RVALUE*)(o)) + +-int ++int + rb_during_gc() + { + return during_gc; +@@ -431,7 +698,7 @@ rb_newobj() + VALUE obj; + + if (during_gc) +- rb_bug("object allocation during garbage collection phase"); ++ rb_bug("object allocation during garbage collection phase"); + + if (ruby_gc_stress || !freelist) garbage_collect(); + +@@ -580,13 +847,13 @@ rb_source_filename(f) + st_data_t name; + + if (!st_lookup(source_filenames, (st_data_t)f, &name)) { +- long len = strlen(f) + 1; +- char *ptr = ALLOC_N(char, len + 1); +- name = (st_data_t)ptr; +- *ptr++ = 0; +- MEMCPY(ptr, f, char, len); +- st_add_direct(source_filenames, (st_data_t)ptr, name); +- return ptr; ++ long len = strlen(f) + 1; ++ char *ptr = ALLOC_N(char, len + 1); ++ name = (st_data_t)ptr; ++ *ptr++ = 0; ++ MEMCPY(ptr, f, char, len); ++ st_add_direct(source_filenames, (st_data_t)ptr, name); ++ return ptr; + } + return (char *)name + 1; + } +@@ -596,7 +863,7 @@ mark_source_filename(f) + char *f; + { + if (f) { +- f[-1] = 1; ++ f[-1] = 1; + } + } + +@@ -605,12 +872,12 @@ sweep_source_filename(key, value) + char *key, *value; 
+ { + if (*value) { +- *value = 0; +- return ST_CONTINUE; ++ *value = 0; ++ return ST_CONTINUE; + } + else { +- free(value); +- return ST_DELETE; ++ free(value); ++ return ST_DELETE; + } + } + +@@ -625,14 +892,14 @@ gc_mark_all() + + init_mark_stack(); + for (i = 0; i < heaps_used; i++) { +- p = heaps[i].slot; pend = p + heaps[i].limit; +- while (p < pend) { +- if ((p->as.basic.flags & FL_MARK) && +- (p->as.basic.flags != FL_MARK)) { +- gc_mark_children((VALUE)p, 0); +- } +- p++; +- } ++ p = heaps[i].slot; pend = p + heaps[i].limit; ++ while (p < pend) { ++ if ((p->as.basic.flags & FL_MARK) && ++ (p->as.basic.flags != FL_MARK)) { ++ gc_mark_children((VALUE)p, 0); ++ } ++ p++; ++ } + } + } + +@@ -647,8 +914,8 @@ gc_mark_rest() + + init_mark_stack(); + while(p != tmp_arry){ +- p--; +- gc_mark_children(*p, 0); ++ p--; ++ gc_mark_children(*p, 0); + } + } + +@@ -665,9 +932,9 @@ is_pointer_to_heap(ptr) + + /* check if p looks like a pointer */ + for (i=0; i < heaps_used; i++) { +- heap_org = heaps[i].slot; +- if (heap_org <= p && p < heap_org + heaps[i].limit) +- return Qtrue; ++ heap_org = heaps[i].slot; ++ if (heap_org <= p && p < heap_org + heaps[i].limit) ++ return Qtrue; + } + return Qfalse; + } +@@ -680,10 +947,10 @@ mark_locations_array(x, n) + VALUE v; + while (n--) { + v = *x; +- if (is_pointer_to_heap((void *)v)) { +- gc_mark(v, 0); +- } +- x++; ++ if (is_pointer_to_heap((void *)v)) { ++ gc_mark(v, 0); ++ } ++ x++; + } + } + +@@ -780,7 +1047,7 @@ rb_gc_mark_maybe(obj) + VALUE obj; + { + if (is_pointer_to_heap((void *)obj)) { +- gc_mark(obj, 0); ++ gc_mark(obj, 0); + } + } + +@@ -828,7 +1095,7 @@ gc_mark_children(ptr, lev) + { + register RVALUE *obj = RANY(ptr); + +- goto marking; /* skip */ ++ goto marking; /* skip */ + + again: + obj = RANY(ptr); +@@ -839,148 +1106,148 @@ gc_mark_children(ptr, lev) + + marking: + if (FL_TEST(obj, FL_EXIVAR)) { +- rb_mark_generic_ivar(ptr); ++ rb_mark_generic_ivar(ptr); + } + + switch (obj->as.basic.flags & T_MASK) { + case T_NIL: + case T_FIXNUM: +- rb_bug("rb_gc_mark() called for broken object"); +- break; ++ rb_bug("rb_gc_mark() called for broken object"); ++ break; + + case T_NODE: +- mark_source_filename(obj->as.node.nd_file); +- switch (nd_type(obj)) { +- case NODE_IF: /* 1,2,3 */ +- case NODE_FOR: +- case NODE_ITER: +- case NODE_CREF: +- case NODE_WHEN: +- case NODE_MASGN: +- case NODE_RESCUE: +- case NODE_RESBODY: +- case NODE_CLASS: +- gc_mark((VALUE)obj->as.node.u2.node, lev); +- /* fall through */ +- case NODE_BLOCK: /* 1,3 */ +- case NODE_ARRAY: +- case NODE_DSTR: +- case NODE_DXSTR: +- case NODE_DREGX: +- case NODE_DREGX_ONCE: +- case NODE_FBODY: +- case NODE_ENSURE: +- case NODE_CALL: +- case NODE_DEFS: +- case NODE_OP_ASGN1: +- gc_mark((VALUE)obj->as.node.u1.node, lev); +- /* fall through */ +- case NODE_SUPER: /* 3 */ +- case NODE_FCALL: +- case NODE_DEFN: +- case NODE_NEWLINE: +- ptr = (VALUE)obj->as.node.u3.node; +- goto again; +- +- case NODE_WHILE: /* 1,2 */ +- case NODE_UNTIL: +- case NODE_AND: +- case NODE_OR: +- case NODE_CASE: +- case NODE_SCLASS: +- case NODE_DOT2: +- case NODE_DOT3: +- case NODE_FLIP2: +- case NODE_FLIP3: +- case NODE_MATCH2: +- case NODE_MATCH3: +- case NODE_OP_ASGN_OR: +- case NODE_OP_ASGN_AND: +- case NODE_MODULE: +- case NODE_ALIAS: +- case NODE_VALIAS: +- case NODE_ARGS: +- gc_mark((VALUE)obj->as.node.u1.node, lev); +- /* fall through */ +- case NODE_METHOD: /* 2 */ +- case NODE_NOT: +- case NODE_GASGN: +- case NODE_LASGN: +- case NODE_DASGN: +- case NODE_DASGN_CURR: +- case NODE_IASGN: +- case 
NODE_CVDECL: +- case NODE_CVASGN: +- case NODE_COLON3: +- case NODE_OPT_N: +- case NODE_EVSTR: +- case NODE_UNDEF: +- ptr = (VALUE)obj->as.node.u2.node; +- goto again; +- +- case NODE_HASH: /* 1 */ +- case NODE_LIT: +- case NODE_STR: +- case NODE_XSTR: +- case NODE_DEFINED: +- case NODE_MATCH: +- case NODE_RETURN: +- case NODE_BREAK: +- case NODE_NEXT: +- case NODE_YIELD: +- case NODE_COLON2: +- case NODE_SPLAT: +- case NODE_TO_ARY: +- case NODE_SVALUE: +- ptr = (VALUE)obj->as.node.u1.node; +- goto again; +- +- case NODE_SCOPE: /* 2,3 */ +- case NODE_BLOCK_PASS: +- case NODE_CDECL: +- gc_mark((VALUE)obj->as.node.u3.node, lev); +- ptr = (VALUE)obj->as.node.u2.node; +- goto again; +- +- case NODE_ZARRAY: /* - */ +- case NODE_ZSUPER: +- case NODE_CFUNC: +- case NODE_VCALL: +- case NODE_GVAR: +- case NODE_LVAR: +- case NODE_DVAR: +- case NODE_IVAR: +- case NODE_CVAR: +- case NODE_NTH_REF: +- case NODE_BACK_REF: +- case NODE_REDO: +- case NODE_RETRY: +- case NODE_SELF: +- case NODE_NIL: +- case NODE_TRUE: +- case NODE_FALSE: +- case NODE_ATTRSET: +- case NODE_BLOCK_ARG: +- case NODE_POSTEXE: +- break; +- case NODE_ALLOCA: +- mark_locations_array((VALUE*)obj->as.node.u1.value, +- obj->as.node.u3.cnt); +- ptr = (VALUE)obj->as.node.u2.node; +- goto again; +- +- default: /* unlisted NODE */ +- if (is_pointer_to_heap(obj->as.node.u1.node)) { +- gc_mark((VALUE)obj->as.node.u1.node, lev); +- } +- if (is_pointer_to_heap(obj->as.node.u2.node)) { +- gc_mark((VALUE)obj->as.node.u2.node, lev); +- } +- if (is_pointer_to_heap(obj->as.node.u3.node)) { +- gc_mark((VALUE)obj->as.node.u3.node, lev); +- } +- } +- return; /* no need to mark class. */ ++ mark_source_filename(obj->as.node.nd_file); ++ switch (nd_type(obj)) { ++ case NODE_IF: /* 1,2,3 */ ++ case NODE_FOR: ++ case NODE_ITER: ++ case NODE_CREF: ++ case NODE_WHEN: ++ case NODE_MASGN: ++ case NODE_RESCUE: ++ case NODE_RESBODY: ++ case NODE_CLASS: ++ gc_mark((VALUE)obj->as.node.u2.node, lev); ++ /* fall through */ ++ case NODE_BLOCK: /* 1,3 */ ++ case NODE_ARRAY: ++ case NODE_DSTR: ++ case NODE_DXSTR: ++ case NODE_DREGX: ++ case NODE_DREGX_ONCE: ++ case NODE_FBODY: ++ case NODE_ENSURE: ++ case NODE_CALL: ++ case NODE_DEFS: ++ case NODE_OP_ASGN1: ++ gc_mark((VALUE)obj->as.node.u1.node, lev); ++ /* fall through */ ++ case NODE_SUPER: /* 3 */ ++ case NODE_FCALL: ++ case NODE_DEFN: ++ case NODE_NEWLINE: ++ ptr = (VALUE)obj->as.node.u3.node; ++ goto again; ++ ++ case NODE_WHILE: /* 1,2 */ ++ case NODE_UNTIL: ++ case NODE_AND: ++ case NODE_OR: ++ case NODE_CASE: ++ case NODE_SCLASS: ++ case NODE_DOT2: ++ case NODE_DOT3: ++ case NODE_FLIP2: ++ case NODE_FLIP3: ++ case NODE_MATCH2: ++ case NODE_MATCH3: ++ case NODE_OP_ASGN_OR: ++ case NODE_OP_ASGN_AND: ++ case NODE_MODULE: ++ case NODE_ALIAS: ++ case NODE_VALIAS: ++ case NODE_ARGS: ++ gc_mark((VALUE)obj->as.node.u1.node, lev); ++ /* fall through */ ++ case NODE_METHOD: /* 2 */ ++ case NODE_NOT: ++ case NODE_GASGN: ++ case NODE_LASGN: ++ case NODE_DASGN: ++ case NODE_DASGN_CURR: ++ case NODE_IASGN: ++ case NODE_CVDECL: ++ case NODE_CVASGN: ++ case NODE_COLON3: ++ case NODE_OPT_N: ++ case NODE_EVSTR: ++ case NODE_UNDEF: ++ ptr = (VALUE)obj->as.node.u2.node; ++ goto again; ++ ++ case NODE_HASH: /* 1 */ ++ case NODE_LIT: ++ case NODE_STR: ++ case NODE_XSTR: ++ case NODE_DEFINED: ++ case NODE_MATCH: ++ case NODE_RETURN: ++ case NODE_BREAK: ++ case NODE_NEXT: ++ case NODE_YIELD: ++ case NODE_COLON2: ++ case NODE_SPLAT: ++ case NODE_TO_ARY: ++ case NODE_SVALUE: ++ ptr = (VALUE)obj->as.node.u1.node; ++ goto again; ++ ++ 
case NODE_SCOPE: /* 2,3 */ ++ case NODE_BLOCK_PASS: ++ case NODE_CDECL: ++ gc_mark((VALUE)obj->as.node.u3.node, lev); ++ ptr = (VALUE)obj->as.node.u2.node; ++ goto again; ++ ++ case NODE_ZARRAY: /* - */ ++ case NODE_ZSUPER: ++ case NODE_CFUNC: ++ case NODE_VCALL: ++ case NODE_GVAR: ++ case NODE_LVAR: ++ case NODE_DVAR: ++ case NODE_IVAR: ++ case NODE_CVAR: ++ case NODE_NTH_REF: ++ case NODE_BACK_REF: ++ case NODE_REDO: ++ case NODE_RETRY: ++ case NODE_SELF: ++ case NODE_NIL: ++ case NODE_TRUE: ++ case NODE_FALSE: ++ case NODE_ATTRSET: ++ case NODE_BLOCK_ARG: ++ case NODE_POSTEXE: ++ break; ++ case NODE_ALLOCA: ++ mark_locations_array((VALUE*)obj->as.node.u1.value, ++ obj->as.node.u3.cnt); ++ ptr = (VALUE)obj->as.node.u2.node; ++ goto again; ++ ++ default: /* unlisted NODE */ ++ if (is_pointer_to_heap(obj->as.node.u1.node)) { ++ gc_mark((VALUE)obj->as.node.u1.node, lev); ++ } ++ if (is_pointer_to_heap(obj->as.node.u2.node)) { ++ gc_mark((VALUE)obj->as.node.u2.node, lev); ++ } ++ if (is_pointer_to_heap(obj->as.node.u3.node)) { ++ gc_mark((VALUE)obj->as.node.u3.node, lev); ++ } ++ } ++ return; /* no need to mark class. */ + } + + gc_mark(obj->as.basic.klass, lev); +@@ -988,92 +1255,92 @@ gc_mark_children(ptr, lev) + case T_ICLASS: + case T_CLASS: + case T_MODULE: +- mark_tbl(obj->as.klass.m_tbl, lev); +- mark_tbl(obj->as.klass.iv_tbl, lev); +- ptr = obj->as.klass.super; +- goto again; ++ mark_tbl(obj->as.klass.m_tbl, lev); ++ mark_tbl(obj->as.klass.iv_tbl, lev); ++ ptr = obj->as.klass.super; ++ goto again; + + case T_ARRAY: +- if (FL_TEST(obj, ELTS_SHARED)) { +- ptr = obj->as.array.aux.shared; +- goto again; +- } +- else { +- long i, len = obj->as.array.len; +- VALUE *ptr = obj->as.array.ptr; ++ if (FL_TEST(obj, ELTS_SHARED)) { ++ ptr = obj->as.array.aux.shared; ++ goto again; ++ } ++ else { ++ long i, len = obj->as.array.len; ++ VALUE *ptr = obj->as.array.ptr; + +- for (i=0; i < len; i++) { +- gc_mark(*ptr++, lev); +- } +- } +- break; ++ for (i=0; i < len; i++) { ++ gc_mark(*ptr++, lev); ++ } ++ } ++ break; + + case T_HASH: +- mark_hash(obj->as.hash.tbl, lev); +- ptr = obj->as.hash.ifnone; +- goto again; ++ mark_hash(obj->as.hash.tbl, lev); ++ ptr = obj->as.hash.ifnone; ++ goto again; + + case T_STRING: + #define STR_ASSOC FL_USER3 /* copied from string.c */ +- if (FL_TEST(obj, ELTS_SHARED|STR_ASSOC)) { +- ptr = obj->as.string.aux.shared; +- goto again; +- } +- break; ++ if (FL_TEST(obj, ELTS_SHARED|STR_ASSOC)) { ++ ptr = obj->as.string.aux.shared; ++ goto again; ++ } ++ break; + + case T_DATA: +- if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj)); +- break; ++ if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj)); ++ break; + + case T_OBJECT: +- mark_tbl(obj->as.object.iv_tbl, lev); +- break; ++ mark_tbl(obj->as.object.iv_tbl, lev); ++ break; + + case T_FILE: + case T_REGEXP: + case T_FLOAT: + case T_BIGNUM: + case T_BLKTAG: +- break; ++ break; + + case T_MATCH: +- if (obj->as.match.str) { +- ptr = obj->as.match.str; +- goto again; +- } +- break; ++ if (obj->as.match.str) { ++ ptr = obj->as.match.str; ++ goto again; ++ } ++ break; + + case T_VARMAP: +- gc_mark(obj->as.varmap.val, lev); +- ptr = (VALUE)obj->as.varmap.next; +- goto again; ++ gc_mark(obj->as.varmap.val, lev); ++ ptr = (VALUE)obj->as.varmap.next; ++ goto again; + + case T_SCOPE: +- if (obj->as.scope.local_vars && (obj->as.scope.flags & SCOPE_MALLOC)) { +- int n = obj->as.scope.local_tbl[0]+1; +- VALUE *vars = &obj->as.scope.local_vars[-1]; ++ if (obj->as.scope.local_vars && (obj->as.scope.flags & SCOPE_MALLOC)) 
{ ++ int n = obj->as.scope.local_tbl[0]+1; ++ VALUE *vars = &obj->as.scope.local_vars[-1]; + +- while (n--) { +- gc_mark(*vars++, lev); +- } +- } +- break; ++ while (n--) { ++ gc_mark(*vars++, lev); ++ } ++ } ++ break; + + case T_STRUCT: +- { +- long len = obj->as.rstruct.len; +- VALUE *ptr = obj->as.rstruct.ptr; ++ { ++ long len = obj->as.rstruct.len; ++ VALUE *ptr = obj->as.rstruct.ptr; + +- while (len--) { +- gc_mark(*ptr++, lev); +- } +- } +- break; ++ while (len--) { ++ gc_mark(*ptr++, lev); ++ } ++ } ++ break; + + default: +- rb_bug("rb_gc_mark(): unknown data type 0x%lx(0x%lx) %s", +- obj->as.basic.flags & T_MASK, obj, +- is_pointer_to_heap(obj) ? "corrupted object" : "non object"); ++ rb_bug("rb_gc_mark(): unknown data type 0x%lx(0x%lx) %s", ++ obj->as.basic.flags & T_MASK, obj, ++ is_pointer_to_heap(obj) ? "corrupted object" : "non object"); + } + } + +@@ -1102,22 +1369,55 @@ finalize_list(p) + } + } + ++static char* obj_type(int tp) ++{ ++ switch (tp) { ++ case T_NIL : return "NIL"; ++ case T_OBJECT : return "OBJECT"; ++ case T_CLASS : return "CLASS"; ++ case T_ICLASS : return "ICLASS"; ++ case T_MODULE : return "MODULE"; ++ case T_FLOAT : return "FLOAT"; ++ case T_STRING : return "STRING"; ++ case T_REGEXP : return "REGEXP"; ++ case T_ARRAY : return "ARRAY"; ++ case T_FIXNUM : return "FIXNUM"; ++ case T_HASH : return "HASH"; ++ case T_STRUCT : return "STRUCT"; ++ case T_BIGNUM : return "BIGNUM"; ++ case T_FILE : return "FILE"; ++ ++ case T_TRUE : return "TRUE"; ++ case T_FALSE : return "FALSE"; ++ case T_DATA : return "DATA"; ++ case T_MATCH : return "MATCH"; ++ case T_SYMBOL : return "SYMBOL"; ++ ++ case T_BLKTAG : return "BLKTAG"; ++ case T_UNDEF : return "UNDEF"; ++ case T_VARMAP : return "VARMAP"; ++ case T_SCOPE : return "SCOPE"; ++ case T_NODE : return "NODE"; ++ default: return "____"; ++ } ++} ++ + static void + free_unused_heaps() + { + int i, j; + + for (i = j = 1; j < heaps_used; i++) { +- if (heaps[i].limit == 0) { +- free(heaps[i].membase); +- heaps_used--; +- } +- else { +- if (i != j) { +- heaps[j] = heaps[i]; +- } +- j++; +- } ++ if (heaps[i].limit == 0) { ++ free(heaps[i].membase); ++ heaps_used--; ++ } ++ else { ++ if (i != j) { ++ heaps[j] = heaps[i]; ++ } ++ j++; ++ } + } + } + +@@ -1134,24 +1434,33 @@ gc_sweep() + unsigned long live = 0; + unsigned long free_min = 0; + ++ unsigned long really_freed = 0; ++ int free_counts[256]; ++ int live_counts[256]; ++ int do_gc_stats = gc_statistics & verbose_gc_stats; ++ + for (i = 0; i < heaps_used; i++) { + free_min += heaps[i].limit; + } + free_min = free_min * 0.2; +- if (free_min < FREE_MIN) +- free_min = FREE_MIN; ++ if (free_min < heap_free_min) ++ free_min = heap_free_min; ++ ++ if (do_gc_stats) { ++ for (i = 0 ; i< 256; i++) { free_counts[i] = live_counts[i] = 0; } ++ } + + if (ruby_in_compile && ruby_parser_stack_on_heap()) { +- /* should not reclaim nodes during compilation ++ /* should not reclaim nodes during compilation + if yacc's semantic stack is not allocated on machine stack */ +- for (i = 0; i < heaps_used; i++) { +- p = heaps[i].slot; pend = p + heaps[i].limit; +- while (p < pend) { +- if (!(p->as.basic.flags&FL_MARK) && BUILTIN_TYPE(p) == T_NODE) +- gc_mark((VALUE)p, 0); +- p++; +- } +- } ++ for (i = 0; i < heaps_used; i++) { ++ p = heaps[i].slot; pend = p + heaps[i].limit; ++ while (p < pend) { ++ if (!(p->as.basic.flags&FL_MARK) && BUILTIN_TYPE(p) == T_NODE) ++ gc_mark((VALUE)p, 0); ++ p++; ++ } ++ } + } + + mark_source_filename(ruby_sourcefile); +@@ -1172,7 +1481,7 @@ gc_sweep() + while (p < 
pend) { + if (!(p->as.basic.flags & FL_MARK)) { + if (p->as.basic.flags && +- ((deferred = obj_free((VALUE)p)) || ++ (((do_gc_stats && really_freed++), deferred = obj_free((VALUE)p)) || + ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) { + if (!deferred) { + p->as.free.flags = T_DEFERRED; +@@ -1183,6 +1492,12 @@ gc_sweep() + final_list = p; + } + else { ++ if (do_gc_stats) { ++ int obt = p->as.basic.flags & T_MASK; ++ if (obt) { ++ free_counts[obt]++; ++ } ++ } + add_freelist(p); + } + n++; +@@ -1194,6 +1509,9 @@ gc_sweep() + else { + RBASIC(p)->flags &= ~FL_MARK; + live++; ++ if (do_gc_stats) { ++ live_counts[RANY((VALUE)p)->as.basic.flags & T_MASK]++; ++ } + } + p++; + } +@@ -1211,15 +1529,29 @@ gc_sweep() + } + } + if (malloc_increase > malloc_limit) { +- malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed); +- if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT; ++ malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed); ++ if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit; + } + malloc_increase = 0; + if (freed < free_min) { +- add_heap(); ++ add_heap(); + } + during_gc = 0; + ++ if (do_gc_stats) { ++ fprintf(gc_data_file, "objects processed: %.7d\n", live+freed); ++ fprintf(gc_data_file, "live objects : %.7d\n", live); ++ fprintf(gc_data_file, "freelist objects : %.7d\n", freed - really_freed); ++ fprintf(gc_data_file, "freed objects : %.7d\n", really_freed); ++ for(i=0; i<256; i++) { ++ if (free_counts[i]>0 || live_counts[i]>0) { ++ fprintf(gc_data_file, ++ "kept %.7d / freed %.7d objects of type %s\n", ++ live_counts[i], free_counts[i], obj_type(i)); ++ } ++ } ++ } ++ + /* clear finalization list */ + if (final_list) { + deferred_final_list = final_list; +@@ -1260,51 +1592,51 @@ obj_free(obj) + case T_FIXNUM: + case T_TRUE: + case T_FALSE: +- rb_bug("obj_free() called for broken object"); +- break; ++ rb_bug("obj_free() called for broken object"); ++ break; + } + + if (FL_TEST(obj, FL_EXIVAR)) { +- rb_free_generic_ivar((VALUE)obj); ++ rb_free_generic_ivar((VALUE)obj); + } + + switch (BUILTIN_TYPE(obj)) { + case T_OBJECT: +- if (RANY(obj)->as.object.iv_tbl) { +- st_free_table(RANY(obj)->as.object.iv_tbl); +- } +- break; ++ if (RANY(obj)->as.object.iv_tbl) { ++ st_free_table(RANY(obj)->as.object.iv_tbl); ++ } ++ break; + case T_MODULE: + case T_CLASS: +- rb_clear_cache_by_class((VALUE)obj); +- st_free_table(RANY(obj)->as.klass.m_tbl); +- if (RANY(obj)->as.object.iv_tbl) { +- st_free_table(RANY(obj)->as.object.iv_tbl); +- } +- break; ++ rb_clear_cache_by_class((VALUE)obj); ++ st_free_table(RANY(obj)->as.klass.m_tbl); ++ if (RANY(obj)->as.object.iv_tbl) { ++ st_free_table(RANY(obj)->as.object.iv_tbl); ++ } ++ break; + case T_STRING: +- if (RANY(obj)->as.string.ptr && !FL_TEST(obj, ELTS_SHARED)) { +- RUBY_CRITICAL(free(RANY(obj)->as.string.ptr)); +- } +- break; ++ if (RANY(obj)->as.string.ptr && !FL_TEST(obj, ELTS_SHARED)) { ++ RUBY_CRITICAL(free(RANY(obj)->as.string.ptr)); ++ } ++ break; + case T_ARRAY: +- if (RANY(obj)->as.array.ptr && !FL_TEST(obj, ELTS_SHARED)) { +- RUBY_CRITICAL(free(RANY(obj)->as.array.ptr)); +- } +- break; ++ if (RANY(obj)->as.array.ptr && !FL_TEST(obj, ELTS_SHARED)) { ++ RUBY_CRITICAL(free(RANY(obj)->as.array.ptr)); ++ } ++ break; + case T_HASH: +- if (RANY(obj)->as.hash.tbl) { +- st_free_table(RANY(obj)->as.hash.tbl); +- } +- break; ++ if (RANY(obj)->as.hash.tbl) { ++ st_free_table(RANY(obj)->as.hash.tbl); ++ } ++ break; + case T_REGEXP: +- if 
(RANY(obj)->as.regexp.ptr) { +- re_free_pattern(RANY(obj)->as.regexp.ptr); +- } +- if (RANY(obj)->as.regexp.str) { +- RUBY_CRITICAL(free(RANY(obj)->as.regexp.str)); +- } +- break; ++ if (RANY(obj)->as.regexp.ptr) { ++ re_free_pattern(RANY(obj)->as.regexp.ptr); ++ } ++ if (RANY(obj)->as.regexp.str) { ++ RUBY_CRITICAL(free(RANY(obj)->as.regexp.str)); ++ } ++ break; + case T_DATA: + if (DATA_PTR(obj)) { + if ((long)RANY(obj)->as.data.dfree == -1) { +@@ -1317,11 +1649,11 @@ obj_free(obj) + } + break; + case T_MATCH: +- if (RANY(obj)->as.match.regs) { +- re_free_registers(RANY(obj)->as.match.regs); +- RUBY_CRITICAL(free(RANY(obj)->as.match.regs)); +- } +- break; ++ if (RANY(obj)->as.match.regs) { ++ re_free_registers(RANY(obj)->as.match.regs); ++ RUBY_CRITICAL(free(RANY(obj)->as.match.regs)); ++ } ++ break; + case T_FILE: + if (RANY(obj)->as.file.fptr) { + struct rb_io_t *fptr = RANY(obj)->as.file.fptr; +@@ -1332,19 +1664,19 @@ obj_free(obj) + } + break; + case T_ICLASS: +- /* iClass shares table with the module */ +- break; ++ /* iClass shares table with the module */ ++ break; + + case T_FLOAT: + case T_VARMAP: + case T_BLKTAG: +- break; ++ break; + + case T_BIGNUM: +- if (RANY(obj)->as.bignum.digits) { +- RUBY_CRITICAL(free(RANY(obj)->as.bignum.digits)); +- } +- break; ++ if (RANY(obj)->as.bignum.digits) { ++ RUBY_CRITICAL(free(RANY(obj)->as.bignum.digits)); ++ } ++ break; + case T_NODE: + switch (nd_type(obj)) { + case NODE_SCOPE: +@@ -1359,7 +1691,7 @@ obj_free(obj) + break; /* no need to free iv_tbl */ + + case T_SCOPE: +- if (RANY(obj)->as.scope.local_vars && ++ if (RANY(obj)->as.scope.local_vars && + RANY(obj)->as.scope.flags != SCOPE_ALLOCA) { + VALUE *vars = RANY(obj)->as.scope.local_vars-1; + if (!(RANY(obj)->as.scope.flags & SCOPE_CLONE) && vars[0] == 0) +@@ -1370,14 +1702,14 @@ obj_free(obj) + break; + + case T_STRUCT: +- if (RANY(obj)->as.rstruct.ptr) { +- RUBY_CRITICAL(free(RANY(obj)->as.rstruct.ptr)); +- } +- break; ++ if (RANY(obj)->as.rstruct.ptr) { ++ RUBY_CRITICAL(free(RANY(obj)->as.rstruct.ptr)); ++ } ++ break; + + default: +- rb_bug("gc_sweep(): unknown data type 0x%lx(0x%lx)", +- RANY(obj)->as.basic.flags & T_MASK, obj); ++ rb_bug("gc_sweep(): unknown data type 0x%lx(0x%lx)", ++ RANY(obj)->as.basic.flags & T_MASK, obj); + } + + return 0; +@@ -1407,18 +1739,18 @@ _rb_setjmp:\n\ + typedef unsigned long rb_jmp_buf[6]; + __asm__ (".align 4\n\ + _rb_setjmp:\n\ +- pushl %ebp\n\ +- movl %esp,%ebp\n\ +- movl 8(%ebp),%ebp\n\ +- movl %eax,(%ebp)\n\ +- movl %ebx,4(%ebp)\n\ +- movl %ecx,8(%ebp)\n\ +- movl %edx,12(%ebp)\n\ +- movl %esi,16(%ebp)\n\ +- movl %edi,20(%ebp)\n\ +- popl %ebp\n\ +- xorl %eax,%eax\n\ +- ret"); ++ pushl %ebp\n\ ++ movl %esp,%ebp\n\ ++ movl 8(%ebp),%ebp\n\ ++ movl %eax,(%ebp)\n\ ++ movl %ebx,4(%ebp)\n\ ++ movl %ecx,8(%ebp)\n\ ++ movl %edx,12(%ebp)\n\ ++ movl %esi,16(%ebp)\n\ ++ movl %edi,20(%ebp)\n\ ++ popl %ebp\n\ ++ xorl %eax,%eax\n\ ++ ret"); + #endif + #endif + int rb_setjmp (rb_jmp_buf); +@@ -1431,41 +1763,50 @@ garbage_collect() + struct gc_list *list; + struct FRAME * volatile frame; /* gcc 2.7.2.3 -O2 bug?? 
*/ + jmp_buf save_regs_gc_mark; ++ struct timeval gctv1, gctv2; + SET_STACK_END; + + #ifdef HAVE_NATIVETHREAD + if (!is_ruby_native_thread()) { +- rb_bug("cross-thread violation on rb_gc()"); ++ rb_bug("cross-thread violation on rb_gc()"); + } + #endif + if (dont_gc || during_gc) { +- if (!freelist) { +- add_heap(); +- } +- return; ++ if (!freelist) { ++ add_heap(); ++ } ++ return; + } + if (during_gc) return; + during_gc++; + ++ if (gc_statistics) { ++ gc_collections++; ++ gettimeofday(&gctv1, NULL); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "Garbage collection started\n"); ++ } ++ } ++ + init_mark_stack(); + + gc_mark((VALUE)ruby_current_node, 0); + + /* mark frame stack */ + for (frame = ruby_frame; frame; frame = frame->prev) { +- rb_gc_mark_frame(frame); +- if (frame->tmp) { +- struct FRAME *tmp = frame->tmp; +- while (tmp) { +- rb_gc_mark_frame(tmp); +- tmp = tmp->prev; +- } +- } ++ rb_gc_mark_frame(frame); ++ if (frame->tmp) { ++ struct FRAME *tmp = frame->tmp; ++ while (tmp) { ++ rb_gc_mark_frame(tmp); ++ tmp = tmp->prev; ++ } ++ } + } + gc_mark((VALUE)ruby_scope, 0); + gc_mark((VALUE)ruby_dyna_vars, 0); + if (finalizer_table) { +- mark_tbl(finalizer_table, 0); ++ mark_tbl(finalizer_table, 0); + } + + FLUSH_REGISTER_WINDOWS; +@@ -1478,9 +1819,9 @@ garbage_collect() + rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1); + #else + if ((VALUE*)STACK_END < rb_gc_stack_start) +- rb_gc_mark_locations((VALUE*)STACK_END, rb_gc_stack_start); ++ rb_gc_mark_locations((VALUE*)STACK_END, rb_gc_stack_start); + else +- rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1); ++ rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1); + #endif + #ifdef __ia64 + /* mark backing store (flushed register window on the stack) */ +@@ -1489,13 +1830,13 @@ garbage_collect() + #endif + #if defined(__human68k__) || defined(__mc68000__) + rb_gc_mark_locations((VALUE*)((char*)STACK_END + 2), +- (VALUE*)((char*)rb_gc_stack_start + 2)); ++ (VALUE*)((char*)rb_gc_stack_start + 2)); + #endif + rb_gc_mark_threads(); + + /* mark protected global variables */ + for (list = global_List; list; list = list->next) { +- rb_gc_mark_maybe(*list->varptr); ++ rb_gc_mark_maybe(*list->varptr); + } + rb_mark_end_proc(); + rb_gc_mark_global_tbl(); +@@ -1510,18 +1851,30 @@ garbage_collect() + + /* gc_mark objects whose marking are not completed*/ + do { +- while (!MARK_STACK_EMPTY) { +- if (mark_stack_overflow){ +- gc_mark_all(); +- } +- else { +- gc_mark_rest(); +- } +- } +- rb_gc_abort_threads(); ++ while (!MARK_STACK_EMPTY) { ++ if (mark_stack_overflow){ ++ gc_mark_all(); ++ } ++ else { ++ gc_mark_rest(); ++ } ++ } ++ rb_gc_abort_threads(); + } while (!MARK_STACK_EMPTY); + + gc_sweep(); ++ ++ if (gc_statistics) { ++ GC_TIME_TYPE musecs_used; ++ gettimeofday(&gctv2, NULL); ++ musecs_used = ((GC_TIME_TYPE)(gctv2.tv_sec - gctv1.tv_sec) * 1000000) + (gctv2.tv_usec - gctv1.tv_usec); ++ gc_time += musecs_used; ++ ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "GC time: %d msec\n", musecs_used / 1000); ++ fflush(gc_data_file); ++ } ++ } + } + + void +@@ -1588,8 +1941,8 @@ Init_stack(addr) + memset(&m, 0, sizeof(m)); + VirtualQuery(&m, &m, sizeof(m)); + rb_gc_stack_start = +- STACK_UPPER((VALUE *)&m, (VALUE *)m.BaseAddress, +- (VALUE *)((char *)m.BaseAddress + m.RegionSize) - 1); ++ STACK_UPPER((VALUE *)&m, (VALUE *)m.BaseAddress, ++ (VALUE *)((char *)m.BaseAddress + m.RegionSize) - 1); + #elif defined(STACK_END_ADDRESS) + { + extern void *STACK_END_ADDRESS; +@@ -1599,24 +1952,24 @@ 
Init_stack(addr) + if (!addr) addr = (void *)&addr; + STACK_UPPER(&addr, addr, ++addr); + if (rb_gc_stack_start) { +- if (STACK_UPPER(&addr, +- rb_gc_stack_start > addr, +- rb_gc_stack_start < addr)) +- rb_gc_stack_start = addr; +- return; ++ if (STACK_UPPER(&addr, ++ rb_gc_stack_start > addr, ++ rb_gc_stack_start < addr)) ++ rb_gc_stack_start = addr; ++ return; + } + rb_gc_stack_start = addr; + #endif + #ifdef HAVE_GETRLIMIT + { +- struct rlimit rlim; ++ struct rlimit rlim; + +- if (getrlimit(RLIMIT_STACK, &rlim) == 0) { +- unsigned int space = rlim.rlim_cur/5; ++ if (getrlimit(RLIMIT_STACK, &rlim) == 0) { ++ unsigned int space = rlim.rlim_cur/5; + +- if (space > 1024*1024) space = 1024*1024; +- STACK_LEVEL_MAX = (rlim.rlim_cur - space) / sizeof(VALUE); +- } ++ if (space > 1024*1024) space = 1024*1024; ++ STACK_LEVEL_MAX = (rlim.rlim_cur - space) / sizeof(VALUE); ++ } + } + #endif + } +@@ -1652,16 +2005,16 @@ void ruby_init_stack(VALUE *addr + } + #elif defined _WIN32 + { +- MEMORY_BASIC_INFORMATION mi; +- DWORD size; +- DWORD space; +- +- if (VirtualQuery(&mi, &mi, sizeof(mi))) { +- size = (char *)mi.BaseAddress - (char *)mi.AllocationBase; +- space = size / 5; +- if (space > 1024*1024) space = 1024*1024; +- STACK_LEVEL_MAX = (size - space) / sizeof(VALUE); +- } ++ MEMORY_BASIC_INFORMATION mi; ++ DWORD size; ++ DWORD space; ++ ++ if (VirtualQuery(&mi, &mi, sizeof(mi))) { ++ size = (char *)mi.BaseAddress - (char *)mi.AllocationBase; ++ space = size / 5; ++ if (space > 1024*1024) space = 1024*1024; ++ STACK_LEVEL_MAX = (size - space) / sizeof(VALUE); ++ } + } + #endif + } +@@ -1701,8 +2054,9 @@ void + Init_heap() + { + if (!rb_gc_stack_start) { +- Init_stack(0); ++ Init_stack(0); + } ++ set_gc_parameters(); + add_heap(); + } + +@@ -1715,7 +2069,7 @@ os_obj_of(of) + volatile VALUE v; + + for (i = 0; i < heaps_used; i++) { +- RVALUE *p, *pend; ++ RVALUE *p, *pend; + + p = heaps[i].slot; pend = p + heaps[i].limit; + for (;p < pend; p++) { +@@ -1808,8 +2162,8 @@ add_final(os, block) + { + rb_warn("ObjectSpace::add_finalizer is deprecated; use define_finalizer"); + if (!rb_respond_to(block, rb_intern("call"))) { +- rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", +- rb_obj_classname(block)); ++ rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", ++ rb_obj_classname(block)); + } + rb_ary_push(finalizers, block); + return block; +@@ -1864,7 +2218,7 @@ undefine_final(os, obj) + VALUE os, obj; + { + if (finalizer_table) { +- st_delete(finalizer_table, (st_data_t*)&obj, 0); ++ st_delete(finalizer_table, (st_data_t*)&obj, 0); + } + return obj; + } +@@ -1888,11 +2242,11 @@ define_final(argc, argv, os) + + rb_scan_args(argc, argv, "11", &obj, &block); + if (argc == 1) { +- block = rb_block_proc(); ++ block = rb_block_proc(); + } + else if (!rb_respond_to(block, rb_intern("call"))) { +- rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", +- rb_obj_classname(block)); ++ rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", ++ rb_obj_classname(block)); + } + need_call_final = 1; + if (!FL_ABLE(obj)) { +@@ -1905,10 +2259,10 @@ define_final(argc, argv, os) + OBJ_FREEZE(block); + + if (!finalizer_table) { +- finalizer_table = st_init_numtable(); ++ finalizer_table = st_init_numtable(); + } + if (st_lookup(finalizer_table, obj, &table)) { +- rb_ary_push(table, block); ++ rb_ary_push(table, block); + } + else { + table = rb_ary_new3(1, block); +@@ -1927,7 +2281,7 @@ rb_gc_copy_finalizer(dest, obj) + if (!finalizer_table) return; + if 
(!FL_TEST(obj, FL_FINALIZE)) return; + if (st_lookup(finalizer_table, obj, &table)) { +- st_insert(finalizer_table, dest, table); ++ st_insert(finalizer_table, dest, table); + } + RBASIC(dest)->flags |= FL_FINALIZE; + } +@@ -1957,18 +2311,18 @@ run_final(obj) + args[1] = 0; + args[2] = (VALUE)ruby_safe_level; + for (i=0; ilen; i++) { +- args[0] = RARRAY(finalizers)->ptr[i]; +- if (!args[1]) args[1] = rb_ary_new3(1, objid); +- rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status); ++ args[0] = RARRAY(finalizers)->ptr[i]; ++ if (!args[1]) args[1] = rb_ary_new3(1, objid); ++ rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status); + } + if (finalizer_table && st_delete(finalizer_table, (st_data_t*)&obj, &table)) { +- for (i=0; ilen; i++) { +- VALUE final = RARRAY(table)->ptr[i]; +- args[0] = RARRAY(final)->ptr[1]; +- if (!args[1]) args[1] = rb_ary_new3(1, objid); +- args[2] = FIX2INT(RARRAY(final)->ptr[0]); +- rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status); +- } ++ for (i=0; ilen; i++) { ++ VALUE final = RARRAY(table)->ptr[i]; ++ args[0] = RARRAY(final)->ptr[1]; ++ if (!args[1]) args[1] = rb_ary_new3(1, objid); ++ args[2] = FIX2INT(RARRAY(final)->ptr[0]); ++ rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status); ++ } + } + rb_thread_critical = critical_save; + } +@@ -1980,8 +2334,8 @@ rb_gc_finalize_deferred() + + deferred_final_list = 0; + if (p) { +- finalize_list(p); +- free_unused_heaps(); ++ finalize_list(p); ++ free_unused_heaps(); + } + } + +@@ -2061,7 +2415,7 @@ id2ref(obj, objid) + if (ptr == Qfalse) return Qfalse; + if (ptr == Qnil) return Qnil; + if (FIXNUM_P(ptr)) return (VALUE)ptr; +- ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */ ++ ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */ + + if ((ptr % sizeof(RVALUE)) == (4 << 2)) { + ID symid = ptr / sizeof(RVALUE); +@@ -2075,7 +2429,7 @@ id2ref(obj, objid) + rb_raise(rb_eRangeError, "0x%lx is not id value", p0); + } + if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) { +- rb_raise(rb_eRangeError, "0x%lx is recycled object", p0); ++ rb_raise(rb_eRangeError, "0x%lx is recycled object", p0); + } + return (VALUE)ptr; + } +@@ -2166,6 +2520,14 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1); + rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0); + ++ rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0); ++ rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0); ++ rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); ++ rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); ++ rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); ++ rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); ++ rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1); ++ + rb_mObSpace = rb_define_module("ObjectSpace"); + rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1); + rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0); +@@ -2188,7 +2550,7 @@ Init_GC() + + rb_global_variable(&nomem_error); + nomem_error = rb_exc_new3(rb_eNoMemError, +- rb_obj_freeze(rb_str_new2("failed to allocate memory"))); ++ rb_obj_freeze(rb_str_new2("failed to allocate memory"))); + OBJ_TAINT(nomem_error); + OBJ_FREEZE(nomem_error); + diff --git a/patches/ruby/1.8.7/head/railsexpress/05-display-full-stack-trace.patch b/patches/ruby/1.8.7/head/railsexpress/05-display-full-stack-trace.patch new file mode 100644 index 
0000000000..a927a80ba6 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/05-display-full-stack-trace.patch @@ -0,0 +1,15 @@ +diff --git a/eval.c b/eval.c +index 6ff2560..fb3307c 100644 +--- a/eval.c ++++ b/eval.c +@@ -1325,8 +1325,8 @@ error_print() + int truncate = eclass == rb_eSysStackError; + + #define TRACE_MAX (TRACE_HEAD+TRACE_TAIL+5) +-#define TRACE_HEAD 8 +-#define TRACE_TAIL 5 ++#define TRACE_HEAD 100 ++#define TRACE_TAIL 100 + + ep = RARRAY(errat); + for (i=1; ilen; i++) { diff --git a/patches/ruby/1.8.7/head/railsexpress/06-better-source-file-tracing.patch b/patches/ruby/1.8.7/head/railsexpress/06-better-source-file-tracing.patch new file mode 100644 index 0000000000..f7a4d4f24f --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/06-better-source-file-tracing.patch @@ -0,0 +1,13 @@ +diff --git a/eval.c b/eval.c +index fb3307c..356226e 100644 +--- a/eval.c ++++ b/eval.c +@@ -1161,7 +1161,7 @@ static VALUE trace_func = 0; + static int tracing = 0; + static void call_trace_func _((rb_event_t,NODE*,VALUE,ID,VALUE)); + +-#if 0 ++#if 1 + #define SET_CURRENT_SOURCE() (ruby_sourcefile = ruby_current_node->nd_file, \ + ruby_sourceline = nd_line(ruby_current_node)) + #else diff --git a/patches/ruby/1.8.7/head/railsexpress/07-heap-dump-support.patch b/patches/ruby/1.8.7/head/railsexpress/07-heap-dump-support.patch new file mode 100644 index 0000000000..5235be6bcc --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/07-heap-dump-support.patch @@ -0,0 +1,159 @@ +diff --git a/configure.in b/configure.in +index 62b34a8..4be088c 100644 +--- a/configure.in ++++ b/configure.in +@@ -1595,6 +1595,14 @@ fi + LDFLAGS="-L. $LDFLAGS" + AC_SUBST(ARCHFILE) + ++dnl enable gc debugging ++AC_ARG_ENABLE(gcdebug, ++ [ --enable-gcdebug build garbage collector with debugging enabled. ], ++ [enable_gcdebug=$enableval]) ++if test "$enable_gcdebug" = 'yes'; then ++ AC_DEFINE(GC_DEBUG, 1) ++fi ++ + dnl build rdoc index if requested + RDOCTARGET="" + AC_ARG_ENABLE(install-doc, +diff --git a/gc.c b/gc.c +index ab71d22..9ad716f 100644 +--- a/gc.c ++++ b/gc.c +@@ -411,7 +411,6 @@ rb_gc_unregister_address(addr) + } + } + +-#undef GC_DEBUG + + void + rb_global_variable(var) +@@ -602,6 +601,85 @@ rb_gc_dump() + return Qnil; + } + ++ ++static char* obj_type(int tp); ++ ++#ifdef GC_DEBUG ++/* ++ * call-seq: ++ * GC.dump_file_and_line_info(String, boolean) => nil ++ * ++ * dumps information on which currently allocated object was created by which file and on which line ++ * ++ * GC.dump_file_and_line_info(String, boolean) #=> nil ++ * ++ * The second parameter specifies whether class names should be included in the dump. ++ * Note that including class names will allocate additional string objects on the heap. 
++ * ++ */ ++ ++VALUE ++rb_gc_dump_file_and_line_info(int argc, VALUE *argv) ++{ ++ VALUE filename, str, include_classnames = Qnil; ++ char *fname = NULL; ++ char *klass = NULL; ++ FILE* f = NULL; ++ int i,n = 0; ++ ++ rb_scan_args(argc, argv, "11", &filename, &include_classnames); ++ ++ str = StringValue(filename); ++ fname = RSTRING(str)->ptr; ++ f = fopen(fname, "w"); ++ ++ for (i = 0; i < heaps_used; i++) { ++ RVALUE *p, *pend; ++ ++ p = heaps[i].slot; pend = p + heaps[i].limit; ++ for (;p < pend; p++) { ++ if (p->as.basic.flags) { ++ fprintf(f, "%s:%s:%d", obj_type(p->as.basic.flags & T_MASK), p->file, p->line); ++ // rb_obj_classname will create objects on the heap, we need a better solution ++ if (include_classnames == Qtrue) { ++ /* write the class */ ++ fprintf(f, ":"); ++ switch (TYPE(p)) { ++ case T_NONE: ++ fprintf(f, "__none__"); ++ break; ++ case T_BLKTAG: ++ fprintf(f, "__blktag__"); ++ break; ++ case T_UNDEF: ++ fprintf(f, "__undef__"); ++ break; ++ case T_VARMAP: ++ fprintf(f, "__varmap__"); ++ break; ++ case T_SCOPE: ++ fprintf(f, "__scope__"); ++ break; ++ case T_NODE: ++ fprintf(f, "__node__"); ++ break; ++ default: ++ if (!p->as.basic.klass) { ++ fprintf(f, "__unknown__"); ++ } else { ++ fprintf(f, rb_obj_classname((VALUE)p)); ++ } ++ } ++ } ++ fprintf(f, "\n"); ++ } ++ } ++ } ++ fclose(f); ++ return Qnil; ++} ++#endif ++ + /* + * call-seq: + * GC.log String => String +@@ -1066,6 +1144,11 @@ gc_mark(ptr, lev) + if (obj->as.basic.flags & FL_MARK) return; /* already marked */ + obj->as.basic.flags |= FL_MARK; + ++#ifdef GC_DEBUG ++ /* mark our new reference point for sourcefile objects */ ++ mark_source_filename(RANY(obj)->file); ++#endif ++ + if (lev > GC_LEVEL_MAX || (lev == 0 && ruby_stack_check())) { + if (!mark_stack_overflow) { + if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) { +@@ -1104,6 +1187,11 @@ gc_mark_children(ptr, lev) + if (obj->as.basic.flags & FL_MARK) return; /* already marked */ + obj->as.basic.flags |= FL_MARK; + ++#ifdef GC_DEBUG ++ /* mark our new reference point for sourcefile objects */ ++ mark_source_filename(RANY(obj)->file); ++#endif ++ + marking: + if (FL_TEST(obj, FL_EXIVAR)) { + rb_mark_generic_ivar(ptr); +@@ -1550,6 +1638,7 @@ gc_sweep() + live_counts[i], free_counts[i], obj_type(i)); + } + } ++ fflush(gc_data_file); + } + + /* clear finalization list */ +@@ -2526,6 +2615,9 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); ++#ifdef GC_DEBUG ++ rb_define_singleton_method(rb_mGC, "dump_file_and_line_info", rb_gc_dump_file_and_line_info, -1); ++#endif + rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1); + + rb_mObSpace = rb_define_module("ObjectSpace"); diff --git a/patches/ruby/1.8.7/head/railsexpress/08-fork-support-for-gc-logging.patch b/patches/ruby/1.8.7/head/railsexpress/08-fork-support-for-gc-logging.patch new file mode 100644 index 0000000000..0f01a75db0 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/08-fork-support-for-gc-logging.patch @@ -0,0 +1,249 @@ +diff --git a/gc.c b/gc.c +index 9ad716f..a3cbe91 100644 +--- a/gc.c ++++ b/gc.c +@@ -223,6 +223,8 @@ static int dont_gc; + static int gc_statistics = 0; + static GC_TIME_TYPE gc_time = 0; + static int gc_collections = 0; ++static int verbose_gc_stats = Qfalse; ++static FILE* gc_data_file = NULL; + static int during_gc; + static int need_call_final = 0; + static st_table *finalizer_table = 0; +@@ -368,9 
+370,148 @@ rb_gc_time() + #endif + } + +- + VALUE rb_mGC; + ++/* ++ * call-seq: ++ * GC.enable_trace => true or false ++ * ++ * Enables garbage collection tracing, returning true if garbage ++ * collection tracing was already enabled. ++ * ++ * GC.enable_trace #=> false or true ++ * GC.enable_trace #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_enable_trace() ++{ ++ int old = verbose_gc_stats; ++ verbose_gc_stats = Qtrue; ++ return old; ++} ++ ++/* ++ * call-seq: ++ * GC.disable_trace => true or false ++ * ++ * Disables garbage collection tracing, returning true if garbage ++ * collection tracing was already disabled. ++ * ++ * GC.disable_trace #=> false or true ++ * GC.disable_trace #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_disable_trace() ++{ ++ int old = verbose_gc_stats; ++ verbose_gc_stats = Qfalse; ++ return old; ++} ++ ++char* GC_LOGFILE_IVAR = "@gc_logfile_name"; ++ ++/* ++ * call-seq: ++ * GC.log_file(filename=nil, mode="w") => boolean ++ * ++ * Changes the GC data log file. Closes the currently open logfile. ++ * Returns true if the file was successfully opened for ++ * writing. Returns false if the file could not be opened for ++ * writing. Returns the name of the current logfile (or nil) if no ++ * parameter is given. Restores logging to stderr when given nil as ++ * an argument. ++ * ++ * GC.log_file #=> nil ++ * GC.log_file "/tmp/gc.log" #=> true ++ * GC.log_file #=> "/tmp/gc.log" ++ * GC.log_file nil #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_log_file(int argc, VALUE *argv, VALUE self) ++{ ++ VALUE filename = Qnil; ++ VALUE mode_str = Qnil; ++ FILE* f = NULL; ++ char* mode = "w"; ++ ++ VALUE current_logfile_name = rb_iv_get(rb_mGC, GC_LOGFILE_IVAR); ++ ++ if (argc==0) ++ return current_logfile_name; ++ ++ rb_scan_args(argc, argv, "02", &filename, &mode_str); ++ ++ if (filename == Qnil) { ++ /* close current logfile and reset logfile to stderr */ ++ if (gc_data_file != stderr) { ++ fclose(gc_data_file); ++ gc_data_file = stderr; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, Qnil); ++ } ++ return Qtrue; ++ } ++ ++ /* we have a real logfile name */ ++ filename = StringValue(filename); ++ ++ if (rb_equal(current_logfile_name, filename) == Qtrue) { ++ /* do nothing if we get the file name we're already logging to */ ++ return Qtrue; ++ } ++ ++ /* get mode for file opening */ ++ if (mode_str != Qnil) ++ { ++ mode = RSTRING(StringValue(mode_str))->ptr; ++ } ++ ++ /* try to open file in given mode */ ++ if (f = fopen(RSTRING(filename)->ptr, mode)) { ++ if (gc_data_file != stderr) { ++ fclose(gc_data_file); ++ } ++ gc_data_file = f; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, filename); ++ } else { ++ return Qfalse; ++ } ++ return Qtrue; ++} ++ ++ ++/* ++ * Called from process.c before a fork. Flushes the gc log file to ++ * avoid writing the buffered output twice (once in the parent, and ++ * once in the child). ++ */ ++void ++rb_gc_before_fork() ++{ ++ /* flush gc log file */ ++ fflush(gc_data_file); ++} ++ ++/* ++ * Called from process.c after a fork in the child process. Turns off ++ * logging, disables GC stats and resets all gc counters and timing ++ * information. 
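++ *
++ * Seen from Ruby, a forked child therefore starts with fresh GC
++ * bookkeeping; an illustrative sketch of the intended behaviour:
++ *
++ *    GC.enable_stats
++ *    fork do
++ *      GC.collections   #=> 0  (stats were disabled and cleared here)
++ *    end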
++ */ ++void ++rb_gc_after_fork() ++{ ++ rb_gc_disable_stats(); ++ rb_gc_clear_stats(); ++ rb_gc_disable_trace(); ++ gc_data_file = stderr; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, Qnil); ++} ++ + static struct gc_list { + VALUE *varptr; + struct gc_list *next; +@@ -477,10 +618,6 @@ static double heap_slots_growth_factor = 1.8; + + static long initial_malloc_limit = GC_MALLOC_LIMIT; + +-static int verbose_gc_stats = Qfalse; +- +-static FILE* gc_data_file = NULL; +- + static RVALUE *himem, *lomem; + + static void set_gc_parameters() +@@ -496,6 +633,8 @@ static void set_gc_parameters() + if (gc_stats_i > 0) { + verbose_gc_stats = Qtrue; + } ++ /* child processes should not inherit RUBY_GC_STATS */ ++ unsetenv("RUBY_GC_STATS"); + } + + gc_heap_file_ptr = getenv("RUBY_GC_DATA_FILE"); +@@ -508,6 +647,8 @@ static void set_gc_parameters() + fprintf(stderr, + "can't open gc log file %s for writing, using default\n", gc_heap_file_ptr); + } ++ /* child processes should not inherit RUBY_GC_DATA_FILE to avoid clobbering */ ++ unsetenv("RUBY_GC_DATA_FILE"); + } + + min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS"); +@@ -2619,6 +2760,9 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "dump_file_and_line_info", rb_gc_dump_file_and_line_info, -1); + #endif + rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1); ++ rb_define_singleton_method(rb_mGC, "log_file", rb_gc_log_file, -1); ++ rb_define_singleton_method(rb_mGC, "enable_trace", rb_gc_enable_trace, 0); ++ rb_define_singleton_method(rb_mGC, "disable_trace", rb_gc_disable_trace, 0); + + rb_mObSpace = rb_define_module("ObjectSpace"); + rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1); +diff --git a/intern.h b/intern.h +index 950ae9d..99696f1 100644 +--- a/intern.h ++++ b/intern.h +@@ -270,6 +270,8 @@ void rb_gc_call_finalizer_at_exit _((void)); + VALUE rb_gc_enable _((void)); + VALUE rb_gc_disable _((void)); + VALUE rb_gc_start _((void)); ++void rb_gc_before_fork _((void)); ++void rb_gc_after_fork _((void)); + /* hash.c */ + void st_foreach_safe _((struct st_table *, int (*)(ANYARGS), unsigned long)); + void rb_hash_foreach _((VALUE, int (*)(ANYARGS), VALUE)); +diff --git a/process.c b/process.c +index 8f6285d..ea28cb8 100644 +--- a/process.c ++++ b/process.c +@@ -1330,6 +1330,8 @@ rb_f_fork(obj) + fflush(stderr); + #endif + ++ rb_gc_before_fork(); ++ + before_exec(); + pid = fork(); + after_exec(); +@@ -1339,6 +1341,7 @@ rb_f_fork(obj) + #ifdef linux + after_exec(); + #endif ++ rb_gc_after_fork(); + rb_thread_atfork(); + if (rb_block_given_p()) { + int status; +@@ -1574,10 +1577,12 @@ rb_f_system(argc, argv) + + chfunc = signal(SIGCHLD, SIG_DFL); + retry: ++ rb_gc_before_fork(); + before_exec(); + pid = fork(); + if (pid == 0) { + /* child process */ ++ rb_gc_after_fork(); + rb_thread_atfork(); + rb_protect(proc_exec_args, (VALUE)&earg, NULL); + _exit(127); diff --git a/patches/ruby/1.8.7/head/railsexpress/09-track-malloc-size.patch b/patches/ruby/1.8.7/head/railsexpress/09-track-malloc-size.patch new file mode 100644 index 0000000000..d078748a44 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/09-track-malloc-size.patch @@ -0,0 +1,120 @@ +diff --git a/gc.c b/gc.c +index a3cbe91..30a1219 100644 +--- a/gc.c ++++ b/gc.c +@@ -79,6 +79,17 @@ void *alloca (); + + static unsigned long malloc_increase = 0; + static unsigned long malloc_limit = GC_MALLOC_LIMIT; ++ ++#ifdef HAVE_LONG_LONG ++static unsigned LONG_LONG gc_allocated_size = 0; ++static unsigned LONG_LONG gc_num_allocations = 0; ++#else ++static unsigned long 
gc_allocated_size = 0; ++static unsigned long gc_num_allocations = 0; ++#endif ++static int gc_statistics = 0; ++ ++ + static void run_final(); + static VALUE nomem_error; + static void garbage_collect(); +@@ -163,6 +174,11 @@ ruby_xmalloc(size) + } + malloc_increase += size; + ++ if (gc_statistics) { ++ gc_allocated_size += size; ++ gc_num_allocations += 1; ++ } ++ + return mem; + } + +@@ -220,7 +236,6 @@ ruby_xfree(x) + + extern int ruby_in_compile; + static int dont_gc; +-static int gc_statistics = 0; + static GC_TIME_TYPE gc_time = 0; + static int gc_collections = 0; + static int verbose_gc_stats = Qfalse; +@@ -329,11 +344,55 @@ rb_gc_clear_stats() + { + gc_collections = 0; + gc_time = 0; ++ gc_allocated_size = 0; ++ gc_num_allocations = 0; + return Qnil; + } + + /* + * call-seq: ++ * GC.allocated_size => Integer ++ * ++ * Returns the size of memory (in bytes) allocated since GC statistics collection ++ * was enabled. ++ * ++ * GC.allocated_size #=> 35 ++ * ++ */ ++VALUE ++rb_gc_allocated_size() ++{ ++#if HAVE_LONG_LONG ++ return ULL2NUM(gc_allocated_size); ++#else ++ return ULONG2NUM(gc_allocated_size); ++#endif ++} ++ ++/* ++ * call-seq: ++ * GC.num_allocations => Integer ++ * ++ * Returns the number of memory allocations since GC statistics collection ++ * was enabled. ++ * ++ * GC.num_allocations #=> 150 ++ * ++ */ ++VALUE ++rb_gc_num_allocations() ++{ ++#if HAVE_LONG_LONG ++ return ULL2NUM(gc_num_allocations); ++#else ++ return ULONG2NUM(gc_num_allocations); ++#endif ++} ++ + /* + * call-seq: + * GC.collections => Integer + * + * Returns the number of garbage collections performed while GC statistics collection + * was enabled. + * + * GC.collections #=> 35 + * + */ +@@ -2753,6 +2812,8 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0); + rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0); + rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); ++ rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); ++ rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); +diff --git a/intern.h b/intern.h +index 99696f1..1117614 100644 +--- a/intern.h ++++ b/intern.h +@@ -272,6 +272,8 @@ VALUE rb_gc_disable _((void)); + VALUE rb_gc_start _((void)); + void rb_gc_before_fork _((void)); + void rb_gc_after_fork _((void)); ++VALUE rb_gc_allocated_size _((void)); ++VALUE rb_gc_num_allocations _((void)); + /* hash.c */ + void st_foreach_safe _((struct st_table *, int (*)(ANYARGS), unsigned long)); + void rb_hash_foreach _((VALUE, int (*)(ANYARGS), VALUE)); diff --git a/patches/ruby/1.8.7/head/railsexpress/10-track-object-allocation.patch b/patches/ruby/1.8.7/head/railsexpress/10-track-object-allocation.patch new file mode 100644 index 0000000000..0eef717a24 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/10-track-object-allocation.patch @@ -0,0 +1,111 @@ +diff --git a/gc.c b/gc.c +index 30a1219..5b42b90 100644 +--- a/gc.c ++++ b/gc.c +@@ -96,6 +96,26 @@ static void garbage_collect(); + + int ruby_gc_stress = 0; + ++static unsigned long live_objects = 0; ++unsigned long rb_os_live_objects() ++{ ++ return live_objects; ++} ++ ++#if defined(HAVE_LONG_LONG) ++static unsigned LONG_LONG allocated_objects = 0; ++unsigned LONG_LONG rb_os_allocated_objects() ++{ ++ return allocated_objects; ++} ++#else ++static unsigned long
allocated_objects = 0; ++unsigned long rb_os_allocated_objects() ++{ ++ return allocated_objects; ++} ++#endif ++ + NORETURN(void rb_exc_jump _((VALUE))); + + void +@@ -987,6 +1007,8 @@ rb_newobj() + RANY(obj)->file = ruby_sourcefile; + RANY(obj)->line = ruby_sourceline; + #endif ++ live_objects++; ++ allocated_objects++; + return obj; + } + +@@ -1825,6 +1847,7 @@ gc_sweep() + add_heap(); + } + during_gc = 0; ++ live_objects = live; + + if (do_gc_stats) { + fprintf(gc_data_file, "objects processed: %.7d\n", live+freed); +@@ -2790,6 +2813,35 @@ rb_obj_id(VALUE obj) + return (VALUE)((long)obj|FIXNUM_FLAG); + } + ++/* call-seq: ++ * ObjectSpace.live_objects => number ++ * ++ * Returns the count of objects currently allocated in the system. This goes ++ * down after the garbage collector runs. ++ */ ++static ++VALUE os_live_objects(VALUE self) ++{ ++ return ULONG2NUM(live_objects); ++} ++ ++/* call-seq: ++ * ObjectSpace.allocated_objects => number ++ * ++ * Returns the count of objects allocated since the Ruby interpreter has ++ * started. This number can only increase. To know how many objects are ++ * currently allocated, use ObjectSpace::live_objects ++ */ ++static ++VALUE os_allocated_objects(VALUE self) ++{ ++#if defined(HAVE_LONG_LONG) ++ return ULL2NUM(allocated_objects); ++#else ++ return ULONG2NUM(allocated_objects); ++#endif ++} ++ + /* + * The GC module provides an interface to Ruby's mark and + * sweep garbage collection mechanism. Some of the underlying methods +@@ -2833,6 +2885,9 @@ Init_GC() + rb_define_module_function(rb_mObSpace, "finalizers", finals, 0); + rb_define_module_function(rb_mObSpace, "call_finalizer", call_final, 1); + ++ rb_define_module_function(rb_mObSpace, "live_objects", os_live_objects, 0); ++ rb_define_module_function(rb_mObSpace, "allocated_objects", os_allocated_objects, 0); ++ + rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1); + rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1); + +diff --git a/intern.h b/intern.h +index 1117614..a87661d 100644 +--- a/intern.h ++++ b/intern.h +@@ -274,6 +274,12 @@ void rb_gc_before_fork _((void)); + void rb_gc_after_fork _((void)); + VALUE rb_gc_allocated_size _((void)); + VALUE rb_gc_num_allocations _((void)); ++unsigned long rb_os_live_objects _((void)); ++#ifdef HAVE_LONG_LONG ++unsigned LONG_LONG rb_os_allocated_objects _((void)); ++#else ++unsigned long rb_os_allocated_objects _((void)); ++#endif + /* hash.c */ + void st_foreach_safe _((struct st_table *, int (*)(ANYARGS), unsigned long)); + void rb_hash_foreach _((VALUE, int (*)(ANYARGS), VALUE)); diff --git a/patches/ruby/1.8.7/head/railsexpress/11-expose-heap-slots.patch b/patches/ruby/1.8.7/head/railsexpress/11-expose-heap-slots.patch new file mode 100644 index 0000000000..3ce846c6a0 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/11-expose-heap-slots.patch @@ -0,0 +1,70 @@ +diff --git a/gc.c b/gc.c +index 5b42b90..21b3f6b 100644 +--- a/gc.c ++++ b/gc.c +@@ -690,6 +690,7 @@ static int heaps_used = 0; + + static int heap_min_slots = 10000; + static int heap_slots = 10000; ++static int heap_size = 0; + + static int heap_free_min = 4096; + static int heap_slots_increment = 10000; +@@ -800,6 +801,21 @@ static void set_gc_parameters() + + /* + * call-seq: ++ * GC.heap_slots => Integer ++ * ++ * Returns the number of heap slots available for object allocations. 
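++ * The value grows whenever a heap chunk is added and shrinks when an
++ * emptied chunk is released, so it can be sampled around an allocation
++ * burst (an illustrative sketch):
++ *
++ *    before = GC.heap_slots
++ *    100_000.times { Object.new }   # may force new heaps to be added
++ *    GC.heap_slots >= before        #=> typically true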
++ * ++ * GC.heap_slots #=> 10000 ++ * ++ */ ++VALUE ++rb_gc_heap_slots() ++{ ++ return INT2NUM(heap_size); ++} ++ ++/* ++ * call-seq: + * GC.dump => nil + * + * dumps information about the current GC data structures to the GC log file +@@ -967,6 +983,7 @@ add_heap() + heaps[heaps_used].limit = heap_slots; + break; + } ++ heap_size += heap_slots; + pend = p + heap_slots; + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; +@@ -1828,6 +1845,7 @@ gc_sweep() + if (n == heaps[i].limit && freed > free_min) { + RVALUE *pp; + ++ heap_size -= n; + heaps[i].limit = 0; + for (pp = final_list; pp != final; pp = pp->as.free.next) { + pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */ +@@ -2866,6 +2884,7 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); ++ rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0); + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); +diff --git a/intern.h b/intern.h +index a87661d..e8f3209 100644 +--- a/intern.h ++++ b/intern.h +@@ -274,6 +274,7 @@ void rb_gc_before_fork _((void)); + void rb_gc_after_fork _((void)); + VALUE rb_gc_allocated_size _((void)); + VALUE rb_gc_num_allocations _((void)); ++VALUE rb_gc_heap_slots _((void)); + unsigned long rb_os_live_objects _((void)); + #ifdef HAVE_LONG_LONG + unsigned LONG_LONG rb_os_allocated_objects _((void)); diff --git a/patches/ruby/1.8.7/head/railsexpress/12-fix-heap-size-growth-logic.patch b/patches/ruby/1.8.7/head/railsexpress/12-fix-heap-size-growth-logic.patch new file mode 100644 index 0000000000..1db93f224d --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/12-fix-heap-size-growth-logic.patch @@ -0,0 +1,54 @@ +diff --git a/gc.c b/gc.c +index 21b3f6b..7db1ef6 100644 +--- a/gc.c ++++ b/gc.c +@@ -694,6 +694,7 @@ static int heap_size = 0; + + static int heap_free_min = 4096; + static int heap_slots_increment = 10000; ++static int initial_heap_slots_increment = 10000; + static double heap_slots_growth_factor = 1.8; + + static long initial_malloc_limit = GC_MALLOC_LIMIT; +@@ -771,14 +772,13 @@ static void set_gc_parameters() + if (verbose_gc_stats) { + fprintf(gc_data_file, "RUBY_HEAP_SLOTS_INCREMENT=%s\n", heap_slots_incr_ptr); + } +- if (heap_slots_incr_i > 0) { +- heap_slots_increment = heap_slots_incr_i; +- } ++ heap_slots_increment = heap_slots_incr_i; ++ initial_heap_slots_increment = heap_slots_increment; + } + + heap_slots_growth_factor_ptr = getenv("RUBY_HEAP_SLOTS_GROWTH_FACTOR"); + if (heap_slots_growth_factor_ptr != NULL) { +- double heap_slots_growth_factor_d = atoi(heap_slots_growth_factor_ptr); ++ double heap_slots_growth_factor_d = atof(heap_slots_growth_factor_ptr); + if (verbose_gc_stats) { + fprintf(gc_data_file, "RUBY_HEAP_SLOTS_GROWTH_FACTOR=%s\n", heap_slots_growth_factor_ptr); + } +@@ -988,8 +988,13 @@ add_heap() + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; + heaps_used++; +- heap_slots += heap_slots_increment; +- heap_slots_increment *= heap_slots_growth_factor; ++ if (heaps_used == 1) ++ heap_slots = initial_heap_slots_increment; ++ else { ++ heap_slots_increment *= heap_slots_growth_factor; ++ heap_slots += heap_slots_increment; ++ } ++ + if (heap_slots <= 0) heap_slots = 
heap_min_slots; + + while (p < pend) { +@@ -1879,6 +1884,7 @@ gc_sweep() + live_counts[i], free_counts[i], obj_type(i)); + } + } ++ rb_gc_dump(); + fflush(gc_data_file); + } + diff --git a/patches/ruby/1.8.7/head/railsexpress/13-heap-slot-size.patch b/patches/ruby/1.8.7/head/railsexpress/13-heap-slot-size.patch new file mode 100644 index 0000000000..45091c14ae --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/13-heap-slot-size.patch @@ -0,0 +1,12 @@ +diff --git a/gc.c b/gc.c +index 7db1ef6..57740d2 100644 +--- a/gc.c ++++ b/gc.c +@@ -2891,6 +2891,7 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); + rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0); ++ rb_define_const(rb_mGC, "HEAP_SLOT_SIZE", INT2FIX(sizeof(RVALUE))); + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); diff --git a/patches/ruby/1.8.7/head/railsexpress/14-add-trace-stats-enabled-methods.patch b/patches/ruby/1.8.7/head/railsexpress/14-add-trace-stats-enabled-methods.patch new file mode 100644 index 0000000000..4e3c4f2132 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/14-add-trace-stats-enabled-methods.patch @@ -0,0 +1,66 @@ +diff --git a/gc.c b/gc.c +index 57740d2..2c34932 100644 +--- a/gc.c ++++ b/gc.c +@@ -350,6 +350,22 @@ rb_gc_disable_stats() + + /* + * call-seq: ++ * GC.stats_enabled? => true or false ++ * ++ * Check whether GC stats have been enabled. ++ * ++ * GC.stats_enabled? #=> false or true ++ * ++ */ ++ ++VALUE ++rb_gc_stats_enabled() ++{ ++ return gc_statistics ? Qtrue : Qfalse; ++} ++ ++/* ++ * call-seq: + * GC.clear_stats => nil + * + * Clears garbage collection statistics, returning nil. This resets the number +@@ -491,6 +507,22 @@ rb_gc_disable_trace() + return old; + } + ++/* ++ * call-seq: ++ * GC.trace_enabled? => true or false ++ * ++ * Check whether GC tracing has been enabled. ++ * ++ * GC.trace_enabled? #=> false or true ++ * ++ */ ++ ++VALUE ++rb_gc_trace_enabled() ++{ ++ return verbose_gc_stats ? 
Qtrue : Qfalse; ++} ++ + char* GC_LOGFILE_IVAR = "@gc_logfile_name"; + + /* +@@ -2887,6 +2919,7 @@ Init_GC() + + rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0); + rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0); ++ rb_define_singleton_method(rb_mGC, "stats_enabled?", rb_gc_stats_enabled, 0); + rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); +@@ -2902,6 +2935,7 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "log_file", rb_gc_log_file, -1); + rb_define_singleton_method(rb_mGC, "enable_trace", rb_gc_enable_trace, 0); + rb_define_singleton_method(rb_mGC, "disable_trace", rb_gc_disable_trace, 0); ++ rb_define_singleton_method(rb_mGC, "trace_enabled?", rb_gc_trace_enabled, 0); + + rb_mObSpace = rb_define_module("ObjectSpace"); + rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1); diff --git a/patches/ruby/1.8.7/head/railsexpress/15-track-live-dataset-size.patch b/patches/ruby/1.8.7/head/railsexpress/15-track-live-dataset-size.patch new file mode 100644 index 0000000000..de44e6b365 --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/15-track-live-dataset-size.patch @@ -0,0 +1,52 @@ +diff --git a/gc.c b/gc.c +index 2c34932..0ce7e68 100644 +--- a/gc.c ++++ b/gc.c +@@ -89,6 +89,7 @@ static unsigned long gc_num_allocations = 0; + #endif + static int gc_statistics = 0; + ++static unsigned long heap_slots_live_after_last_gc = 0; + + static void run_final(); + static VALUE nomem_error; +@@ -465,6 +466,23 @@ rb_gc_time() + #endif + } + ++/* ++ * call-seq: ++ * GC.heap_slots_live_after_last_gc => Integer ++ * ++ * Returns the number of heap slots which were live after the last garbage collection. 
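++ * The counter is refreshed at the end of every sweep phase, so forcing a
++ * collection yields an up-to-date reading (illustrative):
++ *
++ *    GC.start
++ *    GC.heap_slots_live_after_last_gc   #=> live slots after that very run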
++ * ++ * GC.heap_slots_live_after_last_gc #=> 231223 ++ * ++ */ ++VALUE ++rb_gc_heap_slots_live_after_last_gc() ++{ ++ return ULONG2NUM(heap_slots_live_after_last_gc); ++} ++ ++ ++ + VALUE rb_mGC; + + /* +@@ -1903,6 +1921,7 @@ gc_sweep() + } + during_gc = 0; + live_objects = live; ++ heap_slots_live_after_last_gc = live; + + if (do_gc_stats) { + fprintf(gc_data_file, "objects processed: %.7d\n", live+freed); +@@ -2924,6 +2943,7 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); + rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0); ++ rb_define_singleton_method(rb_mGC, "heap_slots_live_after_last_gc", rb_gc_heap_slots_live_after_last_gc, 0); + rb_define_const(rb_mGC, "HEAP_SLOT_SIZE", INT2FIX(sizeof(RVALUE))); + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); diff --git a/patches/ruby/1.8.7/head/railsexpress/16-add-object-size-information-to-heap-dump.patch b/patches/ruby/1.8.7/head/railsexpress/16-add-object-size-information-to-heap-dump.patch new file mode 100644 index 0000000000..e974cdb63a --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/16-add-object-size-information-to-heap-dump.patch @@ -0,0 +1,51 @@ +diff --git a/gc.c b/gc.c +index 0ce7e68..53450bf 100644 +--- a/gc.c ++++ b/gc.c +@@ -953,9 +953,21 @@ rb_gc_dump_file_and_line_info(int argc, VALUE *argv) + if (!p->as.basic.klass) { + fprintf(f, "__unknown__"); + } else { +- fprintf(f, rb_obj_classname((VALUE)p)); ++ fprintf(f, "%s", rb_obj_classname((VALUE)p)); + } + } ++ /* print object size for some known object types */ ++ switch (TYPE(p)) { ++ case T_STRING: ++ fprintf(f, ":%lu", RSTRING(p)->len); ++ break; ++ case T_ARRAY: ++ fprintf(f, ":%lu", RARRAY(p)->len); ++ break; ++ case T_HASH: ++ fprintf(f, ":%d", RHASH(p)->tbl->num_entries); ++ break; ++ } + } + fprintf(f, "\n"); + } +@@ -1924,10 +1936,10 @@ gc_sweep() + heap_slots_live_after_last_gc = live; + + if (do_gc_stats) { +- fprintf(gc_data_file, "objects processed: %.7d\n", live+freed); +- fprintf(gc_data_file, "live objects : %.7d\n", live); +- fprintf(gc_data_file, "freelist objects : %.7d\n", freed - really_freed); +- fprintf(gc_data_file, "freed objects : %.7d\n", really_freed); ++ fprintf(gc_data_file, "objects processed: %.7lu\n", live+freed); ++ fprintf(gc_data_file, "live objects : %.7lu\n", live); ++ fprintf(gc_data_file, "freelist objects : %.7lu\n", freed - really_freed); ++ fprintf(gc_data_file, "freed objects : %.7lu\n", really_freed); + for(i=0; i<256; i++) { + if (free_counts[i]>0 || live_counts[i]>0) { + fprintf(gc_data_file, +@@ -2258,7 +2270,7 @@ garbage_collect() + gc_time += musecs_used; + + if (verbose_gc_stats) { +- fprintf(gc_data_file, "GC time: %d msec\n", musecs_used / 1000); ++ fprintf(gc_data_file, "GC time: %ld msec\n", (long)(musecs_used / 1000)); + fflush(gc_data_file); + } + } diff --git a/patches/ruby/1.8.7/head/railsexpress/17-caller-for-all-threads.patch b/patches/ruby/1.8.7/head/railsexpress/17-caller-for-all-threads.patch new file mode 100644 index 0000000000..aa79d1602c --- /dev/null +++ b/patches/ruby/1.8.7/head/railsexpress/17-caller-for-all-threads.patch @@ -0,0 +1,230 @@ +diff --git a/eval.c b/eval.c +index 356226e..a0fdc55 100644 +--- a/eval.c ++++ b/eval.c +@@ -8199,6 +8199,17 @@ rb_f_method_name() + } + } + ++/* Hash (Thread => Backtrace) used to collect a backtrace for each thread.
*/ ++static VALUE backtrace_for_each_thread; ++ ++static int backtrace_level_for_each_thread; ++ ++static VALUE ++switch_thread_context_to_collect_backtrace(rb_thread_t next); ++ ++static VALUE ++rb_f_caller_for_all_threads(); ++ + void + Init_eval() + { +@@ -8244,6 +8255,7 @@ Init_eval() + rb_define_global_function("fail", rb_f_raise, -1); + + rb_define_global_function("caller", rb_f_caller, -1); ++ rb_define_global_function("caller_for_all_threads", rb_f_caller_for_all_threads, -1); + + rb_define_global_function("exit", rb_f_exit, -1); + rb_define_global_function("abort", rb_f_abort, -1); +@@ -10599,6 +10611,7 @@ static int th_sig, th_safe; + #define RESTORE_RAISE 5 + #define RESTORE_SIGNAL 6 + #define RESTORE_EXIT 7 ++#define RESTORE_BACKTRACE 8 + + extern VALUE *rb_gc_stack_start; + #ifdef __ia64 +@@ -10705,6 +10718,15 @@ rb_thread_switch(n) + } + rb_exc_raise(th_raise_exception); + break; ++ case RESTORE_BACKTRACE: ++ rb_hash_aset(backtrace_for_each_thread, curr_thread->thread, ++ backtrace(backtrace_level_for_each_thread)); ++ if (curr_thread != main_thread) { ++ switch_thread_context_to_collect_backtrace(curr_thread->next); ++ } else { ++ /* Circled back to main thread, cycle is complete. */ ++ } ++ break; + case RESTORE_NORMAL: + default: + break; +@@ -13875,3 +13897,74 @@ rb_throw(tag, val) + argv[1] = val; + rb_f_throw(2, argv); + } ++ ++static VALUE ++switch_thread_context_to_collect_backtrace(rb_thread_t next) ++{ ++ if (THREAD_SAVE_CONTEXT(curr_thread)) { ++ return Qnil; ++ } ++ curr_thread = next; ++ rb_thread_restore_context(next, RESTORE_BACKTRACE); ++ return Qnil; ++} ++ ++ ++/* ++ * call-seq: ++ * caller_for_all_threads(start=1) => array ++ * ++ * Returns the current execution stack for all threads ++ * ---a hash whose keys are thread instances and values ++ * the thread caller backtrace. ++ * ++ * Backtraces are array of hashes indicating location on the ++ * stack. Hash keys include ``:line'' or ``:file'' ++ * and ``:method'''. ++ * ++ * The optional _start_ parameter ++ * determines the number of initial stack entries to omit from the ++ * result. ++ * ++ * def a(skip) ++ * caller_for_all_threads(skip) ++ * end ++ * def b(skip) ++ * a(skip) ++ * end ++ * def c(skip) ++ * b(skip) ++ * end ++ * c(0) #=> ["prog:2:in `a'", "prog:5:in `b'", "prog:8:in `c'", "prog:10"] ++ * c(1) #=> ["prog:5:in `b'", "prog:8:in `c'", "prog:11"] ++ * c(2) #=> ["prog:8:in `c'", "prog:12"] ++ * c(3) #=> ["prog:13"] ++ */ ++static VALUE ++rb_f_caller_for_all_threads(argc, argv) ++ int argc; ++ VALUE *argv; ++{ ++ volatile int critical; ++ VALUE level; ++ VALUE result; ++ ++ rb_scan_args(argc, argv, "01", &level); ++ backtrace_level_for_each_thread = NIL_P(level) ? 
0 : NUM2INT(level); ++ if (backtrace_level_for_each_thread < 0) { ++ rb_raise(rb_eArgError, "negative level (%d)", backtrace_level_for_each_thread); ++ } ++ ++ critical = rb_thread_critical; ++ rb_thread_critical = Qtrue; ++ ++ backtrace_for_each_thread = rb_hash_new(); ++ switch_thread_context_to_collect_backtrace(main_thread->next); ++ ++ result = backtrace_for_each_thread; ++ backtrace_for_each_thread = Qnil; ++ backtrace_level_for_each_thread = 0; ++ ++ rb_thread_critical = critical; ++ return result; ++} +diff --git a/test/callerforallthreads/test_caller_for_each_thread.rb b/test/callerforallthreads/test_caller_for_each_thread.rb +new file mode 100644 +index 0000000..6aebaed +--- /dev/null ++++ b/test/callerforallthreads/test_caller_for_each_thread.rb +@@ -0,0 +1,95 @@ ++# -*- ruby-indent-level: 4 -*- ++require 'thread' ++require 'test/unit' ++ ++class AClassWithNestedmethods ++ ++ def an_ultra_nested_method(skip) ++ caller_for_all_threads skip ++ end ++ ++ def a_nested_method(skip) ++ an_ultra_nested_method skip ++ end ++ ++ def a_method(skip=0) ++ a_nested_method skip ++ end ++ ++end ++ ++class CallerForEachThreadTest < Test::Unit::TestCase ++ ++ def testCollectMeaningfulBacktraceForASingleThread ++ backtraces = AClassWithNestedmethods.new.a_method ++ backtrace = backtraces[Thread.current] ++ assert_not_nil backtrace ++ assert_equal __FILE__ + ":8:in `an_ultra_nested_method'", backtrace[0] ++ assert_equal __FILE__ + ":12:in `a_nested_method'", backtrace[1] ++ assert_equal __FILE__ + ":16:in `a_method'", backtrace[2] ++ assert_equal __FILE__ + ":24:in `testCollectMeaningfulBacktraceForASingleThread'", ++ backtrace[3] ++ end ++ ++ def testCanSkipFirstStackEntries ++ backtraces = AClassWithNestedmethods.new.a_method 2 ++ backtrace = backtraces[Thread.current] ++ assert_not_nil backtrace ++ assert_equal __FILE__ + ":16:in `a_method'", backtrace[0] ++ assert_equal __FILE__ + ":35:in `testCanSkipFirstStackEntries'", ++ backtrace[1] ++ end ++ ++ def testCollectMeaningfulBacktraceForMultipleThreads ++ first_thread = Thread.new do ++ loop do ++ Thread.pass ++ sleep 1 ++ end ++ end ++ ++ second_thread = Thread.new do ++ loop do ++ Thread.pass ++ sleep 1 ++ end ++ end ++ ++ backtraces = AClassWithNestedmethods.new.a_method ++ ++ backtrace = backtraces[Thread.current] ++ assert_not_nil backtrace ++ assert_match __FILE__ + ":8:in `an_ultra_nested_method'", backtrace[0] ++ assert_match __FILE__ + ":12:in `a_nested_method'", backtrace[1] ++ assert_equal __FILE__ + ":16:in `a_method'", backtrace[2] ++ assert_equal __FILE__ + ":58:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[3] ++ ++ backtrace = backtraces[first_thread] ++ assert_not_nil backtrace ++ assert_equal __FILE__ + ":47:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[0] ++ assert_equal __FILE__ + ":45:in `loop'", ++ backtrace[1] ++ assert_equal __FILE__ + ":45:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[2] ++ assert_equal __FILE__ + ":44:in `initialize'",backtrace[3] ++ assert_equal __FILE__ + ":44:in `new'", backtrace[4] ++ assert_equal __FILE__ + ":44:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[5] ++ ++ backtrace = backtraces[second_thread] ++ assert_not_nil backtrace ++ assert_equal __FILE__ + ":53:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[0] ++ assert_equal __FILE__ + ":52:in `loop'", backtrace[1] ++ assert_equal __FILE__ + ":52:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[2] ++ assert_equal
__FILE__ + ":51:in `initialize'",backtrace[3] ++ assert_equal __FILE__ + ":51:in `new'", backtrace[4] ++ assert_equal __FILE__ + ":51:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[5] ++ end ++ ++end ++ diff --git a/patches/ruby/1.8.7/p358/railsexpress/01-ignore-generated-files.patch b/patches/ruby/1.8.7/p358/railsexpress/01-ignore-generated-files.patch new file mode 100644 index 0000000000..b669ad44bd --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/01-ignore-generated-files.patch @@ -0,0 +1,101 @@ +diff --git a/.gitignore b/.gitignore +new file mode 100644 +index 0000000..00c347a +--- /dev/null ++++ b/.gitignore +@@ -0,0 +1,95 @@ ++.ext ++.installed.list ++.rbconfig.time ++Makefile ++autom4te.cache/ ++config.h ++config.status ++configure ++ext/Win32API/Makefile ++ext/bigdecimal/Makefile ++ext/curses/Makefile ++ext/dbm/Makefile ++ext/digest/Makefile ++ext/digest/bubblebabble/Makefile ++ext/digest/md5/Makefile ++ext/digest/rmd160/Makefile ++ext/digest/sha1/Makefile ++ext/digest/sha2/Makefile ++ext/dl/Makefile ++ext/dl/call.func ++ext/dl/callback.func ++ext/dl/cbtable.func ++ext/dl/dlconfig.h ++ext/dl/dlconfig.rb ++ext/enumerator/Makefile ++ext/etc/Makefile ++ext/fcntl/Makefile ++ext/gdbm/Makefile ++ext/iconv/Makefile ++ext/io/wait/Makefile ++ext/nkf/Makefile ++ext/openssl/Makefile ++ext/openssl/extconf.h ++ext/pty/Makefile ++ext/racc/cparse/Makefile ++ext/readline/Makefile ++ext/sdbm/Makefile ++ext/socket/Makefile ++ext/stringio/Makefile ++ext/strscan/Makefile ++ext/syck/Makefile ++ext/syslog/Makefile ++ext/thread/Makefile ++ext/tk/Makefile ++ext/tk/tkutil/Makefile ++ext/win32ole/Makefile ++ext/win32ole/.document ++ext/zlib/Makefile ++largefile.h ++miniruby ++parse.c ++rbconfig.rb ++ruby ++enc.mk ++ext/bigdecimal/extconf.h ++ext/continuation/ ++ext/coverage/ ++ext/curses/extconf.h ++ext/dbm/extconf.h ++ext/digest/bubblebabble/extconf.h ++ext/digest/extconf.h ++ext/digest/md5/extconf.h ++ext/digest/rmd160/extconf.h ++ext/digest/sha1/extconf.h ++ext/digest/sha2/extconf.h ++ext/dl/callback.h ++ext/dl/extconf.h ++ext/etc/extconf.h ++ext/fcntl/extconf.h ++ext/fiber/ ++ext/iconv/extconf.h ++ext/io/wait/extconf.h ++ext/json/ ++ext/nkf/extconf.h ++ext/pty/extconf.h ++ext/racc/cparse/extconf.h ++ext/readline/extconf.h ++ext/ripper/ ++ext/sdbm/extconf.h ++ext/socket/constants.h ++ext/socket/extconf.h ++ext/stringio/extconf.h ++ext/strscan/extconf.h ++ext/syck/extconf.h ++ext/syslog/extconf.h ++ext/tk/extconf.h ++ext/tk/tkutil/extconf.h ++ext/zlib/extconf.h ++miniprelude.c ++prelude.c ++revision.h ++*.dylib ++*.log ++*.dSYM ++patches-ruby* diff --git a/patches/ruby/1.8.7/p358/railsexpress/02-fix-tests-for-osx.patch b/patches/ruby/1.8.7/p358/railsexpress/02-fix-tests-for-osx.patch new file mode 100644 index 0000000000..6250e97f36 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/02-fix-tests-for-osx.patch @@ -0,0 +1,139 @@ +diff --git a/test/drb/drbtest.rb b/test/drb/drbtest.rb +index bc16ab1..c12b168 100644 +--- a/test/drb/drbtest.rb ++++ b/test/drb/drbtest.rb +@@ -22,7 +22,7 @@ class DRbService + %w(ut_drb.rb ut_array.rb ut_port.rb ut_large.rb ut_safe1.rb ut_eval.rb).each do |nm| + add_service_command(nm) + end +- @server = @@server = DRb::DRbServer.new('druby://localhost:0', @@manager, {}) ++ @server = @@server = DRb::DRbServer.new('druby://127.0.0.1:0', @@manager, {}) + @@manager.uri = @@server.uri + def self.manager + @@manager +@@ -79,16 +79,16 @@ module DRbCore + end + + def test_00_DRbObject +- ro = DRbObject.new(nil, 'druby://localhost:12345') +- 
assert_equal('druby://localhost:12345', ro.__drburi) ++ ro = DRbObject.new(nil, 'druby://127.0.0.1:12345') ++ assert_equal('druby://127.0.0.1:12345', ro.__drburi) + assert_equal(nil, ro.__drbref) + +- ro = DRbObject.new_with_uri('druby://localhost:12345') +- assert_equal('druby://localhost:12345', ro.__drburi) ++ ro = DRbObject.new_with_uri('druby://127.0.0.1:12345') ++ assert_equal('druby://127.0.0.1:12345', ro.__drburi) + assert_equal(nil, ro.__drbref) + +- ro = DRbObject.new_with_uri('druby://localhost:12345?foobar') +- assert_equal('druby://localhost:12345', ro.__drburi) ++ ro = DRbObject.new_with_uri('druby://127.0.0.1:12345?foobar') ++ assert_equal('druby://127.0.0.1:12345', ro.__drburi) + assert_equal(DRb::DRbURIOption.new('foobar'), ro.__drbref) + end + +diff --git a/test/drb/ut_drb.rb b/test/drb/ut_drb.rb +index f5720cf..265713d 100644 +--- a/test/drb/ut_drb.rb ++++ b/test/drb/ut_drb.rb +@@ -154,7 +154,7 @@ if __FILE__ == $0 + + DRb::DRbServer.default_argc_limit(8) + DRb::DRbServer.default_load_limit(4096) +- DRb.start_service('druby://localhost:0', DRbEx.new) ++ DRb.start_service('druby://127.0.0.1:0', DRbEx.new) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join + end +diff --git a/test/drb/ut_drb_drbssl.rb b/test/drb/ut_drb_drbssl.rb +index 0a2191e..bca3012 100644 +--- a/test/drb/ut_drb_drbssl.rb ++++ b/test/drb/ut_drb_drbssl.rb +@@ -18,7 +18,7 @@ if __FILE__ == $0 + + DRb::DRbServer.default_argc_limit(8) + DRb::DRbServer.default_load_limit(4096) +- DRb.start_service('drbssl://localhost:0', DRbEx.new, config) ++ DRb.start_service('drbssl://127.0.0.1:0', DRbEx.new, config) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join + end +diff --git a/test/drb/ut_eval.rb b/test/drb/ut_eval.rb +index 4df963e..9127939 100644 +--- a/test/drb/ut_eval.rb ++++ b/test/drb/ut_eval.rb +@@ -3,7 +3,7 @@ require 'drb/extserv' + + class EvalAttack + def initialize +- @four = DRb::DRbServer.new('druby://localhost:0', self, {:safe_level => 4}) ++ @four = DRb::DRbServer.new('druby://127.0.0.1:0', self, {:safe_level => 4}) + end + + def four +@@ -25,7 +25,7 @@ if __FILE__ == $0 + + $SAFE = 1 + +- DRb.start_service('druby://localhost:0', EvalAttack.new, {:safe_level => 2}) ++ DRb.start_service('druby://127.0.0.1:0', EvalAttack.new, {:safe_level => 2}) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join + end +diff --git a/test/drb/ut_large.rb b/test/drb/ut_large.rb +index d6717c5..0aefd1b 100644 +--- a/test/drb/ut_large.rb ++++ b/test/drb/ut_large.rb +@@ -31,7 +31,7 @@ if __FILE__ == $0 + + DRb::DRbServer.default_argc_limit(3) + DRb::DRbServer.default_load_limit(100000) +- DRb.start_service('druby://localhost:0', DRbLarge.new) ++ DRb.start_service('druby://127.0.0.1:0', DRbLarge.new) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join + end +diff --git a/test/drb/ut_safe1.rb b/test/drb/ut_safe1.rb +index 4df8e1e..ee097f6 100644 +--- a/test/drb/ut_safe1.rb ++++ b/test/drb/ut_safe1.rb +@@ -8,7 +8,7 @@ if __FILE__ == $0 + it + end + +- DRb.start_service('druby://localhost:0', [1, 2, 'III', 4, "five", 6], ++ DRb.start_service('druby://127.0.0.1:0', [1, 2, 'III', 4, "five", 6], + {:safe_level => 1}) + es = DRb::ExtServ.new(ARGV.shift, ARGV.shift) + DRb.thread.join +diff --git a/test/net/pop/test_pop.rb b/test/net/pop/test_pop.rb +index c8aa9a8..129ca08 100644 +--- a/test/net/pop/test_pop.rb ++++ b/test/net/pop/test_pop.rb +@@ -3,10 +3,14 @@ require 'test/unit' + require 'digest/md5' + + class TestPOP < Test::Unit::TestCase ++ def localhost ++ 
'127.0.0.1' ++ end ++ + def setup + @users = {'user' => 'pass' } + @ok_user = 'user' +- @stamp_base = "#{$$}.#{Time.now.to_i}@localhost" ++ @stamp_base = "#{$$}.#{Time.now.to_i}@#{localhost}" + end + + def test_pop_auth_ok +@@ -64,7 +68,7 @@ class TestPOP < Test::Unit::TestCase + end + + def pop_test(apop=false) +- host = 'localhost' ++ host = localhost + server = TCPServer.new(host, 0) + port = server.addr[1] + thread = Thread.start do diff --git a/patches/ruby/1.8.7/p358/railsexpress/03-sigvtalrm-fix.patch b/patches/ruby/1.8.7/p358/railsexpress/03-sigvtalrm-fix.patch new file mode 100644 index 0000000000..fe99f6eed6 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/03-sigvtalrm-fix.patch @@ -0,0 +1,27 @@ +diff --git a/eval.c b/eval.c +index 7886e17..6ff2560 100644 +--- a/eval.c ++++ b/eval.c +@@ -12461,6 +12461,11 @@ rb_thread_start_0(fn, arg, th) + curr_thread->next = th; + th->priority = curr_thread->priority; + th->thgroup = curr_thread->thgroup; ++#if defined(HAVE_SETITIMER) || defined(_THREAD_SAFE) ++ if (!thread_init) { ++ rb_thread_start_timer(); ++ } ++#endif + } + START_TIMER(); + +@@ -13189,7 +13194,9 @@ rb_thread_atfork() + main_thread = curr_thread; + curr_thread->next = curr_thread; + curr_thread->prev = curr_thread; +- STOP_TIMER(); ++#if defined(HAVE_SETITIMER) || defined(_THREAD_SAFE) ++ rb_thread_stop_timer(); ++#endif + } + + diff --git a/patches/ruby/1.8.7/p358/railsexpress/04-railsbench-gc-patch.patch b/patches/ruby/1.8.7/p358/railsexpress/04-railsbench-gc-patch.patch new file mode 100644 index 0000000000..7b6c0e251a --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/04-railsbench-gc-patch.patch @@ -0,0 +1,1876 @@ +diff --git a/gc.c b/gc.c +index fa45cd1..ab71d22 100644 +--- a/gc.c ++++ b/gc.c +@@ -22,8 +22,16 @@ + #include + #include + ++#ifdef _WIN32 ++#include ++#else ++#include ++#endif ++ + #ifdef HAVE_SYS_TIME_H + #include ++#elif defined(_WIN32) ++#include + #endif + + #ifdef HAVE_SYS_RESOURCE_H +@@ -42,7 +50,6 @@ void rb_io_fptr_finalize _((struct rb_io_t*)); + #ifdef __CYGWIN__ + int _setjmp(), _longjmp(); + #endif +- + /* Make alloca work the best possible way. 
*/ + #ifdef __GNUC__ + # ifndef atarist +@@ -86,12 +93,12 @@ rb_memerror() + rb_thread_t th = rb_curr_thread; + + if (!nomem_error || +- (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) { +- fprintf(stderr, "[FATAL] failed to allocate memory\n"); +- exit(1); ++ (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) { ++ fprintf(stderr, "[FATAL] failed to allocate memory\n"); ++ exit(1); + } + if (rb_thread_raised_p(th, RAISED_NOMEMORY)) { +- rb_exc_jump(nomem_error); ++ rb_exc_jump(nomem_error); + } + rb_thread_raised_set(th, RAISED_NOMEMORY); + rb_exc_raise(nomem_error); +@@ -139,7 +146,7 @@ ruby_xmalloc(size) + void *mem; + + if (size < 0) { +- rb_raise(rb_eNoMemError, "negative allocation size (or too big)"); ++ rb_raise(rb_eNoMemError, "negative allocation size (or too big)"); + } + if (size == 0) size = 1; + +@@ -148,11 +155,11 @@ ruby_xmalloc(size) + } + RUBY_CRITICAL(mem = malloc(size)); + if (!mem) { +- garbage_collect(); +- RUBY_CRITICAL(mem = malloc(size)); +- if (!mem) { +- rb_memerror(); +- } ++ garbage_collect(); ++ RUBY_CRITICAL(mem = malloc(size)); ++ if (!mem) { ++ rb_memerror(); ++ } + } + malloc_increase += size; + +@@ -179,17 +186,17 @@ ruby_xrealloc(ptr, size) + void *mem; + + if (size < 0) { +- rb_raise(rb_eArgError, "negative re-allocation size"); ++ rb_raise(rb_eArgError, "negative re-allocation size"); + } + if (!ptr) return xmalloc(size); + if (size == 0) size = 1; + if (ruby_gc_stress) garbage_collect(); + RUBY_CRITICAL(mem = realloc(ptr, size)); + if (!mem) { +- garbage_collect(); +- RUBY_CRITICAL(mem = realloc(ptr, size)); +- if (!mem) { +- rb_memerror(); ++ garbage_collect(); ++ RUBY_CRITICAL(mem = realloc(ptr, size)); ++ if (!mem) { ++ rb_memerror(); + } + } + malloc_increase += size; +@@ -202,11 +209,20 @@ ruby_xfree(x) + void *x; + { + if (x) +- RUBY_CRITICAL(free(x)); ++ RUBY_CRITICAL(free(x)); + } + ++#if HAVE_LONG_LONG ++#define GC_TIME_TYPE LONG_LONG ++#else ++#define GC_TIME_TYPE long ++#endif ++ + extern int ruby_in_compile; + static int dont_gc; ++static int gc_statistics = 0; ++static GC_TIME_TYPE gc_time = 0; ++static int gc_collections = 0; + static int during_gc; + static int need_call_final = 0; + static st_table *finalizer_table = 0; +@@ -241,7 +257,7 @@ rb_gc_enable() + * Disables garbage collection, returning true if garbage + * collection was already disabled. + * +- * GC.disable #=> false ++ * GC.disable #=> false or true + * GC.disable #=> true + * + */ +@@ -255,6 +271,104 @@ rb_gc_disable() + return old; + } + ++/* ++ * call-seq: ++ * GC.enable_stats => true or false ++ * ++ * Enables garbage collection statistics, returning true if garbage ++ * collection statistics was already enabled. ++ * ++ * GC.enable_stats #=> false or true ++ * GC.enable_stats #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_enable_stats() ++{ ++ int old = gc_statistics; ++ gc_statistics = Qtrue; ++ return old; ++} ++ ++/* ++ * call-seq: ++ * GC.disable_stats => true or false ++ * ++ * Disables garbage collection statistics, returning true if garbage ++ * collection statistics was already disabled. ++ * ++ * GC.disable_stats #=> false or true ++ * GC.disable_stats #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_disable_stats() ++{ ++ int old = gc_statistics; ++ gc_statistics = Qfalse; ++ return old; ++} ++ ++/* ++ * call-seq: ++ * GC.clear_stats => nil ++ * ++ * Clears garbage collection statistics, returning nil. This resets the number ++ * of collections (GC.collections) and the time used (GC.time) to 0. 
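++ * This makes it easy to meter a single section of code; an illustrative
++ * sketch, where do_work stands for any application method:
++ *
++ *    GC.enable_stats
++ *    GC.clear_stats
++ *    do_work
++ *    GC.collections   #=> number of collections during do_work
++ *    GC.time          #=> microseconds spent in GC during do_work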
++ * ++ * GC.clear_stats #=> nil ++ * ++ */ ++ ++VALUE ++rb_gc_clear_stats() ++{ ++ gc_collections = 0; ++ gc_time = 0; ++ return Qnil; ++} ++ ++/* ++ * call-seq: ++ * GC.collections => Integer ++ * ++ * Returns the number of garbage collections performed while GC statistics collection ++ * was enabled. ++ * ++ * GC.collections #=> 35 ++ * ++ */ ++ ++VALUE ++rb_gc_collections() ++{ ++ return INT2NUM(gc_collections); ++} ++ ++/* ++ * call-seq: ++ * GC.time => Integer ++ * ++ * Returns the time spent during garbage collection while GC statistics collection ++ * was enabled (in micro seconds). ++ * ++ * GC.time #=> 20000 ++ * ++ */ ++ ++VALUE ++rb_gc_time() ++{ ++#if HAVE_LONG_LONG ++ return LL2NUM(gc_time); ++#else ++ return LONG2NUM(gc_time); ++#endif ++} ++ ++ + VALUE rb_mGC; + + static struct gc_list { +@@ -281,19 +395,19 @@ rb_gc_unregister_address(addr) + struct gc_list *tmp = global_List; + + if (tmp->varptr == addr) { +- global_List = tmp->next; +- RUBY_CRITICAL(free(tmp)); +- return; ++ global_List = tmp->next; ++ RUBY_CRITICAL(free(tmp)); ++ return; + } + while (tmp->next) { +- if (tmp->next->varptr == addr) { +- struct gc_list *t = tmp->next; ++ if (tmp->next->varptr == addr) { ++ struct gc_list *t = tmp->next; + +- tmp->next = tmp->next->next; +- RUBY_CRITICAL(free(t)); +- break; +- } +- tmp = tmp->next; ++ tmp->next = tmp->next->next; ++ RUBY_CRITICAL(free(t)); ++ break; ++ } ++ tmp = tmp->next; + } + } + +@@ -312,26 +426,26 @@ rb_global_variable(var) + + typedef struct RVALUE { + union { +- struct { +- unsigned long flags; /* always 0 for freed obj */ +- struct RVALUE *next; +- } free; +- struct RBasic basic; +- struct RObject object; +- struct RClass klass; +- struct RFloat flonum; +- struct RString string; +- struct RArray array; +- struct RRegexp regexp; +- struct RHash hash; +- struct RData data; +- struct RStruct rstruct; +- struct RBignum bignum; +- struct RFile file; +- struct RNode node; +- struct RMatch match; +- struct RVarmap varmap; +- struct SCOPE scope; ++ struct { ++ unsigned long flags; /* always 0 for freed obj */ ++ struct RVALUE *next; ++ } free; ++ struct RBasic basic; ++ struct RObject object; ++ struct RClass klass; ++ struct RFloat flonum; ++ struct RString string; ++ struct RArray array; ++ struct RRegexp regexp; ++ struct RHash hash; ++ struct RData data; ++ struct RStruct rstruct; ++ struct RBignum bignum; ++ struct RFile file; ++ struct RNode node; ++ struct RMatch match; ++ struct RVarmap varmap; ++ struct SCOPE scope; + } as; + #ifdef GC_DEBUG + char *file; +@@ -346,7 +460,7 @@ typedef struct RVALUE { + static RVALUE *freelist = 0; + static RVALUE *deferred_final_list = 0; + +-#define HEAPS_INCREMENT 10 ++static int heaps_increment = 10; + static struct heaps_slot { + void *membase; + RVALUE *slot; +@@ -355,45 +469,197 @@ static struct heaps_slot { + static int heaps_length = 0; + static int heaps_used = 0; + +-#define HEAP_MIN_SLOTS 10000 +-static int heap_slots = HEAP_MIN_SLOTS; ++static int heap_min_slots = 10000; ++static int heap_slots = 10000; + +-#define FREE_MIN 4096 ++static int heap_free_min = 4096; ++static int heap_slots_increment = 10000; ++static double heap_slots_growth_factor = 1.8; ++ ++static long initial_malloc_limit = GC_MALLOC_LIMIT; ++ ++static int verbose_gc_stats = Qfalse; ++ ++static FILE* gc_data_file = NULL; + + static RVALUE *himem, *lomem; + ++static void set_gc_parameters() ++{ ++ char *gc_stats_ptr, *min_slots_ptr, *free_min_ptr, *heap_slots_incr_ptr, ++ *heap_incr_ptr, *malloc_limit_ptr, *gc_heap_file_ptr, 
*heap_slots_growth_factor_ptr; ++ ++ gc_data_file = stderr; ++ ++ gc_stats_ptr = getenv("RUBY_GC_STATS"); ++ if (gc_stats_ptr != NULL) { ++ int gc_stats_i = atoi(gc_stats_ptr); ++ if (gc_stats_i > 0) { ++ verbose_gc_stats = Qtrue; ++ } ++ } ++ ++ gc_heap_file_ptr = getenv("RUBY_GC_DATA_FILE"); ++ if (gc_heap_file_ptr != NULL) { ++ FILE* data_file = fopen(gc_heap_file_ptr, "w"); ++ if (data_file != NULL) { ++ gc_data_file = data_file; ++ } ++ else { ++ fprintf(stderr, ++ "can't open gc log file %s for writing, using default\n", gc_heap_file_ptr); ++ } ++ } ++ ++ min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS"); ++ if (min_slots_ptr != NULL) { ++ int min_slots_i = atoi(min_slots_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_MIN_SLOTS=%s\n", min_slots_ptr); ++ } ++ if (min_slots_i > 0) { ++ heap_slots = min_slots_i; ++ heap_min_slots = min_slots_i; ++ } ++ } ++ ++ free_min_ptr = getenv("RUBY_HEAP_FREE_MIN"); ++ if (free_min_ptr != NULL) { ++ int free_min_i = atoi(free_min_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_FREE_MIN=%s\n", free_min_ptr); ++ } ++ if (free_min_i > 0) { ++ heap_free_min = free_min_i; ++ } ++ } ++ ++ heap_incr_ptr = getenv("RUBY_HEAP_INCREMENT"); ++ if (heap_incr_ptr != NULL) { ++ int heap_incr_i = atoi(heap_incr_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_INCREMENT=%s\n", heap_incr_ptr); ++ } ++ if (heap_incr_i > 0) { ++ heaps_increment = heap_incr_i; ++ } ++ } ++ ++ heap_slots_incr_ptr = getenv("RUBY_HEAP_SLOTS_INCREMENT"); ++ if (heap_slots_incr_ptr != NULL) { ++ int heap_slots_incr_i = atoi(heap_slots_incr_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_SLOTS_INCREMENT=%s\n", heap_slots_incr_ptr); ++ } ++ if (heap_slots_incr_i > 0) { ++ heap_slots_increment = heap_slots_incr_i; ++ } ++ } ++ ++ heap_slots_growth_factor_ptr = getenv("RUBY_HEAP_SLOTS_GROWTH_FACTOR"); ++ if (heap_slots_growth_factor_ptr != NULL) { ++ double heap_slots_growth_factor_d = atoi(heap_slots_growth_factor_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_SLOTS_GROWTH_FACTOR=%s\n", heap_slots_growth_factor_ptr); ++ } ++ if (heap_slots_growth_factor_d > 0) { ++ heap_slots_growth_factor = heap_slots_growth_factor_d; ++ } ++ } ++ ++ malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT"); ++ if (malloc_limit_ptr != NULL) { ++ int malloc_limit_i = atol(malloc_limit_ptr); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_GC_MALLOC_LIMIT=%s\n", malloc_limit_ptr); ++ } ++ if (malloc_limit_i > 0) { ++ initial_malloc_limit = malloc_limit_i; ++ } ++ } ++} ++ ++/* ++ * call-seq: ++ * GC.dump => nil ++ * ++ * dumps information about the current GC data structures to the GC log file ++ * ++ * GC.dump #=> nil ++ * ++ */ ++ ++VALUE ++rb_gc_dump() ++{ ++ int i; ++ ++ for (i = 0; i < heaps_used; i++) { ++ int heap_size = heaps[i].limit; ++ fprintf(gc_data_file, "HEAP[%2d]: size=%7d\n", i, heap_size); ++ } ++ ++ return Qnil; ++} ++ ++/* ++ * call-seq: ++ * GC.log String => String ++ * ++ * Logs string to the GC data file and returns it. 
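++ * Handy for correlating application events with the surrounding GC trace
++ * output (illustrative; warm_cache stands for any application method):
++ *
++ *    GC.log "before cache warmup"
++ *    warm_cache
++ *    GC.log "after cache warmup"   #=> "after cache warmup"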
++ * ++ * GC.log "manual GC call" #=> "manual GC call" ++ * ++ */ ++ ++VALUE ++rb_gc_log(self, original_str) ++ VALUE self, original_str; ++{ ++ if (original_str == Qnil) { ++ fprintf(gc_data_file, "\n"); ++ } ++ else { ++ VALUE str = StringValue(original_str); ++ char *p = RSTRING(str)->ptr; ++ fprintf(gc_data_file, "%s\n", p); ++ } ++ return original_str; ++} ++ ++ + static void + add_heap() + { + RVALUE *p, *pend; + + if (heaps_used == heaps_length) { +- /* Realloc heaps */ +- struct heaps_slot *p; +- int length; +- +- heaps_length += HEAPS_INCREMENT; +- length = heaps_length*sizeof(struct heaps_slot); +- RUBY_CRITICAL( +- if (heaps_used > 0) { +- p = (struct heaps_slot *)realloc(heaps, length); +- if (p) heaps = p; +- } +- else { +- p = heaps = (struct heaps_slot *)malloc(length); +- }); +- if (p == 0) rb_memerror(); ++ /* Realloc heaps */ ++ struct heaps_slot *p; ++ int length; ++ ++ heaps_length += heaps_increment; ++ length = heaps_length*sizeof(struct heaps_slot); ++ RUBY_CRITICAL( ++ if (heaps_used > 0) { ++ p = (struct heaps_slot *)realloc(heaps, length); ++ if (p) heaps = p; ++ } ++ else { ++ p = heaps = (struct heaps_slot *)malloc(length); ++ }); ++ if (p == 0) rb_memerror(); + } + + for (;;) { +- RUBY_CRITICAL(p = (RVALUE*)malloc(sizeof(RVALUE)*(heap_slots+1))); +- if (p == 0) { +- if (heap_slots == HEAP_MIN_SLOTS) { +- rb_memerror(); +- } +- heap_slots = HEAP_MIN_SLOTS; +- continue; +- } ++ RUBY_CRITICAL(p = (RVALUE*)malloc(sizeof(RVALUE)*(heap_slots+1))); ++ if (p == 0) { ++ if (heap_slots == heap_min_slots) { ++ rb_memerror(); ++ } ++ heap_slots = heap_min_slots; ++ continue; ++ } + heaps[heaps_used].membase = p; + if ((VALUE)p % sizeof(RVALUE) == 0) + heap_slots += 1; +@@ -401,25 +667,26 @@ add_heap() + p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE))); + heaps[heaps_used].slot = p; + heaps[heaps_used].limit = heap_slots; +- break; ++ break; + } + pend = p + heap_slots; + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; + heaps_used++; +- heap_slots *= 1.8; +- if (heap_slots <= 0) heap_slots = HEAP_MIN_SLOTS; ++ heap_slots += heap_slots_increment; ++ heap_slots_increment *= heap_slots_growth_factor; ++ if (heap_slots <= 0) heap_slots = heap_min_slots; + + while (p < pend) { +- p->as.free.flags = 0; +- p->as.free.next = freelist; +- freelist = p; +- p++; ++ p->as.free.flags = 0; ++ p->as.free.next = freelist; ++ freelist = p; ++ p++; + } + } + #define RANY(o) ((RVALUE*)(o)) + +-int ++int + rb_during_gc() + { + return during_gc; +@@ -431,7 +698,7 @@ rb_newobj() + VALUE obj; + + if (during_gc) +- rb_bug("object allocation during garbage collection phase"); ++ rb_bug("object allocation during garbage collection phase"); + + if (ruby_gc_stress || !freelist) garbage_collect(); + +@@ -580,13 +847,13 @@ rb_source_filename(f) + st_data_t name; + + if (!st_lookup(source_filenames, (st_data_t)f, &name)) { +- long len = strlen(f) + 1; +- char *ptr = ALLOC_N(char, len + 1); +- name = (st_data_t)ptr; +- *ptr++ = 0; +- MEMCPY(ptr, f, char, len); +- st_add_direct(source_filenames, (st_data_t)ptr, name); +- return ptr; ++ long len = strlen(f) + 1; ++ char *ptr = ALLOC_N(char, len + 1); ++ name = (st_data_t)ptr; ++ *ptr++ = 0; ++ MEMCPY(ptr, f, char, len); ++ st_add_direct(source_filenames, (st_data_t)ptr, name); ++ return ptr; + } + return (char *)name + 1; + } +@@ -596,7 +863,7 @@ mark_source_filename(f) + char *f; + { + if (f) { +- f[-1] = 1; ++ f[-1] = 1; + } + } + +@@ -605,12 +872,12 @@ sweep_source_filename(key, value) + char *key, *value; 
+ { + if (*value) { +- *value = 0; +- return ST_CONTINUE; ++ *value = 0; ++ return ST_CONTINUE; + } + else { +- free(value); +- return ST_DELETE; ++ free(value); ++ return ST_DELETE; + } + } + +@@ -625,14 +892,14 @@ gc_mark_all() + + init_mark_stack(); + for (i = 0; i < heaps_used; i++) { +- p = heaps[i].slot; pend = p + heaps[i].limit; +- while (p < pend) { +- if ((p->as.basic.flags & FL_MARK) && +- (p->as.basic.flags != FL_MARK)) { +- gc_mark_children((VALUE)p, 0); +- } +- p++; +- } ++ p = heaps[i].slot; pend = p + heaps[i].limit; ++ while (p < pend) { ++ if ((p->as.basic.flags & FL_MARK) && ++ (p->as.basic.flags != FL_MARK)) { ++ gc_mark_children((VALUE)p, 0); ++ } ++ p++; ++ } + } + } + +@@ -647,8 +914,8 @@ gc_mark_rest() + + init_mark_stack(); + while(p != tmp_arry){ +- p--; +- gc_mark_children(*p, 0); ++ p--; ++ gc_mark_children(*p, 0); + } + } + +@@ -665,9 +932,9 @@ is_pointer_to_heap(ptr) + + /* check if p looks like a pointer */ + for (i=0; i < heaps_used; i++) { +- heap_org = heaps[i].slot; +- if (heap_org <= p && p < heap_org + heaps[i].limit) +- return Qtrue; ++ heap_org = heaps[i].slot; ++ if (heap_org <= p && p < heap_org + heaps[i].limit) ++ return Qtrue; + } + return Qfalse; + } +@@ -680,10 +947,10 @@ mark_locations_array(x, n) + VALUE v; + while (n--) { + v = *x; +- if (is_pointer_to_heap((void *)v)) { +- gc_mark(v, 0); +- } +- x++; ++ if (is_pointer_to_heap((void *)v)) { ++ gc_mark(v, 0); ++ } ++ x++; + } + } + +@@ -780,7 +1047,7 @@ rb_gc_mark_maybe(obj) + VALUE obj; + { + if (is_pointer_to_heap((void *)obj)) { +- gc_mark(obj, 0); ++ gc_mark(obj, 0); + } + } + +@@ -828,7 +1095,7 @@ gc_mark_children(ptr, lev) + { + register RVALUE *obj = RANY(ptr); + +- goto marking; /* skip */ ++ goto marking; /* skip */ + + again: + obj = RANY(ptr); +@@ -839,148 +1106,148 @@ gc_mark_children(ptr, lev) + + marking: + if (FL_TEST(obj, FL_EXIVAR)) { +- rb_mark_generic_ivar(ptr); ++ rb_mark_generic_ivar(ptr); + } + + switch (obj->as.basic.flags & T_MASK) { + case T_NIL: + case T_FIXNUM: +- rb_bug("rb_gc_mark() called for broken object"); +- break; ++ rb_bug("rb_gc_mark() called for broken object"); ++ break; + + case T_NODE: +- mark_source_filename(obj->as.node.nd_file); +- switch (nd_type(obj)) { +- case NODE_IF: /* 1,2,3 */ +- case NODE_FOR: +- case NODE_ITER: +- case NODE_CREF: +- case NODE_WHEN: +- case NODE_MASGN: +- case NODE_RESCUE: +- case NODE_RESBODY: +- case NODE_CLASS: +- gc_mark((VALUE)obj->as.node.u2.node, lev); +- /* fall through */ +- case NODE_BLOCK: /* 1,3 */ +- case NODE_ARRAY: +- case NODE_DSTR: +- case NODE_DXSTR: +- case NODE_DREGX: +- case NODE_DREGX_ONCE: +- case NODE_FBODY: +- case NODE_ENSURE: +- case NODE_CALL: +- case NODE_DEFS: +- case NODE_OP_ASGN1: +- gc_mark((VALUE)obj->as.node.u1.node, lev); +- /* fall through */ +- case NODE_SUPER: /* 3 */ +- case NODE_FCALL: +- case NODE_DEFN: +- case NODE_NEWLINE: +- ptr = (VALUE)obj->as.node.u3.node; +- goto again; +- +- case NODE_WHILE: /* 1,2 */ +- case NODE_UNTIL: +- case NODE_AND: +- case NODE_OR: +- case NODE_CASE: +- case NODE_SCLASS: +- case NODE_DOT2: +- case NODE_DOT3: +- case NODE_FLIP2: +- case NODE_FLIP3: +- case NODE_MATCH2: +- case NODE_MATCH3: +- case NODE_OP_ASGN_OR: +- case NODE_OP_ASGN_AND: +- case NODE_MODULE: +- case NODE_ALIAS: +- case NODE_VALIAS: +- case NODE_ARGS: +- gc_mark((VALUE)obj->as.node.u1.node, lev); +- /* fall through */ +- case NODE_METHOD: /* 2 */ +- case NODE_NOT: +- case NODE_GASGN: +- case NODE_LASGN: +- case NODE_DASGN: +- case NODE_DASGN_CURR: +- case NODE_IASGN: +- case 
NODE_CVDECL: +- case NODE_CVASGN: +- case NODE_COLON3: +- case NODE_OPT_N: +- case NODE_EVSTR: +- case NODE_UNDEF: +- ptr = (VALUE)obj->as.node.u2.node; +- goto again; +- +- case NODE_HASH: /* 1 */ +- case NODE_LIT: +- case NODE_STR: +- case NODE_XSTR: +- case NODE_DEFINED: +- case NODE_MATCH: +- case NODE_RETURN: +- case NODE_BREAK: +- case NODE_NEXT: +- case NODE_YIELD: +- case NODE_COLON2: +- case NODE_SPLAT: +- case NODE_TO_ARY: +- case NODE_SVALUE: +- ptr = (VALUE)obj->as.node.u1.node; +- goto again; +- +- case NODE_SCOPE: /* 2,3 */ +- case NODE_BLOCK_PASS: +- case NODE_CDECL: +- gc_mark((VALUE)obj->as.node.u3.node, lev); +- ptr = (VALUE)obj->as.node.u2.node; +- goto again; +- +- case NODE_ZARRAY: /* - */ +- case NODE_ZSUPER: +- case NODE_CFUNC: +- case NODE_VCALL: +- case NODE_GVAR: +- case NODE_LVAR: +- case NODE_DVAR: +- case NODE_IVAR: +- case NODE_CVAR: +- case NODE_NTH_REF: +- case NODE_BACK_REF: +- case NODE_REDO: +- case NODE_RETRY: +- case NODE_SELF: +- case NODE_NIL: +- case NODE_TRUE: +- case NODE_FALSE: +- case NODE_ATTRSET: +- case NODE_BLOCK_ARG: +- case NODE_POSTEXE: +- break; +- case NODE_ALLOCA: +- mark_locations_array((VALUE*)obj->as.node.u1.value, +- obj->as.node.u3.cnt); +- ptr = (VALUE)obj->as.node.u2.node; +- goto again; +- +- default: /* unlisted NODE */ +- if (is_pointer_to_heap(obj->as.node.u1.node)) { +- gc_mark((VALUE)obj->as.node.u1.node, lev); +- } +- if (is_pointer_to_heap(obj->as.node.u2.node)) { +- gc_mark((VALUE)obj->as.node.u2.node, lev); +- } +- if (is_pointer_to_heap(obj->as.node.u3.node)) { +- gc_mark((VALUE)obj->as.node.u3.node, lev); +- } +- } +- return; /* no need to mark class. */ ++ mark_source_filename(obj->as.node.nd_file); ++ switch (nd_type(obj)) { ++ case NODE_IF: /* 1,2,3 */ ++ case NODE_FOR: ++ case NODE_ITER: ++ case NODE_CREF: ++ case NODE_WHEN: ++ case NODE_MASGN: ++ case NODE_RESCUE: ++ case NODE_RESBODY: ++ case NODE_CLASS: ++ gc_mark((VALUE)obj->as.node.u2.node, lev); ++ /* fall through */ ++ case NODE_BLOCK: /* 1,3 */ ++ case NODE_ARRAY: ++ case NODE_DSTR: ++ case NODE_DXSTR: ++ case NODE_DREGX: ++ case NODE_DREGX_ONCE: ++ case NODE_FBODY: ++ case NODE_ENSURE: ++ case NODE_CALL: ++ case NODE_DEFS: ++ case NODE_OP_ASGN1: ++ gc_mark((VALUE)obj->as.node.u1.node, lev); ++ /* fall through */ ++ case NODE_SUPER: /* 3 */ ++ case NODE_FCALL: ++ case NODE_DEFN: ++ case NODE_NEWLINE: ++ ptr = (VALUE)obj->as.node.u3.node; ++ goto again; ++ ++ case NODE_WHILE: /* 1,2 */ ++ case NODE_UNTIL: ++ case NODE_AND: ++ case NODE_OR: ++ case NODE_CASE: ++ case NODE_SCLASS: ++ case NODE_DOT2: ++ case NODE_DOT3: ++ case NODE_FLIP2: ++ case NODE_FLIP3: ++ case NODE_MATCH2: ++ case NODE_MATCH3: ++ case NODE_OP_ASGN_OR: ++ case NODE_OP_ASGN_AND: ++ case NODE_MODULE: ++ case NODE_ALIAS: ++ case NODE_VALIAS: ++ case NODE_ARGS: ++ gc_mark((VALUE)obj->as.node.u1.node, lev); ++ /* fall through */ ++ case NODE_METHOD: /* 2 */ ++ case NODE_NOT: ++ case NODE_GASGN: ++ case NODE_LASGN: ++ case NODE_DASGN: ++ case NODE_DASGN_CURR: ++ case NODE_IASGN: ++ case NODE_CVDECL: ++ case NODE_CVASGN: ++ case NODE_COLON3: ++ case NODE_OPT_N: ++ case NODE_EVSTR: ++ case NODE_UNDEF: ++ ptr = (VALUE)obj->as.node.u2.node; ++ goto again; ++ ++ case NODE_HASH: /* 1 */ ++ case NODE_LIT: ++ case NODE_STR: ++ case NODE_XSTR: ++ case NODE_DEFINED: ++ case NODE_MATCH: ++ case NODE_RETURN: ++ case NODE_BREAK: ++ case NODE_NEXT: ++ case NODE_YIELD: ++ case NODE_COLON2: ++ case NODE_SPLAT: ++ case NODE_TO_ARY: ++ case NODE_SVALUE: ++ ptr = (VALUE)obj->as.node.u1.node; ++ goto again; ++ ++ 
case NODE_SCOPE: /* 2,3 */ ++ case NODE_BLOCK_PASS: ++ case NODE_CDECL: ++ gc_mark((VALUE)obj->as.node.u3.node, lev); ++ ptr = (VALUE)obj->as.node.u2.node; ++ goto again; ++ ++ case NODE_ZARRAY: /* - */ ++ case NODE_ZSUPER: ++ case NODE_CFUNC: ++ case NODE_VCALL: ++ case NODE_GVAR: ++ case NODE_LVAR: ++ case NODE_DVAR: ++ case NODE_IVAR: ++ case NODE_CVAR: ++ case NODE_NTH_REF: ++ case NODE_BACK_REF: ++ case NODE_REDO: ++ case NODE_RETRY: ++ case NODE_SELF: ++ case NODE_NIL: ++ case NODE_TRUE: ++ case NODE_FALSE: ++ case NODE_ATTRSET: ++ case NODE_BLOCK_ARG: ++ case NODE_POSTEXE: ++ break; ++ case NODE_ALLOCA: ++ mark_locations_array((VALUE*)obj->as.node.u1.value, ++ obj->as.node.u3.cnt); ++ ptr = (VALUE)obj->as.node.u2.node; ++ goto again; ++ ++ default: /* unlisted NODE */ ++ if (is_pointer_to_heap(obj->as.node.u1.node)) { ++ gc_mark((VALUE)obj->as.node.u1.node, lev); ++ } ++ if (is_pointer_to_heap(obj->as.node.u2.node)) { ++ gc_mark((VALUE)obj->as.node.u2.node, lev); ++ } ++ if (is_pointer_to_heap(obj->as.node.u3.node)) { ++ gc_mark((VALUE)obj->as.node.u3.node, lev); ++ } ++ } ++ return; /* no need to mark class. */ + } + + gc_mark(obj->as.basic.klass, lev); +@@ -988,92 +1255,92 @@ gc_mark_children(ptr, lev) + case T_ICLASS: + case T_CLASS: + case T_MODULE: +- mark_tbl(obj->as.klass.m_tbl, lev); +- mark_tbl(obj->as.klass.iv_tbl, lev); +- ptr = obj->as.klass.super; +- goto again; ++ mark_tbl(obj->as.klass.m_tbl, lev); ++ mark_tbl(obj->as.klass.iv_tbl, lev); ++ ptr = obj->as.klass.super; ++ goto again; + + case T_ARRAY: +- if (FL_TEST(obj, ELTS_SHARED)) { +- ptr = obj->as.array.aux.shared; +- goto again; +- } +- else { +- long i, len = obj->as.array.len; +- VALUE *ptr = obj->as.array.ptr; ++ if (FL_TEST(obj, ELTS_SHARED)) { ++ ptr = obj->as.array.aux.shared; ++ goto again; ++ } ++ else { ++ long i, len = obj->as.array.len; ++ VALUE *ptr = obj->as.array.ptr; + +- for (i=0; i < len; i++) { +- gc_mark(*ptr++, lev); +- } +- } +- break; ++ for (i=0; i < len; i++) { ++ gc_mark(*ptr++, lev); ++ } ++ } ++ break; + + case T_HASH: +- mark_hash(obj->as.hash.tbl, lev); +- ptr = obj->as.hash.ifnone; +- goto again; ++ mark_hash(obj->as.hash.tbl, lev); ++ ptr = obj->as.hash.ifnone; ++ goto again; + + case T_STRING: + #define STR_ASSOC FL_USER3 /* copied from string.c */ +- if (FL_TEST(obj, ELTS_SHARED|STR_ASSOC)) { +- ptr = obj->as.string.aux.shared; +- goto again; +- } +- break; ++ if (FL_TEST(obj, ELTS_SHARED|STR_ASSOC)) { ++ ptr = obj->as.string.aux.shared; ++ goto again; ++ } ++ break; + + case T_DATA: +- if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj)); +- break; ++ if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj)); ++ break; + + case T_OBJECT: +- mark_tbl(obj->as.object.iv_tbl, lev); +- break; ++ mark_tbl(obj->as.object.iv_tbl, lev); ++ break; + + case T_FILE: + case T_REGEXP: + case T_FLOAT: + case T_BIGNUM: + case T_BLKTAG: +- break; ++ break; + + case T_MATCH: +- if (obj->as.match.str) { +- ptr = obj->as.match.str; +- goto again; +- } +- break; ++ if (obj->as.match.str) { ++ ptr = obj->as.match.str; ++ goto again; ++ } ++ break; + + case T_VARMAP: +- gc_mark(obj->as.varmap.val, lev); +- ptr = (VALUE)obj->as.varmap.next; +- goto again; ++ gc_mark(obj->as.varmap.val, lev); ++ ptr = (VALUE)obj->as.varmap.next; ++ goto again; + + case T_SCOPE: +- if (obj->as.scope.local_vars && (obj->as.scope.flags & SCOPE_MALLOC)) { +- int n = obj->as.scope.local_tbl[0]+1; +- VALUE *vars = &obj->as.scope.local_vars[-1]; ++ if (obj->as.scope.local_vars && (obj->as.scope.flags & SCOPE_MALLOC)) 
{ ++ int n = obj->as.scope.local_tbl[0]+1; ++ VALUE *vars = &obj->as.scope.local_vars[-1]; + +- while (n--) { +- gc_mark(*vars++, lev); +- } +- } +- break; ++ while (n--) { ++ gc_mark(*vars++, lev); ++ } ++ } ++ break; + + case T_STRUCT: +- { +- long len = obj->as.rstruct.len; +- VALUE *ptr = obj->as.rstruct.ptr; ++ { ++ long len = obj->as.rstruct.len; ++ VALUE *ptr = obj->as.rstruct.ptr; + +- while (len--) { +- gc_mark(*ptr++, lev); +- } +- } +- break; ++ while (len--) { ++ gc_mark(*ptr++, lev); ++ } ++ } ++ break; + + default: +- rb_bug("rb_gc_mark(): unknown data type 0x%lx(0x%lx) %s", +- obj->as.basic.flags & T_MASK, obj, +- is_pointer_to_heap(obj) ? "corrupted object" : "non object"); ++ rb_bug("rb_gc_mark(): unknown data type 0x%lx(0x%lx) %s", ++ obj->as.basic.flags & T_MASK, obj, ++ is_pointer_to_heap(obj) ? "corrupted object" : "non object"); + } + } + +@@ -1102,22 +1369,55 @@ finalize_list(p) + } + } + ++static char* obj_type(int tp) ++{ ++ switch (tp) { ++ case T_NIL : return "NIL"; ++ case T_OBJECT : return "OBJECT"; ++ case T_CLASS : return "CLASS"; ++ case T_ICLASS : return "ICLASS"; ++ case T_MODULE : return "MODULE"; ++ case T_FLOAT : return "FLOAT"; ++ case T_STRING : return "STRING"; ++ case T_REGEXP : return "REGEXP"; ++ case T_ARRAY : return "ARRAY"; ++ case T_FIXNUM : return "FIXNUM"; ++ case T_HASH : return "HASH"; ++ case T_STRUCT : return "STRUCT"; ++ case T_BIGNUM : return "BIGNUM"; ++ case T_FILE : return "FILE"; ++ ++ case T_TRUE : return "TRUE"; ++ case T_FALSE : return "FALSE"; ++ case T_DATA : return "DATA"; ++ case T_MATCH : return "MATCH"; ++ case T_SYMBOL : return "SYMBOL"; ++ ++ case T_BLKTAG : return "BLKTAG"; ++ case T_UNDEF : return "UNDEF"; ++ case T_VARMAP : return "VARMAP"; ++ case T_SCOPE : return "SCOPE"; ++ case T_NODE : return "NODE"; ++ default: return "____"; ++ } ++} ++ + static void + free_unused_heaps() + { + int i, j; + + for (i = j = 1; j < heaps_used; i++) { +- if (heaps[i].limit == 0) { +- free(heaps[i].membase); +- heaps_used--; +- } +- else { +- if (i != j) { +- heaps[j] = heaps[i]; +- } +- j++; +- } ++ if (heaps[i].limit == 0) { ++ free(heaps[i].membase); ++ heaps_used--; ++ } ++ else { ++ if (i != j) { ++ heaps[j] = heaps[i]; ++ } ++ j++; ++ } + } + } + +@@ -1134,24 +1434,33 @@ gc_sweep() + unsigned long live = 0; + unsigned long free_min = 0; + ++ unsigned long really_freed = 0; ++ int free_counts[256]; ++ int live_counts[256]; ++ int do_gc_stats = gc_statistics & verbose_gc_stats; ++ + for (i = 0; i < heaps_used; i++) { + free_min += heaps[i].limit; + } + free_min = free_min * 0.2; +- if (free_min < FREE_MIN) +- free_min = FREE_MIN; ++ if (free_min < heap_free_min) ++ free_min = heap_free_min; ++ ++ if (do_gc_stats) { ++ for (i = 0 ; i< 256; i++) { free_counts[i] = live_counts[i] = 0; } ++ } + + if (ruby_in_compile && ruby_parser_stack_on_heap()) { +- /* should not reclaim nodes during compilation ++ /* should not reclaim nodes during compilation + if yacc's semantic stack is not allocated on machine stack */ +- for (i = 0; i < heaps_used; i++) { +- p = heaps[i].slot; pend = p + heaps[i].limit; +- while (p < pend) { +- if (!(p->as.basic.flags&FL_MARK) && BUILTIN_TYPE(p) == T_NODE) +- gc_mark((VALUE)p, 0); +- p++; +- } +- } ++ for (i = 0; i < heaps_used; i++) { ++ p = heaps[i].slot; pend = p + heaps[i].limit; ++ while (p < pend) { ++ if (!(p->as.basic.flags&FL_MARK) && BUILTIN_TYPE(p) == T_NODE) ++ gc_mark((VALUE)p, 0); ++ p++; ++ } ++ } + } + + mark_source_filename(ruby_sourcefile); +@@ -1172,7 +1481,7 @@ gc_sweep() + while (p < 
pend) { + if (!(p->as.basic.flags & FL_MARK)) { + if (p->as.basic.flags && +- ((deferred = obj_free((VALUE)p)) || ++ (((do_gc_stats && really_freed++), deferred = obj_free((VALUE)p)) || + ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) { + if (!deferred) { + p->as.free.flags = T_DEFERRED; +@@ -1183,6 +1492,12 @@ gc_sweep() + final_list = p; + } + else { ++ if (do_gc_stats) { ++ int obt = p->as.basic.flags & T_MASK; ++ if (obt) { ++ free_counts[obt]++; ++ } ++ } + add_freelist(p); + } + n++; +@@ -1194,6 +1509,9 @@ gc_sweep() + else { + RBASIC(p)->flags &= ~FL_MARK; + live++; ++ if (do_gc_stats) { ++ live_counts[RANY((VALUE)p)->as.basic.flags & T_MASK]++; ++ } + } + p++; + } +@@ -1211,15 +1529,29 @@ gc_sweep() + } + } + if (malloc_increase > malloc_limit) { +- malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed); +- if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT; ++ malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed); ++ if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit; + } + malloc_increase = 0; + if (freed < free_min) { +- add_heap(); ++ add_heap(); + } + during_gc = 0; + ++ if (do_gc_stats) { ++ fprintf(gc_data_file, "objects processed: %.7d\n", live+freed); ++ fprintf(gc_data_file, "live objects : %.7d\n", live); ++ fprintf(gc_data_file, "freelist objects : %.7d\n", freed - really_freed); ++ fprintf(gc_data_file, "freed objects : %.7d\n", really_freed); ++ for(i=0; i<256; i++) { ++ if (free_counts[i]>0 || live_counts[i]>0) { ++ fprintf(gc_data_file, ++ "kept %.7d / freed %.7d objects of type %s\n", ++ live_counts[i], free_counts[i], obj_type(i)); ++ } ++ } ++ } ++ + /* clear finalization list */ + if (final_list) { + deferred_final_list = final_list; +@@ -1260,51 +1592,51 @@ obj_free(obj) + case T_FIXNUM: + case T_TRUE: + case T_FALSE: +- rb_bug("obj_free() called for broken object"); +- break; ++ rb_bug("obj_free() called for broken object"); ++ break; + } + + if (FL_TEST(obj, FL_EXIVAR)) { +- rb_free_generic_ivar((VALUE)obj); ++ rb_free_generic_ivar((VALUE)obj); + } + + switch (BUILTIN_TYPE(obj)) { + case T_OBJECT: +- if (RANY(obj)->as.object.iv_tbl) { +- st_free_table(RANY(obj)->as.object.iv_tbl); +- } +- break; ++ if (RANY(obj)->as.object.iv_tbl) { ++ st_free_table(RANY(obj)->as.object.iv_tbl); ++ } ++ break; + case T_MODULE: + case T_CLASS: +- rb_clear_cache_by_class((VALUE)obj); +- st_free_table(RANY(obj)->as.klass.m_tbl); +- if (RANY(obj)->as.object.iv_tbl) { +- st_free_table(RANY(obj)->as.object.iv_tbl); +- } +- break; ++ rb_clear_cache_by_class((VALUE)obj); ++ st_free_table(RANY(obj)->as.klass.m_tbl); ++ if (RANY(obj)->as.object.iv_tbl) { ++ st_free_table(RANY(obj)->as.object.iv_tbl); ++ } ++ break; + case T_STRING: +- if (RANY(obj)->as.string.ptr && !FL_TEST(obj, ELTS_SHARED)) { +- RUBY_CRITICAL(free(RANY(obj)->as.string.ptr)); +- } +- break; ++ if (RANY(obj)->as.string.ptr && !FL_TEST(obj, ELTS_SHARED)) { ++ RUBY_CRITICAL(free(RANY(obj)->as.string.ptr)); ++ } ++ break; + case T_ARRAY: +- if (RANY(obj)->as.array.ptr && !FL_TEST(obj, ELTS_SHARED)) { +- RUBY_CRITICAL(free(RANY(obj)->as.array.ptr)); +- } +- break; ++ if (RANY(obj)->as.array.ptr && !FL_TEST(obj, ELTS_SHARED)) { ++ RUBY_CRITICAL(free(RANY(obj)->as.array.ptr)); ++ } ++ break; + case T_HASH: +- if (RANY(obj)->as.hash.tbl) { +- st_free_table(RANY(obj)->as.hash.tbl); +- } +- break; ++ if (RANY(obj)->as.hash.tbl) { ++ st_free_table(RANY(obj)->as.hash.tbl); ++ } ++ break; + case T_REGEXP: +- if 
(RANY(obj)->as.regexp.ptr) { +- re_free_pattern(RANY(obj)->as.regexp.ptr); +- } +- if (RANY(obj)->as.regexp.str) { +- RUBY_CRITICAL(free(RANY(obj)->as.regexp.str)); +- } +- break; ++ if (RANY(obj)->as.regexp.ptr) { ++ re_free_pattern(RANY(obj)->as.regexp.ptr); ++ } ++ if (RANY(obj)->as.regexp.str) { ++ RUBY_CRITICAL(free(RANY(obj)->as.regexp.str)); ++ } ++ break; + case T_DATA: + if (DATA_PTR(obj)) { + if ((long)RANY(obj)->as.data.dfree == -1) { +@@ -1317,11 +1649,11 @@ obj_free(obj) + } + break; + case T_MATCH: +- if (RANY(obj)->as.match.regs) { +- re_free_registers(RANY(obj)->as.match.regs); +- RUBY_CRITICAL(free(RANY(obj)->as.match.regs)); +- } +- break; ++ if (RANY(obj)->as.match.regs) { ++ re_free_registers(RANY(obj)->as.match.regs); ++ RUBY_CRITICAL(free(RANY(obj)->as.match.regs)); ++ } ++ break; + case T_FILE: + if (RANY(obj)->as.file.fptr) { + struct rb_io_t *fptr = RANY(obj)->as.file.fptr; +@@ -1332,19 +1664,19 @@ obj_free(obj) + } + break; + case T_ICLASS: +- /* iClass shares table with the module */ +- break; ++ /* iClass shares table with the module */ ++ break; + + case T_FLOAT: + case T_VARMAP: + case T_BLKTAG: +- break; ++ break; + + case T_BIGNUM: +- if (RANY(obj)->as.bignum.digits) { +- RUBY_CRITICAL(free(RANY(obj)->as.bignum.digits)); +- } +- break; ++ if (RANY(obj)->as.bignum.digits) { ++ RUBY_CRITICAL(free(RANY(obj)->as.bignum.digits)); ++ } ++ break; + case T_NODE: + switch (nd_type(obj)) { + case NODE_SCOPE: +@@ -1359,7 +1691,7 @@ obj_free(obj) + break; /* no need to free iv_tbl */ + + case T_SCOPE: +- if (RANY(obj)->as.scope.local_vars && ++ if (RANY(obj)->as.scope.local_vars && + RANY(obj)->as.scope.flags != SCOPE_ALLOCA) { + VALUE *vars = RANY(obj)->as.scope.local_vars-1; + if (!(RANY(obj)->as.scope.flags & SCOPE_CLONE) && vars[0] == 0) +@@ -1370,14 +1702,14 @@ obj_free(obj) + break; + + case T_STRUCT: +- if (RANY(obj)->as.rstruct.ptr) { +- RUBY_CRITICAL(free(RANY(obj)->as.rstruct.ptr)); +- } +- break; ++ if (RANY(obj)->as.rstruct.ptr) { ++ RUBY_CRITICAL(free(RANY(obj)->as.rstruct.ptr)); ++ } ++ break; + + default: +- rb_bug("gc_sweep(): unknown data type 0x%lx(0x%lx)", +- RANY(obj)->as.basic.flags & T_MASK, obj); ++ rb_bug("gc_sweep(): unknown data type 0x%lx(0x%lx)", ++ RANY(obj)->as.basic.flags & T_MASK, obj); + } + + return 0; +@@ -1407,18 +1739,18 @@ _rb_setjmp:\n\ + typedef unsigned long rb_jmp_buf[6]; + __asm__ (".align 4\n\ + _rb_setjmp:\n\ +- pushl %ebp\n\ +- movl %esp,%ebp\n\ +- movl 8(%ebp),%ebp\n\ +- movl %eax,(%ebp)\n\ +- movl %ebx,4(%ebp)\n\ +- movl %ecx,8(%ebp)\n\ +- movl %edx,12(%ebp)\n\ +- movl %esi,16(%ebp)\n\ +- movl %edi,20(%ebp)\n\ +- popl %ebp\n\ +- xorl %eax,%eax\n\ +- ret"); ++ pushl %ebp\n\ ++ movl %esp,%ebp\n\ ++ movl 8(%ebp),%ebp\n\ ++ movl %eax,(%ebp)\n\ ++ movl %ebx,4(%ebp)\n\ ++ movl %ecx,8(%ebp)\n\ ++ movl %edx,12(%ebp)\n\ ++ movl %esi,16(%ebp)\n\ ++ movl %edi,20(%ebp)\n\ ++ popl %ebp\n\ ++ xorl %eax,%eax\n\ ++ ret"); + #endif + #endif + int rb_setjmp (rb_jmp_buf); +@@ -1431,41 +1763,50 @@ garbage_collect() + struct gc_list *list; + struct FRAME * volatile frame; /* gcc 2.7.2.3 -O2 bug?? 
*/ + jmp_buf save_regs_gc_mark; ++ struct timeval gctv1, gctv2; + SET_STACK_END; + + #ifdef HAVE_NATIVETHREAD + if (!is_ruby_native_thread()) { +- rb_bug("cross-thread violation on rb_gc()"); ++ rb_bug("cross-thread violation on rb_gc()"); + } + #endif + if (dont_gc || during_gc) { +- if (!freelist) { +- add_heap(); +- } +- return; ++ if (!freelist) { ++ add_heap(); ++ } ++ return; + } + if (during_gc) return; + during_gc++; + ++ if (gc_statistics) { ++ gc_collections++; ++ gettimeofday(&gctv1, NULL); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "Garbage collection started\n"); ++ } ++ } ++ + init_mark_stack(); + + gc_mark((VALUE)ruby_current_node, 0); + + /* mark frame stack */ + for (frame = ruby_frame; frame; frame = frame->prev) { +- rb_gc_mark_frame(frame); +- if (frame->tmp) { +- struct FRAME *tmp = frame->tmp; +- while (tmp) { +- rb_gc_mark_frame(tmp); +- tmp = tmp->prev; +- } +- } ++ rb_gc_mark_frame(frame); ++ if (frame->tmp) { ++ struct FRAME *tmp = frame->tmp; ++ while (tmp) { ++ rb_gc_mark_frame(tmp); ++ tmp = tmp->prev; ++ } ++ } + } + gc_mark((VALUE)ruby_scope, 0); + gc_mark((VALUE)ruby_dyna_vars, 0); + if (finalizer_table) { +- mark_tbl(finalizer_table, 0); ++ mark_tbl(finalizer_table, 0); + } + + FLUSH_REGISTER_WINDOWS; +@@ -1478,9 +1819,9 @@ garbage_collect() + rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1); + #else + if ((VALUE*)STACK_END < rb_gc_stack_start) +- rb_gc_mark_locations((VALUE*)STACK_END, rb_gc_stack_start); ++ rb_gc_mark_locations((VALUE*)STACK_END, rb_gc_stack_start); + else +- rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1); ++ rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1); + #endif + #ifdef __ia64 + /* mark backing store (flushed register window on the stack) */ +@@ -1489,13 +1830,13 @@ garbage_collect() + #endif + #if defined(__human68k__) || defined(__mc68000__) + rb_gc_mark_locations((VALUE*)((char*)STACK_END + 2), +- (VALUE*)((char*)rb_gc_stack_start + 2)); ++ (VALUE*)((char*)rb_gc_stack_start + 2)); + #endif + rb_gc_mark_threads(); + + /* mark protected global variables */ + for (list = global_List; list; list = list->next) { +- rb_gc_mark_maybe(*list->varptr); ++ rb_gc_mark_maybe(*list->varptr); + } + rb_mark_end_proc(); + rb_gc_mark_global_tbl(); +@@ -1510,18 +1851,30 @@ garbage_collect() + + /* gc_mark objects whose marking are not completed*/ + do { +- while (!MARK_STACK_EMPTY) { +- if (mark_stack_overflow){ +- gc_mark_all(); +- } +- else { +- gc_mark_rest(); +- } +- } +- rb_gc_abort_threads(); ++ while (!MARK_STACK_EMPTY) { ++ if (mark_stack_overflow){ ++ gc_mark_all(); ++ } ++ else { ++ gc_mark_rest(); ++ } ++ } ++ rb_gc_abort_threads(); + } while (!MARK_STACK_EMPTY); + + gc_sweep(); ++ ++ if (gc_statistics) { ++ GC_TIME_TYPE musecs_used; ++ gettimeofday(&gctv2, NULL); ++ musecs_used = ((GC_TIME_TYPE)(gctv2.tv_sec - gctv1.tv_sec) * 1000000) + (gctv2.tv_usec - gctv1.tv_usec); ++ gc_time += musecs_used; ++ ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "GC time: %d msec\n", musecs_used / 1000); ++ fflush(gc_data_file); ++ } ++ } + } + + void +@@ -1588,8 +1941,8 @@ Init_stack(addr) + memset(&m, 0, sizeof(m)); + VirtualQuery(&m, &m, sizeof(m)); + rb_gc_stack_start = +- STACK_UPPER((VALUE *)&m, (VALUE *)m.BaseAddress, +- (VALUE *)((char *)m.BaseAddress + m.RegionSize) - 1); ++ STACK_UPPER((VALUE *)&m, (VALUE *)m.BaseAddress, ++ (VALUE *)((char *)m.BaseAddress + m.RegionSize) - 1); + #elif defined(STACK_END_ADDRESS) + { + extern void *STACK_END_ADDRESS; +@@ -1599,24 +1952,24 @@ 
Init_stack(addr) + if (!addr) addr = (void *)&addr; + STACK_UPPER(&addr, addr, ++addr); + if (rb_gc_stack_start) { +- if (STACK_UPPER(&addr, +- rb_gc_stack_start > addr, +- rb_gc_stack_start < addr)) +- rb_gc_stack_start = addr; +- return; ++ if (STACK_UPPER(&addr, ++ rb_gc_stack_start > addr, ++ rb_gc_stack_start < addr)) ++ rb_gc_stack_start = addr; ++ return; + } + rb_gc_stack_start = addr; + #endif + #ifdef HAVE_GETRLIMIT + { +- struct rlimit rlim; ++ struct rlimit rlim; + +- if (getrlimit(RLIMIT_STACK, &rlim) == 0) { +- unsigned int space = rlim.rlim_cur/5; ++ if (getrlimit(RLIMIT_STACK, &rlim) == 0) { ++ unsigned int space = rlim.rlim_cur/5; + +- if (space > 1024*1024) space = 1024*1024; +- STACK_LEVEL_MAX = (rlim.rlim_cur - space) / sizeof(VALUE); +- } ++ if (space > 1024*1024) space = 1024*1024; ++ STACK_LEVEL_MAX = (rlim.rlim_cur - space) / sizeof(VALUE); ++ } + } + #endif + } +@@ -1652,16 +2005,16 @@ void ruby_init_stack(VALUE *addr + } + #elif defined _WIN32 + { +- MEMORY_BASIC_INFORMATION mi; +- DWORD size; +- DWORD space; +- +- if (VirtualQuery(&mi, &mi, sizeof(mi))) { +- size = (char *)mi.BaseAddress - (char *)mi.AllocationBase; +- space = size / 5; +- if (space > 1024*1024) space = 1024*1024; +- STACK_LEVEL_MAX = (size - space) / sizeof(VALUE); +- } ++ MEMORY_BASIC_INFORMATION mi; ++ DWORD size; ++ DWORD space; ++ ++ if (VirtualQuery(&mi, &mi, sizeof(mi))) { ++ size = (char *)mi.BaseAddress - (char *)mi.AllocationBase; ++ space = size / 5; ++ if (space > 1024*1024) space = 1024*1024; ++ STACK_LEVEL_MAX = (size - space) / sizeof(VALUE); ++ } + } + #endif + } +@@ -1701,8 +2054,9 @@ void + Init_heap() + { + if (!rb_gc_stack_start) { +- Init_stack(0); ++ Init_stack(0); + } ++ set_gc_parameters(); + add_heap(); + } + +@@ -1715,7 +2069,7 @@ os_obj_of(of) + volatile VALUE v; + + for (i = 0; i < heaps_used; i++) { +- RVALUE *p, *pend; ++ RVALUE *p, *pend; + + p = heaps[i].slot; pend = p + heaps[i].limit; + for (;p < pend; p++) { +@@ -1808,8 +2162,8 @@ add_final(os, block) + { + rb_warn("ObjectSpace::add_finalizer is deprecated; use define_finalizer"); + if (!rb_respond_to(block, rb_intern("call"))) { +- rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", +- rb_obj_classname(block)); ++ rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", ++ rb_obj_classname(block)); + } + rb_ary_push(finalizers, block); + return block; +@@ -1864,7 +2218,7 @@ undefine_final(os, obj) + VALUE os, obj; + { + if (finalizer_table) { +- st_delete(finalizer_table, (st_data_t*)&obj, 0); ++ st_delete(finalizer_table, (st_data_t*)&obj, 0); + } + return obj; + } +@@ -1888,11 +2242,11 @@ define_final(argc, argv, os) + + rb_scan_args(argc, argv, "11", &obj, &block); + if (argc == 1) { +- block = rb_block_proc(); ++ block = rb_block_proc(); + } + else if (!rb_respond_to(block, rb_intern("call"))) { +- rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", +- rb_obj_classname(block)); ++ rb_raise(rb_eArgError, "wrong type argument %s (should be callable)", ++ rb_obj_classname(block)); + } + need_call_final = 1; + if (!FL_ABLE(obj)) { +@@ -1905,10 +2259,10 @@ define_final(argc, argv, os) + OBJ_FREEZE(block); + + if (!finalizer_table) { +- finalizer_table = st_init_numtable(); ++ finalizer_table = st_init_numtable(); + } + if (st_lookup(finalizer_table, obj, &table)) { +- rb_ary_push(table, block); ++ rb_ary_push(table, block); + } + else { + table = rb_ary_new3(1, block); +@@ -1927,7 +2281,7 @@ rb_gc_copy_finalizer(dest, obj) + if (!finalizer_table) return; + if 
(!FL_TEST(obj, FL_FINALIZE)) return; + if (st_lookup(finalizer_table, obj, &table)) { +- st_insert(finalizer_table, dest, table); ++ st_insert(finalizer_table, dest, table); + } + RBASIC(dest)->flags |= FL_FINALIZE; + } +@@ -1957,18 +2311,18 @@ run_final(obj) + args[1] = 0; + args[2] = (VALUE)ruby_safe_level; + for (i=0; i<RARRAY(finalizers)->len; i++) { +- args[0] = RARRAY(finalizers)->ptr[i]; +- if (!args[1]) args[1] = rb_ary_new3(1, objid); +- rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status); ++ args[0] = RARRAY(finalizers)->ptr[i]; ++ if (!args[1]) args[1] = rb_ary_new3(1, objid); ++ rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status); + } + if (finalizer_table && st_delete(finalizer_table, (st_data_t*)&obj, &table)) { +- for (i=0; i<RARRAY(table)->len; i++) { +- VALUE final = RARRAY(table)->ptr[i]; +- args[0] = RARRAY(final)->ptr[1]; +- if (!args[1]) args[1] = rb_ary_new3(1, objid); +- args[2] = FIX2INT(RARRAY(final)->ptr[0]); +- rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status); +- } ++ for (i=0; i<RARRAY(table)->len; i++) { ++ VALUE final = RARRAY(table)->ptr[i]; ++ args[0] = RARRAY(final)->ptr[1]; ++ if (!args[1]) args[1] = rb_ary_new3(1, objid); ++ args[2] = FIX2INT(RARRAY(final)->ptr[0]); ++ rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status); ++ } + } + rb_thread_critical = critical_save; + } +@@ -1980,8 +2334,8 @@ rb_gc_finalize_deferred() + + deferred_final_list = 0; + if (p) { +- finalize_list(p); +- free_unused_heaps(); ++ finalize_list(p); ++ free_unused_heaps(); + } + } + +@@ -2061,7 +2415,7 @@ id2ref(obj, objid) + if (ptr == Qfalse) return Qfalse; + if (ptr == Qnil) return Qnil; + if (FIXNUM_P(ptr)) return (VALUE)ptr; +- ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */ ++ ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */ + + if ((ptr % sizeof(RVALUE)) == (4 << 2)) { + ID symid = ptr / sizeof(RVALUE); +@@ -2075,7 +2429,7 @@ id2ref(obj, objid) + rb_raise(rb_eRangeError, "0x%lx is not id value", p0); + } + if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) { +- rb_raise(rb_eRangeError, "0x%lx is recycled object", p0); ++ rb_raise(rb_eRangeError, "0x%lx is recycled object", p0); + } + return (VALUE)ptr; + } +@@ -2166,6 +2520,14 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1); + rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0); + ++ rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0); ++ rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0); ++ rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); ++ rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); ++ rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); ++ rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); ++ rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1); ++ + rb_mObSpace = rb_define_module("ObjectSpace"); + rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1); + rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0); +@@ -2188,7 +2550,7 @@ Init_GC() + + rb_global_variable(&nomem_error); + nomem_error = rb_exc_new3(rb_eNoMemError, +- rb_obj_freeze(rb_str_new2("failed to allocate memory"))); ++ rb_obj_freeze(rb_str_new2("failed to allocate memory"))); + OBJ_TAINT(nomem_error); + OBJ_FREEZE(nomem_error); + diff --git a/patches/ruby/1.8.7/p358/railsexpress/05-display-full-stack-trace.patch new file mode 100644 index 
0000000000..a927a80ba6 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/05-display-full-stack-trace.patch @@ -0,0 +1,15 @@ +diff --git a/eval.c b/eval.c +index 6ff2560..fb3307c 100644 +--- a/eval.c ++++ b/eval.c +@@ -1325,8 +1325,8 @@ error_print() + int truncate = eclass == rb_eSysStackError; + + #define TRACE_MAX (TRACE_HEAD+TRACE_TAIL+5) +-#define TRACE_HEAD 8 +-#define TRACE_TAIL 5 ++#define TRACE_HEAD 100 ++#define TRACE_TAIL 100 + + ep = RARRAY(errat); + for (i=1; i<ep->len; i++) { diff --git a/patches/ruby/1.8.7/p358/railsexpress/06-better-source-file-tracing.patch new file mode 100644 index 0000000000..f7a4d4f24f --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/06-better-source-file-tracing.patch @@ -0,0 +1,13 @@ +diff --git a/eval.c b/eval.c +index fb3307c..356226e 100644 +--- a/eval.c ++++ b/eval.c +@@ -1161,7 +1161,7 @@ static VALUE trace_func = 0; + static int tracing = 0; + static void call_trace_func _((rb_event_t,NODE*,VALUE,ID,VALUE)); + +-#if 0 ++#if 1 + #define SET_CURRENT_SOURCE() (ruby_sourcefile = ruby_current_node->nd_file, \ + ruby_sourceline = nd_line(ruby_current_node)) + #else diff --git a/patches/ruby/1.8.7/p358/railsexpress/07-heap-dump-support.patch new file mode 100644 index 0000000000..5235be6bcc --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/07-heap-dump-support.patch @@ -0,0 +1,159 @@ +diff --git a/configure.in b/configure.in +index 62b34a8..4be088c 100644 +--- a/configure.in ++++ b/configure.in +@@ -1595,6 +1595,14 @@ fi + LDFLAGS="-L. $LDFLAGS" + AC_SUBST(ARCHFILE) + ++dnl enable gc debugging ++AC_ARG_ENABLE(gcdebug, ++ [ --enable-gcdebug build garbage collector with debugging enabled. ], ++ [enable_gcdebug=$enableval]) ++if test "$enable_gcdebug" = 'yes'; then ++ AC_DEFINE(GC_DEBUG, 1) ++fi ++ + dnl build rdoc index if requested + RDOCTARGET="" + AC_ARG_ENABLE(install-doc, +diff --git a/gc.c b/gc.c +index ab71d22..9ad716f 100644 +--- a/gc.c ++++ b/gc.c +@@ -411,7 +411,6 @@ rb_gc_unregister_address(addr) + } + } + +-#undef GC_DEBUG + + void + rb_global_variable(var) +@@ -602,6 +601,85 @@ rb_gc_dump() + return Qnil; + } + ++ ++static char* obj_type(int tp); ++ ++#ifdef GC_DEBUG ++/* ++ * call-seq: ++ * GC.dump_file_and_line_info(String, boolean) => nil ++ * ++ * dumps information on which currently allocated object was created by which file and on which line ++ * ++ * GC.dump_file_and_line_info(String, boolean) #=> nil ++ * ++ * The second parameter specifies whether class names should be included in the dump. ++ * Note that including class names will allocate additional string objects on the heap. 
++ * ++ */ ++ ++VALUE ++rb_gc_dump_file_and_line_info(int argc, VALUE *argv) ++{ ++ VALUE filename, str, include_classnames = Qnil; ++ char *fname = NULL; ++ char *klass = NULL; ++ FILE* f = NULL; ++ int i,n = 0; ++ ++ rb_scan_args(argc, argv, "11", &filename, &include_classnames); ++ ++ str = StringValue(filename); ++ fname = RSTRING(str)->ptr; ++ f = fopen(fname, "w"); ++ ++ for (i = 0; i < heaps_used; i++) { ++ RVALUE *p, *pend; ++ ++ p = heaps[i].slot; pend = p + heaps[i].limit; ++ for (;p < pend; p++) { ++ if (p->as.basic.flags) { ++ fprintf(f, "%s:%s:%d", obj_type(p->as.basic.flags & T_MASK), p->file, p->line); ++ // rb_obj_classname will create objects on the heap, we need a better solution ++ if (include_classnames == Qtrue) { ++ /* write the class */ ++ fprintf(f, ":"); ++ switch (TYPE(p)) { ++ case T_NONE: ++ fprintf(f, "__none__"); ++ break; ++ case T_BLKTAG: ++ fprintf(f, "__blktag__"); ++ break; ++ case T_UNDEF: ++ fprintf(f, "__undef__"); ++ break; ++ case T_VARMAP: ++ fprintf(f, "__varmap__"); ++ break; ++ case T_SCOPE: ++ fprintf(f, "__scope__"); ++ break; ++ case T_NODE: ++ fprintf(f, "__node__"); ++ break; ++ default: ++ if (!p->as.basic.klass) { ++ fprintf(f, "__unknown__"); ++ } else { ++ fprintf(f, rb_obj_classname((VALUE)p)); ++ } ++ } ++ } ++ fprintf(f, "\n"); ++ } ++ } ++ } ++ fclose(f); ++ return Qnil; ++} ++#endif ++ + /* + * call-seq: + * GC.log String => String +@@ -1066,6 +1144,11 @@ gc_mark(ptr, lev) + if (obj->as.basic.flags & FL_MARK) return; /* already marked */ + obj->as.basic.flags |= FL_MARK; + ++#ifdef GC_DEBUG ++ /* mark our new reference point for sourcefile objects */ ++ mark_source_filename(RANY(obj)->file); ++#endif ++ + if (lev > GC_LEVEL_MAX || (lev == 0 && ruby_stack_check())) { + if (!mark_stack_overflow) { + if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) { +@@ -1104,6 +1187,11 @@ gc_mark_children(ptr, lev) + if (obj->as.basic.flags & FL_MARK) return; /* already marked */ + obj->as.basic.flags |= FL_MARK; + ++#ifdef GC_DEBUG ++ /* mark our new reference point for sourcefile objects */ ++ mark_source_filename(RANY(obj)->file); ++#endif ++ + marking: + if (FL_TEST(obj, FL_EXIVAR)) { + rb_mark_generic_ivar(ptr); +@@ -1550,6 +1638,7 @@ gc_sweep() + live_counts[i], free_counts[i], obj_type(i)); + } + } ++ fflush(gc_data_file); + } + + /* clear finalization list */ +@@ -2526,6 +2615,9 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); ++#ifdef GC_DEBUG ++ rb_define_singleton_method(rb_mGC, "dump_file_and_line_info", rb_gc_dump_file_and_line_info, -1); ++#endif + rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1); + + rb_mObSpace = rb_define_module("ObjectSpace"); diff --git a/patches/ruby/1.8.7/p358/railsexpress/08-fork-support-for-gc-logging.patch b/patches/ruby/1.8.7/p358/railsexpress/08-fork-support-for-gc-logging.patch new file mode 100644 index 0000000000..0f01a75db0 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/08-fork-support-for-gc-logging.patch @@ -0,0 +1,249 @@ +diff --git a/gc.c b/gc.c +index 9ad716f..a3cbe91 100644 +--- a/gc.c ++++ b/gc.c +@@ -223,6 +223,8 @@ static int dont_gc; + static int gc_statistics = 0; + static GC_TIME_TYPE gc_time = 0; + static int gc_collections = 0; ++static int verbose_gc_stats = Qfalse; ++static FILE* gc_data_file = NULL; + static int during_gc; + static int need_call_final = 0; + static st_table *finalizer_table = 0; +@@ -368,9 
+370,148 @@ rb_gc_time() + #endif + } + +- + VALUE rb_mGC; + ++/* ++ * call-seq: ++ * GC.enable_trace => true or false ++ * ++ * Enables garbage collection tracing, returning true if garbage ++ * collection tracing was already enabled. ++ * ++ * GC.enable_trace #=> false or true ++ * GC.enable_trace #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_enable_trace() ++{ ++ int old = verbose_gc_stats; ++ verbose_gc_stats = Qtrue; ++ return old; ++} ++ ++/* ++ * call-seq: ++ * GC.disable_trace => true or false ++ * ++ * Disables garbage collection tracing, returning true if garbage ++ * collection tracing was already disabled. ++ * ++ * GC.disable_trace #=> false or true ++ * GC.disable_trace #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_disable_trace() ++{ ++ int old = verbose_gc_stats; ++ verbose_gc_stats = Qfalse; ++ return old; ++} ++ ++char* GC_LOGFILE_IVAR = "@gc_logfile_name"; ++ ++/* ++ * call-seq: ++ * GC.log_file(filename=nil, mode="w") => boolean ++ * ++ * Changes the GC data log file. Closes the currently open logfile. ++ * Returns true if the file was successfully opened for ++ * writing. Returns false if the file could not be opened for ++ * writing. Returns the name of the current logfile (or nil) if no ++ * parameter is given. Restores logging to stderr when given nil as ++ * an argument. ++ * ++ * GC.log_file #=> nil ++ * GC.log_file "/tmp/gc.log" #=> true ++ * GC.log_file #=> "/tmp/gc.log" ++ * GC.log_file nil #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_log_file(int argc, VALUE *argv, VALUE self) ++{ ++ VALUE filename = Qnil; ++ VALUE mode_str = Qnil; ++ FILE* f = NULL; ++ char* mode = "w"; ++ ++ VALUE current_logfile_name = rb_iv_get(rb_mGC, GC_LOGFILE_IVAR); ++ ++ if (argc==0) ++ return current_logfile_name; ++ ++ rb_scan_args(argc, argv, "02", &filename, &mode_str); ++ ++ if (filename == Qnil) { ++ /* close current logfile and reset logfile to stderr */ ++ if (gc_data_file != stderr) { ++ fclose(gc_data_file); ++ gc_data_file = stderr; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, Qnil); ++ } ++ return Qtrue; ++ } ++ ++ /* we have a real logfile name */ ++ filename = StringValue(filename); ++ ++ if (rb_equal(current_logfile_name, filename) == Qtrue) { ++ /* do nothing if we get the file name we're already logging to */ ++ return Qtrue; ++ } ++ ++ /* get mode for file opening */ ++ if (mode_str != Qnil) ++ { ++ mode = RSTRING(StringValue(mode_str))->ptr; ++ } ++ ++ /* try to open file in given mode */ ++ if (f = fopen(RSTRING(filename)->ptr, mode)) { ++ if (gc_data_file != stderr) { ++ fclose(gc_data_file); ++ } ++ gc_data_file = f; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, filename); ++ } else { ++ return Qfalse; ++ } ++ return Qtrue; ++} ++ ++ ++/* ++ * Called from process.c before a fork. Flushes the gc log file to ++ * avoid writing the buffered output twice (once in the parent, and ++ * once in the child). ++ */ ++void ++rb_gc_before_fork() ++{ ++ /* flush gc log file */ ++ fflush(gc_data_file); ++} ++ ++/* ++ * Called from process.c after a fork in the child process. Turns off ++ * logging, disables GC stats and resets all gc counters and timing ++ * information. 
++ */ ++void ++rb_gc_after_fork() ++{ ++ rb_gc_disable_stats(); ++ rb_gc_clear_stats(); ++ rb_gc_disable_trace(); ++ gc_data_file = stderr; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, Qnil); ++} ++ + static struct gc_list { + VALUE *varptr; + struct gc_list *next; +@@ -477,10 +618,6 @@ static double heap_slots_growth_factor = 1.8; + + static long initial_malloc_limit = GC_MALLOC_LIMIT; + +-static int verbose_gc_stats = Qfalse; +- +-static FILE* gc_data_file = NULL; +- + static RVALUE *himem, *lomem; + + static void set_gc_parameters() +@@ -496,6 +633,8 @@ static void set_gc_parameters() + if (gc_stats_i > 0) { + verbose_gc_stats = Qtrue; + } ++ /* child processes should not inherit RUBY_GC_STATS */ ++ unsetenv("RUBY_GC_STATS"); + } + + gc_heap_file_ptr = getenv("RUBY_GC_DATA_FILE"); +@@ -508,6 +647,8 @@ static void set_gc_parameters() + fprintf(stderr, + "can't open gc log file %s for writing, using default\n", gc_heap_file_ptr); + } ++ /* child processes should not inherit RUBY_GC_DATA_FILE to avoid clobbering */ ++ unsetenv("RUBY_GC_DATA_FILE"); + } + + min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS"); +@@ -2619,6 +2760,9 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "dump_file_and_line_info", rb_gc_dump_file_and_line_info, -1); + #endif + rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1); ++ rb_define_singleton_method(rb_mGC, "log_file", rb_gc_log_file, -1); ++ rb_define_singleton_method(rb_mGC, "enable_trace", rb_gc_enable_trace, 0); ++ rb_define_singleton_method(rb_mGC, "disable_trace", rb_gc_disable_trace, 0); + + rb_mObSpace = rb_define_module("ObjectSpace"); + rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1); +diff --git a/intern.h b/intern.h +index 950ae9d..99696f1 100644 +--- a/intern.h ++++ b/intern.h +@@ -270,6 +270,8 @@ void rb_gc_call_finalizer_at_exit _((void)); + VALUE rb_gc_enable _((void)); + VALUE rb_gc_disable _((void)); + VALUE rb_gc_start _((void)); ++void rb_gc_before_fork _((void)); ++void rb_gc_after_fork _((void)); + /* hash.c */ + void st_foreach_safe _((struct st_table *, int (*)(ANYARGS), unsigned long)); + void rb_hash_foreach _((VALUE, int (*)(ANYARGS), VALUE)); +diff --git a/process.c b/process.c +index 8f6285d..ea28cb8 100644 +--- a/process.c ++++ b/process.c +@@ -1330,6 +1330,8 @@ rb_f_fork(obj) + fflush(stderr); + #endif + ++ rb_gc_before_fork(); ++ + before_exec(); + pid = fork(); + after_exec(); +@@ -1339,6 +1341,7 @@ rb_f_fork(obj) + #ifdef linux + after_exec(); + #endif ++ rb_gc_after_fork(); + rb_thread_atfork(); + if (rb_block_given_p()) { + int status; +@@ -1574,10 +1577,12 @@ rb_f_system(argc, argv) + + chfunc = signal(SIGCHLD, SIG_DFL); + retry: ++ rb_gc_before_fork(); + before_exec(); + pid = fork(); + if (pid == 0) { + /* child process */ ++ rb_gc_after_fork(); + rb_thread_atfork(); + rb_protect(proc_exec_args, (VALUE)&earg, NULL); + _exit(127); diff --git a/patches/ruby/1.8.7/p358/railsexpress/09-track-malloc-size.patch b/patches/ruby/1.8.7/p358/railsexpress/09-track-malloc-size.patch new file mode 100644 index 0000000000..d078748a44 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/09-track-malloc-size.patch @@ -0,0 +1,120 @@ +diff --git a/gc.c b/gc.c +index a3cbe91..30a1219 100644 +--- a/gc.c ++++ b/gc.c +@@ -79,6 +79,17 @@ void *alloca (); + + static unsigned long malloc_increase = 0; + static unsigned long malloc_limit = GC_MALLOC_LIMIT; ++ ++#ifdef HAVE_LONG_LONG ++static unsigned LONG_LONG gc_allocated_size = 0; ++static unsigned LONG_LONG gc_num_allocations = 0; ++#else ++static unsigned long 
gc_allocated_size = 0; ++static unsigned long gc_num_allocations = 0; ++#endif ++static int gc_statistics = 0; ++ ++ + static void run_final(); + static VALUE nomem_error; + static void garbage_collect(); +@@ -163,6 +174,11 @@ ruby_xmalloc(size) + } + malloc_increase += size; + ++ if (gc_statistics) { ++ gc_allocated_size += size; ++ gc_num_allocations += 1; ++ } ++ + return mem; + } + +@@ -220,7 +236,6 @@ ruby_xfree(x) + + extern int ruby_in_compile; + static int dont_gc; +-static int gc_statistics = 0; + static GC_TIME_TYPE gc_time = 0; + static int gc_collections = 0; + static int verbose_gc_stats = Qfalse; +@@ -329,11 +344,55 @@ rb_gc_clear_stats() + { + gc_collections = 0; + gc_time = 0; ++ gc_allocated_size = 0; ++ gc_num_allocations = 0; + return Qnil; + } + + /* + * call-seq: ++ * GC.allocated_size => Integer ++ * ++ * Returns the size of memory (in bytes) allocated since GC statistics collection ++ * was enabled. ++ * ++ * GC.allocated_size #=> 35 ++ * ++ */ ++VALUE ++rb_gc_allocated_size() ++{ ++#if HAVE_LONG_LONG ++ return ULL2NUM(gc_allocated_size); ++#else ++ return ULONG2NUM(gc_allocated_size); ++#endif ++} ++ ++/* ++ * call-seq: ++ * GC.num_allocations => Integer ++ * ++ * Returns the number of memory allocations since GC statistics collection ++ * was enabled. ++ * ++ * GC.num_allocations #=> 150 ++ * ++ */ ++VALUE ++rb_gc_num_allocations() ++{ ++#if HAVE_LONG_LONG ++ return ULL2NUM(gc_num_allocations); ++#else ++ return ULONG2NUM(gc_num_allocations); ++#endif ++} ++ ++/* ++ ++/* ++ * call-seq: + * GC.collections => Integer + * + * Returns the number of garbage collections performed while GC statistics collection +@@ -2753,6 +2812,8 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0); + rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0); + rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); ++ rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); ++ rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); +diff --git a/intern.h b/intern.h +index 99696f1..1117614 100644 +--- a/intern.h ++++ b/intern.h +@@ -272,6 +272,8 @@ VALUE rb_gc_disable _((void)); + VALUE rb_gc_start _((void)); + void rb_gc_before_fork _((void)); + void rb_gc_after_fork _((void)); ++VALUE rb_gc_allocated_size _((void)); ++VALUE rb_gc_num_allocations _((void)); + /* hash.c */ + void st_foreach_safe _((struct st_table *, int (*)(ANYARGS), unsigned long)); + void rb_hash_foreach _((VALUE, int (*)(ANYARGS), VALUE)); diff --git a/patches/ruby/1.8.7/p358/railsexpress/10-track-object-allocation.patch b/patches/ruby/1.8.7/p358/railsexpress/10-track-object-allocation.patch new file mode 100644 index 0000000000..0eef717a24 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/10-track-object-allocation.patch @@ -0,0 +1,111 @@ +diff --git a/gc.c b/gc.c +index 30a1219..5b42b90 100644 +--- a/gc.c ++++ b/gc.c +@@ -96,6 +96,26 @@ static void garbage_collect(); + + int ruby_gc_stress = 0; + ++static unsigned long live_objects = 0; ++unsigned long rb_os_live_objects() ++{ ++ return live_objects; ++} ++ ++#if defined(HAVE_LONG_LONG) ++static unsigned LONG_LONG allocated_objects = 0; ++unsigned LONG_LONG rb_os_allocated_objects() ++{ ++ return allocated_objects; ++} ++#else ++static unsigned long 
allocated_objects = 0; ++unsigned long rb_os_allocated_objects() ++{ ++ return allocated_objects; ++} ++#endif ++ + NORETURN(void rb_exc_jump _((VALUE))); + + void +@@ -987,6 +1007,8 @@ rb_newobj() + RANY(obj)->file = ruby_sourcefile; + RANY(obj)->line = ruby_sourceline; + #endif ++ live_objects++; ++ allocated_objects++; + return obj; + } + +@@ -1825,6 +1847,7 @@ gc_sweep() + add_heap(); + } + during_gc = 0; ++ live_objects = live; + + if (do_gc_stats) { + fprintf(gc_data_file, "objects processed: %.7d\n", live+freed); +@@ -2790,6 +2813,35 @@ rb_obj_id(VALUE obj) + return (VALUE)((long)obj|FIXNUM_FLAG); + } + ++/* call-seq: ++ * ObjectSpace.live_objects => number ++ * ++ * Returns the count of objects currently allocated in the system. This goes ++ * down after the garbage collector runs. ++ */ ++static ++VALUE os_live_objects(VALUE self) ++{ ++ return ULONG2NUM(live_objects); ++} ++ ++/* call-seq: ++ * ObjectSpace.allocated_objects => number ++ * ++ * Returns the count of objects allocated since the Ruby interpreter has ++ * started. This number can only increase. To know how many objects are ++ * currently allocated, use ObjectSpace::live_objects ++ */ ++static ++VALUE os_allocated_objects(VALUE self) ++{ ++#if defined(HAVE_LONG_LONG) ++ return ULL2NUM(allocated_objects); ++#else ++ return ULONG2NUM(allocated_objects); ++#endif ++} ++ + /* + * The GC module provides an interface to Ruby's mark and + * sweep garbage collection mechanism. Some of the underlying methods +@@ -2833,6 +2885,9 @@ Init_GC() + rb_define_module_function(rb_mObSpace, "finalizers", finals, 0); + rb_define_module_function(rb_mObSpace, "call_finalizer", call_final, 1); + ++ rb_define_module_function(rb_mObSpace, "live_objects", os_live_objects, 0); ++ rb_define_module_function(rb_mObSpace, "allocated_objects", os_allocated_objects, 0); ++ + rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1); + rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1); + +diff --git a/intern.h b/intern.h +index 1117614..a87661d 100644 +--- a/intern.h ++++ b/intern.h +@@ -274,6 +274,12 @@ void rb_gc_before_fork _((void)); + void rb_gc_after_fork _((void)); + VALUE rb_gc_allocated_size _((void)); + VALUE rb_gc_num_allocations _((void)); ++unsigned long rb_os_live_objects _((void)); ++#ifdef HAVE_LONG_LONG ++unsigned LONG_LONG rb_os_allocated_objects _((void)); ++#else ++unsigned long rb_os_allocated_objects _((void)); ++#endif + /* hash.c */ + void st_foreach_safe _((struct st_table *, int (*)(ANYARGS), unsigned long)); + void rb_hash_foreach _((VALUE, int (*)(ANYARGS), VALUE)); diff --git a/patches/ruby/1.8.7/p358/railsexpress/11-expose-heap-slots.patch b/patches/ruby/1.8.7/p358/railsexpress/11-expose-heap-slots.patch new file mode 100644 index 0000000000..3ce846c6a0 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/11-expose-heap-slots.patch @@ -0,0 +1,70 @@ +diff --git a/gc.c b/gc.c +index 5b42b90..21b3f6b 100644 +--- a/gc.c ++++ b/gc.c +@@ -690,6 +690,7 @@ static int heaps_used = 0; + + static int heap_min_slots = 10000; + static int heap_slots = 10000; ++static int heap_size = 0; + + static int heap_free_min = 4096; + static int heap_slots_increment = 10000; +@@ -800,6 +801,21 @@ static void set_gc_parameters() + + /* + * call-seq: ++ * GC.heap_slots => Integer ++ * ++ * Returns the number of heap slots available for object allocations. 
++ * ++ * GC.heap_slots #=> 10000 ++ * ++ */ ++VALUE ++rb_gc_heap_slots() ++{ ++ return INT2NUM(heap_size); ++} ++ ++/* ++ * call-seq: + * GC.dump => nil + * + * dumps information about the current GC data structures to the GC log file +@@ -967,6 +983,7 @@ add_heap() + heaps[heaps_used].limit = heap_slots; + break; + } ++ heap_size += heap_slots; + pend = p + heap_slots; + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; +@@ -1828,6 +1845,7 @@ gc_sweep() + if (n == heaps[i].limit && freed > free_min) { + RVALUE *pp; + ++ heap_size -= n; + heaps[i].limit = 0; + for (pp = final_list; pp != final; pp = pp->as.free.next) { + pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */ +@@ -2866,6 +2884,7 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); ++ rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0); + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); +diff --git a/intern.h b/intern.h +index a87661d..e8f3209 100644 +--- a/intern.h ++++ b/intern.h +@@ -274,6 +274,7 @@ void rb_gc_before_fork _((void)); + void rb_gc_after_fork _((void)); + VALUE rb_gc_allocated_size _((void)); + VALUE rb_gc_num_allocations _((void)); ++VALUE rb_gc_heap_slots _((void)); + unsigned long rb_os_live_objects _((void)); + #ifdef HAVE_LONG_LONG + unsigned LONG_LONG rb_os_allocated_objects _((void)); diff --git a/patches/ruby/1.8.7/p358/railsexpress/12-fix-heap-size-growth-logic.patch b/patches/ruby/1.8.7/p358/railsexpress/12-fix-heap-size-growth-logic.patch new file mode 100644 index 0000000000..1db93f224d --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/12-fix-heap-size-growth-logic.patch @@ -0,0 +1,54 @@ +diff --git a/gc.c b/gc.c +index 21b3f6b..7db1ef6 100644 +--- a/gc.c ++++ b/gc.c +@@ -694,6 +694,7 @@ static int heap_size = 0; + + static int heap_free_min = 4096; + static int heap_slots_increment = 10000; ++static int initial_heap_slots_increment = 10000; + static double heap_slots_growth_factor = 1.8; + + static long initial_malloc_limit = GC_MALLOC_LIMIT; +@@ -771,14 +772,13 @@ static void set_gc_parameters() + if (verbose_gc_stats) { + fprintf(gc_data_file, "RUBY_HEAP_SLOTS_INCREMENT=%s\n", heap_slots_incr_ptr); + } +- if (heap_slots_incr_i > 0) { +- heap_slots_increment = heap_slots_incr_i; +- } ++ heap_slots_increment = heap_slots_incr_i; ++ initial_heap_slots_increment = heap_slots_increment; + } + + heap_slots_growth_factor_ptr = getenv("RUBY_HEAP_SLOTS_GROWTH_FACTOR"); + if (heap_slots_growth_factor_ptr != NULL) { +- double heap_slots_growth_factor_d = atoi(heap_slots_growth_factor_ptr); ++ double heap_slots_growth_factor_d = atof(heap_slots_growth_factor_ptr); + if (verbose_gc_stats) { + fprintf(gc_data_file, "RUBY_HEAP_SLOTS_GROWTH_FACTOR=%s\n", heap_slots_growth_factor_ptr); + } +@@ -988,8 +988,13 @@ add_heap() + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; + heaps_used++; +- heap_slots += heap_slots_increment; +- heap_slots_increment *= heap_slots_growth_factor; ++ if (heaps_used == 1) ++ heap_slots = initial_heap_slots_increment; ++ else { ++ heap_slots_increment *= heap_slots_growth_factor; ++ heap_slots += heap_slots_increment; ++ } ++ + if (heap_slots <= 0) heap_slots = 
heap_min_slots; + + while (p < pend) { +@@ -1879,6 +1884,7 @@ gc_sweep() + live_counts[i], free_counts[i], obj_type(i)); + } + } ++ rb_gc_dump(); + fflush(gc_data_file); + } + diff --git a/patches/ruby/1.8.7/p358/railsexpress/13-heap-slot-size.patch b/patches/ruby/1.8.7/p358/railsexpress/13-heap-slot-size.patch new file mode 100644 index 0000000000..45091c14ae --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/13-heap-slot-size.patch @@ -0,0 +1,12 @@ +diff --git a/gc.c b/gc.c +index 7db1ef6..57740d2 100644 +--- a/gc.c ++++ b/gc.c +@@ -2891,6 +2891,7 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); + rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0); ++ rb_define_const(rb_mGC, "HEAP_SLOT_SIZE", INT2FIX(sizeof(RVALUE))); + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); + rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); diff --git a/patches/ruby/1.8.7/p358/railsexpress/14-add-trace-stats-enabled-methods.patch b/patches/ruby/1.8.7/p358/railsexpress/14-add-trace-stats-enabled-methods.patch new file mode 100644 index 0000000000..4e3c4f2132 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/14-add-trace-stats-enabled-methods.patch @@ -0,0 +1,66 @@ +diff --git a/gc.c b/gc.c +index 57740d2..2c34932 100644 +--- a/gc.c ++++ b/gc.c +@@ -350,6 +350,22 @@ rb_gc_disable_stats() + + /* + * call-seq: ++ * GC.stats_enabled? => true or false ++ * ++ * Check whether GC stats have been enabled. ++ * ++ * GC.stats_enabled? #=> false or true ++ * ++ */ ++ ++VALUE ++rb_gc_stats_enabled() ++{ ++ return gc_statistics ? Qtrue : Qfalse; ++} ++ ++/* ++ * call-seq: + * GC.clear_stats => nil + * + * Clears garbage collection statistics, returning nil. This resets the number +@@ -491,6 +507,22 @@ rb_gc_disable_trace() + return old; + } + ++/* ++ * call-seq: ++ * GC.trace_enabled? => true or false ++ * ++ * Check whether GC tracing has been enabled. ++ * ++ * GC.trace_enabled? #=> false or true ++ * ++ */ ++ ++VALUE ++rb_gc_trace_enabled() ++{ ++ return verbose_gc_stats ? 
Qtrue : Qfalse; ++} ++ + char* GC_LOGFILE_IVAR = "@gc_logfile_name"; + + /* +@@ -2887,6 +2919,7 @@ Init_GC() + + rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0); + rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0); ++ rb_define_singleton_method(rb_mGC, "stats_enabled?", rb_gc_stats_enabled, 0); + rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); +@@ -2902,6 +2935,7 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "log_file", rb_gc_log_file, -1); + rb_define_singleton_method(rb_mGC, "enable_trace", rb_gc_enable_trace, 0); + rb_define_singleton_method(rb_mGC, "disable_trace", rb_gc_disable_trace, 0); ++ rb_define_singleton_method(rb_mGC, "trace_enabled?", rb_gc_trace_enabled, 0); + + rb_mObSpace = rb_define_module("ObjectSpace"); + rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1); diff --git a/patches/ruby/1.8.7/p358/railsexpress/15-track-live-dataset-size.patch b/patches/ruby/1.8.7/p358/railsexpress/15-track-live-dataset-size.patch new file mode 100644 index 0000000000..de44e6b365 --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/15-track-live-dataset-size.patch @@ -0,0 +1,52 @@ +diff --git a/gc.c b/gc.c +index 2c34932..0ce7e68 100644 +--- a/gc.c ++++ b/gc.c +@@ -89,6 +89,7 @@ static unsigned long gc_num_allocations = 0; + #endif + static int gc_statistics = 0; + ++static unsigned long heap_slots_live_after_last_gc = 0; + + static void run_final(); + static VALUE nomem_error; +@@ -465,6 +466,23 @@ rb_gc_time() + #endif + } + ++/* ++ * call-seq: ++ * GC.heap_slots_live_after_last_gc => Integer ++ * ++ * Returns the number of heap slots which were live after the last garbage collection. 
++ * ++ * GC.heap_slots_live_after_last_gc #=> 231223 ++ * ++ */ ++VALUE ++rb_gc_heap_slots_live_after_last_gc() ++{ ++ return ULONG2NUM(heap_slots_live_after_last_gc); ++} ++ ++ ++ + VALUE rb_mGC; + + /* +@@ -1903,6 +1921,7 @@ gc_sweep() + } + during_gc = 0; + live_objects = live; ++ heap_slots_live_after_last_gc = live; + + if (do_gc_stats) { + fprintf(gc_data_file, "objects processed: %.7d\n", live+freed); +@@ -2924,6 +2943,7 @@ Init_GC() + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); + rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0); ++ rb_define_singleton_method(rb_mGC, "heap_slots_live_after_last_gc", rb_gc_heap_slots_live_after_last_gc, 0); + rb_define_const(rb_mGC, "HEAP_SLOT_SIZE", INT2FIX(sizeof(RVALUE))); + rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); + rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); diff --git a/patches/ruby/1.8.7/p358/railsexpress/16-add-object-size-information-to-heap-dump.patch b/patches/ruby/1.8.7/p358/railsexpress/16-add-object-size-information-to-heap-dump.patch new file mode 100644 index 0000000000..e974cdb63a --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/16-add-object-size-information-to-heap-dump.patch @@ -0,0 +1,51 @@ +diff --git a/gc.c b/gc.c +index 0ce7e68..53450bf 100644 +--- a/gc.c ++++ b/gc.c +@@ -953,9 +953,21 @@ rb_gc_dump_file_and_line_info(int argc, VALUE *argv) + if (!p->as.basic.klass) { + fprintf(f, "__unknown__"); + } else { +- fprintf(f, rb_obj_classname((VALUE)p)); ++ fprintf(f, "%s", rb_obj_classname((VALUE)p)); + } + } ++ /* print object size for some known object types */ ++ switch (TYPE(p)) { ++ case T_STRING: ++ fprintf(f, ":%lu", RSTRING(p)->len); ++ break; ++ case T_ARRAY: ++ fprintf(f, ":%lu", RARRAY(p)->len); ++ break; ++ case T_HASH: ++ fprintf(f, ":%d", RHASH(p)->tbl->num_entries); ++ break; ++ } + } + fprintf(f, "\n"); + } +@@ -1924,10 +1936,10 @@ gc_sweep() + heap_slots_live_after_last_gc = live; + + if (do_gc_stats) { +- fprintf(gc_data_file, "objects processed: %.7d\n", live+freed); +- fprintf(gc_data_file, "live objects : %.7d\n", live); +- fprintf(gc_data_file, "freelist objects : %.7d\n", freed - really_freed); +- fprintf(gc_data_file, "freed objects : %.7d\n", really_freed); ++ fprintf(gc_data_file, "objects processed: %.7lu\n", live+freed); ++ fprintf(gc_data_file, "live objects : %.7lu\n", live); ++ fprintf(gc_data_file, "freelist objects : %.7lu\n", freed - really_freed); ++ fprintf(gc_data_file, "freed objects : %.7lu\n", really_freed); + for(i=0; i<256; i++) { + if (free_counts[i]>0 || live_counts[i]>0) { + fprintf(gc_data_file, +@@ -2258,7 +2270,7 @@ garbage_collect() + gc_time += musecs_used; + + if (verbose_gc_stats) { +- fprintf(gc_data_file, "GC time: %d msec\n", musecs_used / 1000); ++ fprintf(gc_data_file, "GC time: %ld msec\n", (long)(musecs_used / 1000)); + fflush(gc_data_file); + } + } diff --git a/patches/ruby/1.8.7/p358/railsexpress/17-caller-for-all-threads.patch b/patches/ruby/1.8.7/p358/railsexpress/17-caller-for-all-threads.patch new file mode 100644 index 0000000000..aa79d1602c --- /dev/null +++ b/patches/ruby/1.8.7/p358/railsexpress/17-caller-for-all-threads.patch @@ -0,0 +1,230 @@ +diff --git a/eval.c b/eval.c +index 356226e..a0fdc55 100644 +--- a/eval.c ++++ b/eval.c +@@ -8199,6 +8199,17 @@ rb_f_method_name() + } + } + ++/* Hash (Thread => Backtrace) used to collect backtrace for each thread.
*/ ++static VALUE backtrace_for_each_thread; ++ ++static int backtrace_level_for_each_thread; ++ ++static VALUE ++switch_thread_context_to_collect_backtrace(rb_thread_t next); ++ ++static VALUE ++rb_f_caller_for_all_threads(); ++ + void + Init_eval() + { +@@ -8244,6 +8255,7 @@ Init_eval() + rb_define_global_function("fail", rb_f_raise, -1); + + rb_define_global_function("caller", rb_f_caller, -1); ++ rb_define_global_function("caller_for_all_threads", rb_f_caller_for_all_threads, -1); + + rb_define_global_function("exit", rb_f_exit, -1); + rb_define_global_function("abort", rb_f_abort, -1); +@@ -10599,6 +10611,7 @@ static int th_sig, th_safe; + #define RESTORE_RAISE 5 + #define RESTORE_SIGNAL 6 + #define RESTORE_EXIT 7 ++#define RESTORE_BACKTRACE 8 + + extern VALUE *rb_gc_stack_start; + #ifdef __ia64 +@@ -10705,6 +10718,15 @@ rb_thread_switch(n) + } + rb_exc_raise(th_raise_exception); + break; ++ case RESTORE_BACKTRACE: ++ rb_hash_aset(backtrace_for_each_thread, curr_thread->thread, ++ backtrace(backtrace_level_for_each_thread)); ++ if (curr_thread != main_thread) { ++ switch_thread_context_to_collect_backtrace(curr_thread->next); ++ } else { ++ /* Circled back to main thread, cycle is complete. */ ++ } ++ break; + case RESTORE_NORMAL: + default: + break; +@@ -13875,3 +13897,74 @@ rb_throw(tag, val) + argv[1] = val; + rb_f_throw(2, argv); + } ++ ++static VALUE ++switch_thread_context_to_collect_backtrace(rb_thread_t next) ++{ ++ if (THREAD_SAVE_CONTEXT(curr_thread)) { ++ return Qnil; ++ } ++ curr_thread = next; ++ rb_thread_restore_context(next, RESTORE_BACKTRACE); ++ return Qnil; ++} ++ ++ ++/* ++ * call-seq: ++ * caller_for_all_threads(start=0) => hash ++ * ++ * Returns the current execution stack for all threads ++ * ---a hash whose keys are thread instances and whose values ++ * are the corresponding caller backtraces. ++ * ++ * Backtraces are arrays of strings indicating the location on the ++ * stack, in the same format as the one returned by Kernel#caller. ++ * ++ * The optional _start_ parameter ++ * determines the number of initial stack entries to omit from the ++ * result. ++ * ++ * def a(skip) ++ * caller_for_all_threads(skip) ++ * end ++ * def b(skip) ++ * a(skip) ++ * end ++ * def c(skip) ++ * b(skip) ++ * end ++ * c(0)[Thread.current] #=> ["prog:2:in `a'", "prog:5:in `b'", "prog:8:in `c'", "prog:10"] ++ * c(1)[Thread.current] #=> ["prog:5:in `b'", "prog:8:in `c'", "prog:11"] ++ * c(2)[Thread.current] #=> ["prog:8:in `c'", "prog:12"] ++ * c(3)[Thread.current] #=> ["prog:13"] ++ */ ++static VALUE ++rb_f_caller_for_all_threads(argc, argv) ++ int argc; ++ VALUE *argv; ++{ ++ volatile int critical; ++ VALUE level; ++ VALUE result; ++ ++ rb_scan_args(argc, argv, "01", &level); ++ backtrace_level_for_each_thread = NIL_P(level) ?
0 : NUM2INT(level); ++ if (backtrace_level_for_each_thread < 0) { ++ rb_raise(rb_eArgError, "negative level (%d)", backtrace_level_for_each_thread); ++ } ++ ++ critical = rb_thread_critical; ++ rb_thread_critical = Qtrue; ++ ++ backtrace_for_each_thread = rb_hash_new(); ++ switch_thread_context_to_collect_backtrace(main_thread->next); ++ ++ result = backtrace_for_each_thread; ++ backtrace_for_each_thread = Qnil; ++ backtrace_level_for_each_thread = 0; ++ ++ rb_thread_critical = critical; ++ return result; ++} +diff --git a/test/callerforallthreads/test_caller_for_each_thread.rb b/test/callerforallthreads/test_caller_for_each_thread.rb +new file mode 100644 +index 0000000..6aebaed +--- /dev/null ++++ b/test/callerforallthreads/test_caller_for_each_thread.rb +@@ -0,0 +1,95 @@ ++# -*- ruby-indent-level: 4 -*- ++require 'thread' ++require 'test/unit' ++ ++class AClassWithNestedmethods ++ ++ def an_ultra_nested_method(skip) ++ caller_for_all_threads skip ++ end ++ ++ def a_nested_method(skip) ++ an_ultra_nested_method skip ++ end ++ ++ def a_method(skip=0) ++ a_nested_method skip ++ end ++ ++end ++ ++class CallerForEachThreadTest < Test::Unit::TestCase ++ ++ def testCollectMeaningfulBacktraceForASingleThread ++ backtraces = AClassWithNestedmethods.new.a_method ++ backtrace = backtraces[Thread.current] ++ assert_not_nil backtrace ++ assert_equal __FILE__ + ":8:in `an_ultra_nested_method'", backtrace[0] ++ assert_equal __FILE__ + ":12:in `a_nested_method'", backtrace[1] ++ assert_equal __FILE__ + ":16:in `a_method'", backtrace[2] ++ assert_equal __FILE__ + ":24:in `testCollectMeaningfulBacktraceForASingleThread'", ++ backtrace[3] ++ end ++ ++ def testCanSkipFirstStackEntries ++ backtraces = AClassWithNestedmethods.new.a_method 2 ++ backtrace = backtraces[Thread.current] ++ assert_not_nil backtrace ++ assert_equal __FILE__ + ":16:in `a_method'", backtrace[0] ++ assert_equal __FILE__ + ":35:in `testCanSkipFirstStackEntries'", ++ backtrace[1] ++ end ++ ++ def testCollectMeaningfulBacktraceForMultipleThreads ++ first_thread = Thread.new do ++ loop do ++ Thread.pass ++ sleep 1 ++ end ++ end ++ ++ second_thread = Thread.new do ++ loop do ++ Thread.pass ++ sleep 1 ++ end ++ end ++ ++ backtraces = AClassWithNestedmethods.new.a_method ++ ++ backtrace = backtraces[Thread.current] ++ assert_not_nil backtrace ++ assert_match __FILE__ + ":8:in `an_ultra_nested_method'", backtrace[0] ++ assert_match __FILE__ + ":12:in `a_nested_method'", backtrace[1] ++ assert_equal __FILE__ + ":16:in `a_method'", backtrace[2] ++ assert_equal __FILE__ + ":58:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[3] ++ ++ backtrace = backtraces[first_thread] ++ assert_not_nil backtrace ++ assert_equal __FILE__ + ":47:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[0] ++ assert_equal __FILE__ + ":45:in `loop'", ++ backtrace[1] ++ assert_equal __FILE__ + ":45:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[2] ++ assert_equal __FILE__ + ":44:in `initialize'",backtrace[3] ++ assert_equal __FILE__ + ":44:in `new'", backtrace[4] ++ assert_equal __FILE__ + ":44:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[5] ++ ++ backtrace = backtraces[second_thread] ++ assert_not_nil backtrace ++ assert_equal __FILE__ + ":53:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[0] ++ assert_equal __FILE__ + ":52:in `loop'", backtrace[1] ++ assert_equal __FILE__ + ":52:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[2] ++ assert_equal
__FILE__ + ":51:in `initialize'",backtrace[3] ++ assert_equal __FILE__ + ":51:in `new'", backtrace[4] ++ assert_equal __FILE__ + ":51:in `testCollectMeaningfulBacktraceForMultipleThreads'", ++ backtrace[5] ++ end ++ ++end ++ diff --git a/patches/ruby/1.9.2/p318/railsexpress/01-railsbench-gc-patch.patch b/patches/ruby/1.9.2/p318/railsexpress/01-railsbench-gc-patch.patch new file mode 100644 index 0000000000..5b4792b3f7 --- /dev/null +++ b/patches/ruby/1.9.2/p318/railsexpress/01-railsbench-gc-patch.patch @@ -0,0 +1,1081 @@ +diff --git a/configure.in b/configure.in +index 3f94ac8..591aab9 100644 +--- a/configure.in ++++ b/configure.in +@@ -2209,6 +2209,14 @@ if test "$EXEEXT" = .exe; then + AC_SUBST(EXECUTABLE_EXTS) + fi + ++dnl enable gc debugging ++AC_ARG_ENABLE(gcdebug, ++ [ --enable-gcdebug build garbage collector with debugging enabled. ], ++ [enable_gcdebug=$enableval]) ++if test "$enable_gcdebug" = 'yes'; then ++ AC_DEFINE(GC_DEBUG, 1) ++fi ++ + dnl } + dnl build section { + +diff --git a/gc.c b/gc.c +index 564d260..27d9177 100644 +--- a/gc.c ++++ b/gc.c +@@ -78,6 +78,17 @@ void *alloca (); + #define GC_MALLOC_LIMIT 8000000 + #endif + ++#ifndef HAVE_LONG_LONG ++#define LONG_LONG long ++#endif ++ ++static size_t heap_free_min = 4096; ++static int heap_min_slots = 10000; ++static int heap_slots_increment = 10000; ++static int initial_heap_slots_increment = 10000; ++static double heap_slots_growth_factor = 1.8; ++static size_t initial_malloc_limit = GC_MALLOC_LIMIT; ++ + #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory] + + #define MARK_STACK_MAX 1024 +@@ -267,7 +278,7 @@ typedef struct RVALUE { + struct RComplex complex; + } as; + #ifdef GC_DEBUG +- const char *file; ++ VALUE file; + int line; + #endif + } RVALUE; +@@ -314,6 +325,8 @@ typedef struct rb_objspace { + struct { + int dont_gc; + int during_gc; ++ int gc_statistics; ++ int verbose_gc_stats; + } flags; + struct { + st_table *table; +@@ -334,6 +347,14 @@ typedef struct rb_objspace { + struct gc_list *global_list; + unsigned int count; + int gc_stress; ++ long heap_size; ++ unsigned LONG_LONG gc_time_accumulator; ++ FILE* gc_data_file; ++ long gc_collections; ++ unsigned LONG_LONG gc_allocated_size; ++ unsigned LONG_LONG gc_num_allocations; ++ unsigned long live_objects; ++ unsigned LONG_LONG allocated_objects; + } rb_objspace_t; + + #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE +@@ -357,6 +378,16 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress; + #define heaps_freed objspace->heap.freed + #define dont_gc objspace->flags.dont_gc + #define during_gc objspace->flags.during_gc ++#define gc_statistics objspace->flags.gc_statistics ++#define verbose_gc_stats objspace->flags.verbose_gc_stats ++#define heap_size objspace->heap_size ++#define gc_time_accumulator objspace->gc_time_accumulator ++#define gc_data_file objspace->gc_data_file ++#define gc_collections objspace->gc_collections ++#define gc_allocated_size objspace->gc_allocated_size ++#define gc_num_allocations objspace->gc_num_allocations ++#define live_objects objspace->live_objects ++#define allocated_objects objspace->allocated_objects + #define finalizer_table objspace->final.table + #define deferred_final_list objspace->final.deferred + #define mark_stack objspace->markstack.buffer +@@ -375,7 +406,8 @@ rb_objspace_alloc(void) + { + rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t)); + memset(objspace, 0, sizeof(*objspace)); +- malloc_limit = GC_MALLOC_LIMIT; ++ malloc_limit = initial_malloc_limit; ++ + ruby_gc_stress = 
ruby_initial_gc_stress; + + return objspace; +@@ -409,23 +441,8 @@ rb_objspace_free(rb_objspace_t *objspace) + } + #endif + +-/* tiny heap size */ +-/* 32KB */ +-/*#define HEAP_SIZE 0x8000 */ +-/* 128KB */ +-/*#define HEAP_SIZE 0x20000 */ +-/* 64KB */ +-/*#define HEAP_SIZE 0x10000 */ +-/* 16KB */ +-#define HEAP_SIZE 0x4000 +-/* 8KB */ +-/*#define HEAP_SIZE 0x2000 */ +-/* 4KB */ +-/*#define HEAP_SIZE 0x1000 */ +-/* 2KB */ +-/*#define HEAP_SIZE 0x800 */ +- +-#define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE)) ++#define HEAP_OBJ_LIMIT 8000 ++#define HEAP_SIZE (HEAP_OBJ_LIMIT * sizeof(struct RVALUE)) + + extern VALUE rb_cMutex; + extern st_table *rb_class_tbl; +@@ -673,6 +690,11 @@ vm_xmalloc(rb_objspace_t *objspace, size_t size) + mem = (size_t *)mem + 1; + #endif + ++ if (gc_statistics) { ++ gc_allocated_size += size; ++ gc_num_allocations += 1; ++ } ++ + return mem; + } + +@@ -780,6 +802,92 @@ ruby_xfree(void *x) + vm_xfree(&rb_objspace, x); + } + ++static void set_gc_parameters(rb_objspace_t *objspace) ++{ ++ char *envp; ++ ++ gc_data_file = stderr; ++ ++ envp = getenv("RUBY_GC_STATS"); ++ if (envp != NULL) { ++ int i = atoi(envp); ++ if (i > 0) { ++ verbose_gc_stats = 1; ++ } ++ /* child processes should not inherit RUBY_GC_STATS */ ++ ruby_unsetenv("RUBY_GC_STATS"); ++ } ++ ++ envp = getenv("RUBY_GC_DATA_FILE"); ++ if (envp != NULL) { ++ FILE* data_file = fopen(envp, "w"); ++ if (data_file != NULL) { ++ gc_data_file = data_file; ++ } ++ else { ++ fprintf(stderr, "can't open gc log file %s for writing, using default\n", envp); ++ } ++ /* child processes should not inherit RUBY_GC_DATA_FILE to avoid clobbering */ ++ ruby_unsetenv("RUBY_GC_DATA_FILE"); ++ } ++ ++ envp = getenv("RUBY_HEAP_MIN_SLOTS"); ++ if (envp != NULL) { ++ int i = atoi(envp); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_MIN_SLOTS=%s\n", envp); ++ } ++ if (i > 0) { ++ heap_min_slots = i; ++ } ++ } ++ ++ envp = getenv("RUBY_HEAP_FREE_MIN"); ++ if (envp != NULL) { ++ int i = atoi(envp); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_FREE_MIN=%s\n", envp); ++ } ++ if (i > 0) { ++ heap_free_min = i; ++ } ++ } ++ ++ envp = getenv("RUBY_HEAP_SLOTS_INCREMENT"); ++ if (envp != NULL) { ++ int i = atoi(envp); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_SLOTS_INCREMENT=%s\n", envp); ++ } ++ heap_slots_increment = i; ++ initial_heap_slots_increment = heap_slots_increment; ++ } ++ ++ envp = getenv("RUBY_HEAP_SLOTS_GROWTH_FACTOR"); ++ if (envp != NULL) { ++ double d = atof(envp); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_HEAP_SLOTS_GROWTH_FACTOR=%s\n", envp); ++ } ++ if (d > 0) { ++ heap_slots_growth_factor = d; ++ } ++ } ++ ++ envp = getenv("RUBY_GC_MALLOC_LIMIT"); ++ if (envp != NULL) { ++ long l = atol(envp); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "RUBY_GC_MALLOC_LIMIT=%s\n", envp); ++ } ++ if (l > 0) { ++ initial_malloc_limit = l; ++ malloc_limit = initial_malloc_limit; ++ } ++ } ++ ++ fflush(gc_data_file); ++} + + /* + * call-seq: +@@ -826,6 +934,454 @@ rb_gc_disable(void) + return old ? Qtrue : Qfalse; + } + ++/* ++ * call-seq: ++ * GC.enable_stats => true or false ++ * ++ * Enables garbage collection statistics, returning true if garbage ++ * collection statistics was already enabled. ++ * ++ * GC.enable_stats #=> false or true ++ * GC.enable_stats #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_enable_stats() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ int old = gc_statistics; ++ gc_statistics = 1; ++ return old ? 
Qtrue : Qfalse; ++} ++ ++/* ++ * call-seq: ++ * GC.disable_stats => true or false ++ * ++ * Disables garbage collection statistics, returning true if garbage ++ * collection statistics was already disabled. ++ * ++ * GC.disable_stats #=> false or true ++ * GC.disable_stats #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_disable_stats() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ int old = gc_statistics; ++ gc_statistics = 0; ++ return old ? Qtrue : Qfalse; ++} ++ ++/* ++ * call-seq: ++ * GC.stats_enabled? => true or false ++ * ++ * Check whether GC stats have been enabled. ++ * ++ * GC.stats_enabled? #=> false or true ++ * ++ */ ++ ++VALUE ++rb_gc_stats_enabled() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ return gc_statistics ? Qtrue : Qfalse; ++} ++ ++ ++/* ++ * call-seq: ++ * GC.clear_stats => nil ++ * ++ * Clears garbage collection statistics, returning nil. This resets the number ++ * of collections (GC.collections) and the time used (GC.time) to 0. ++ * ++ * GC.clear_stats #=> nil ++ * ++ */ ++ ++VALUE ++rb_gc_clear_stats() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ gc_collections = 0; ++ gc_time_accumulator = 0; ++ gc_allocated_size = 0; ++ gc_num_allocations = 0; ++ return Qnil; ++} ++ ++/* ++ * call-seq: ++ * GC.allocated_size => Integer ++ * ++ * Returns the size of memory (in bytes) allocated since GC statistics collection ++ * was enabled. ++ * ++ * GC.allocated_size #=> 35 ++ * ++ */ ++ ++VALUE ++rb_gc_allocated_size() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++#if HAVE_LONG_LONG ++ return ULL2NUM(gc_allocated_size); ++#else ++ return ULONG2NUM(gc_allocated_size); ++#endif ++} ++ ++/* ++ * call-seq: ++ * GC.num_allocations => Integer ++ * ++ * Returns the number of memory allocations since GC statistics collection ++ * was enabled. ++ * ++ * GC.num_allocations #=> 150 ++ * ++ */ ++VALUE ++rb_gc_num_allocations() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++#if HAVE_LONG_LONG ++ return ULL2NUM(gc_num_allocations); ++#else ++ return ULONG2NUM(gc_num_allocations); ++#endif ++} ++ ++/* ++ * call-seq: ++ * GC.enable_trace => true or false ++ * ++ * Enables garbage collection tracing, returning true if garbage ++ * collection tracing was already enabled. ++ * ++ * GC.enable_trace #=> false or true ++ * GC.enable_trace #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_enable_trace() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ int old = verbose_gc_stats; ++ verbose_gc_stats = 1; ++ return old ? Qtrue : Qfalse; ++} ++ ++/* ++ * call-seq: ++ * GC.disable_trace => true or false ++ * ++ * Disables garbage collection tracing, returning true if garbage ++ * collection tracing was already disabled. ++ * ++ * GC.disable_trace #=> false or true ++ * GC.disable_trace #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_disable_trace() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ int old = verbose_gc_stats; ++ verbose_gc_stats = 0; ++ return old ? Qtrue : Qfalse; ++} ++ ++/* ++ * call-seq: ++ * GC.trace_enabled? => true or false ++ * ++ * Check whether GC tracing has been enabled. ++ * ++ * GC.trace_enabled? #=> false or true ++ * ++ */ ++ ++VALUE ++rb_gc_trace_enabled() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ return verbose_gc_stats ? Qtrue : Qfalse; ++} ++ ++ ++const char* GC_LOGFILE_IVAR = "@gc_logfile_name"; ++ ++/* ++ * call-seq: ++ * GC.log_file(filename=nil, mode="w") => boolean ++ * ++ * Changes the GC data log file. Closes the currently open logfile. ++ * Returns true if the file was successfully opened for ++ * writing. 
Returns false if the file could not be opened for ++ * writing. Returns the name of the current logfile (or nil) if no ++ * parameter is given. Restores logging to stderr when given nil as ++ * an argument. ++ * ++ * GC.log_file #=> nil ++ * GC.log_file "/tmp/gc.log" #=> true ++ * GC.log_file #=> "/tmp/gc.log" ++ * GC.log_file nil #=> true ++ * ++ */ ++ ++VALUE ++rb_gc_log_file(int argc, VALUE *argv, VALUE self) ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ VALUE filename = Qnil; ++ VALUE mode_str = Qnil; ++ FILE* f = NULL; ++ const char* mode = "w"; ++ ++ VALUE current_logfile_name = rb_iv_get(rb_mGC, GC_LOGFILE_IVAR); ++ ++ if (argc==0) ++ return current_logfile_name; ++ ++ rb_scan_args(argc, argv, "02", &filename, &mode_str); ++ ++ if (filename == Qnil) { ++ /* close current logfile and reset logfile to stderr */ ++ if (gc_data_file != stderr) { ++ fclose(gc_data_file); ++ gc_data_file = stderr; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, Qnil); ++ } ++ return Qtrue; ++ } ++ ++ /* we have a real logfile name */ ++ filename = StringValue(filename); ++ ++ if (rb_equal(current_logfile_name, filename) == Qtrue) { ++ /* do nothing if we get the file name we're already logging to */ ++ return Qtrue; ++ } ++ ++ /* get mode for file opening */ ++ if (mode_str != Qnil) ++ { ++ mode = RSTRING_PTR(StringValue(mode_str)); ++ } ++ ++ /* try to open file in given mode */ ++ if (f = fopen(RSTRING_PTR(filename), mode)) { ++ if (gc_data_file != stderr) { ++ fclose(gc_data_file); ++ } ++ gc_data_file = f; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, filename); ++ } else { ++ return Qfalse; ++ } ++ return Qtrue; ++} ++ ++/* ++ * call-seq: ++ * GC.log String => String ++ * ++ * Logs string to the GC data file and returns it. ++ * ++ * GC.log "manual GC call" #=> "manual GC call" ++ * ++ */ ++ ++VALUE ++rb_gc_log(self, original_str) ++ VALUE self, original_str; ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ if (original_str == Qnil) { ++ fprintf(gc_data_file, "\n"); ++ } ++ else { ++ VALUE str = StringValue(original_str); ++ char *p = RSTRING_PTR(str); ++ fprintf(gc_data_file, "%s\n", p); ++ } ++ return original_str; ++} ++ ++/* ++ * call-seq: ++ * GC.dump => nil ++ * ++ * dumps information about the current GC data structures to the GC log file ++ * ++ * GC.dump #=> nil ++ * ++ */ ++ ++VALUE ++rb_gc_dump() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ size_t i; ++ ++ for (i = 0; i < heaps_used; i++) { ++ size_t limit = heaps[i].limit; ++ fprintf(gc_data_file, "HEAP[%2lu]: size=%7lu\n", (unsigned long)i, (unsigned long)limit); ++ } ++ ++ return Qnil; ++} ++ ++static const char* obj_type(VALUE tp); ++ ++#ifdef GC_DEBUG ++/* ++ * call-seq: ++ * GC.dump_file_and_line_info(String, boolean) => nil ++ * ++ * dumps information on which currently allocated object was created by which file and on which line ++ * ++ * GC.dump_file_and_line_info(String, boolean) #=> nil ++ * ++ * The second parameter specifies whether class names should be included in the dump. ++ * Note that including class names will allocate additional string objects on the heap. 
++ * ++ */ ++ ++VALUE ++rb_gc_dump_file_and_line_info(int argc, VALUE *argv) ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ VALUE filename, str, include_classnames = Qnil; ++ char *fname = NULL; ++ char *klass = NULL; ++ FILE* f = NULL; ++ size_t i = 0; ++ ++ rb_scan_args(argc, argv, "11", &filename, &include_classnames); ++ ++ str = StringValue(filename); ++ fname = RSTRING_PTR(str); ++ f = fopen(fname, "w"); ++ ++ for (i = 0; i < heaps_used; i++) { ++ RVALUE *p, *pend; ++ ++ p = heaps[i].slot; pend = p + heaps[i].limit; ++ for (;p < pend; p++) { ++ if (p->as.basic.flags) { ++ const char *src_filename = (p->file && p->file != Qnil )? RSTRING_PTR(p->file) : ""; ++ fprintf(f, "%s:%s:%d", obj_type(p->as.basic.flags & T_MASK), src_filename, (int)p->line); ++ // rb_obj_classname will create objects on the heap, we need a better solution ++ if (include_classnames == Qtrue) { ++ /* write the class */ ++ fprintf(f, ":"); ++ switch (BUILTIN_TYPE(p)) { ++ case T_NONE: ++ fprintf(f, "__none__"); ++ break; ++ case T_UNDEF: ++ fprintf(f, "__undef__"); ++ break; ++ case T_NODE: ++ fprintf(f, "__node__"); ++ break; ++ default: ++ if (!p->as.basic.klass) { ++ fprintf(f, "__unknown__"); ++ } else { ++ fprintf(f, "%s", rb_obj_classname((VALUE)p)); ++ } ++ } ++ /* print object size for some known object types */ ++ switch (BUILTIN_TYPE(p)) { ++ case T_STRING: ++ fprintf(f, ":%lu", RSTRING_LEN(p)); ++ break; ++ case T_ARRAY: ++ fprintf(f, ":%lu", RARRAY_LEN(p)); ++ break; ++ case T_HASH: ++ fprintf(f, ":%lu", (long unsigned int)RHASH_SIZE(p)); ++ break; ++ } ++ } ++ fprintf(f, "\n"); ++ } ++ } ++ } ++ fclose(f); ++ return Qnil; ++} ++#endif ++ ++/* ++ * call-seq: ++ * GC.heap_slots => Integer ++ * ++ * Returns the number of heap slots available for object allocations. ++ * ++ * GC.heap_slots #=> 10000 ++ * ++ */ ++VALUE ++rb_gc_heap_slots() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ return LONG2NUM(heap_size); ++} ++ ++ ++/* ++ * call-seq: ++ * GC.collections => Integer ++ * ++ * Returns the number of garbage collections performed while GC statistics collection ++ * was enabled. ++ * ++ * GC.collections #=> 35 ++ * ++ */ ++ ++VALUE ++rb_gc_collections() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ return LONG2NUM(gc_collections); ++} ++ ++/* ++ * call-seq: ++ * GC.time => Integer ++ * ++ * Returns the time spent during garbage collection while GC statistics collection ++ * was enabled (in micro seconds). 
++ * ++ * GC.time #=> 20000 ++ * ++ */ ++ ++VALUE ++rb_gc_time() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++#if HAVE_LONG_LONG ++ return LL2NUM(gc_time_accumulator); ++#else ++ return LONG2NUM(gc_time_accumulator); ++#endif ++} ++ + VALUE rb_mGC; + + void +@@ -944,6 +1500,7 @@ assign_heap_slot(rb_objspace_t *objspace) + if (lomem == 0 || lomem > p) lomem = p; + if (himem < pend) himem = pend; + heaps_used++; ++ heap_size += objs; + + while (p < pend) { + p->as.free.flags = 0; +@@ -958,7 +1515,7 @@ init_heap(rb_objspace_t *objspace) + { + size_t add, i; + +- add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT; ++ add = heap_min_slots / HEAP_OBJ_LIMIT; + + if (!add) { + add = 1; +@@ -979,7 +1536,7 @@ init_heap(rb_objspace_t *objspace) + static void + set_heaps_increment(rb_objspace_t *objspace) + { +- size_t next_heaps_length = (size_t)(heaps_used * 1.8); ++ size_t next_heaps_length = (size_t)(heaps_used * heap_slots_growth_factor); + + if (next_heaps_length == heaps_used) { + next_heaps_length++; +@@ -1005,6 +1562,22 @@ heaps_increment(rb_objspace_t *objspace) + + #define RANY(o) ((RVALUE*)(o)) + ++#ifdef GC_DEBUG ++static VALUE ++_rb_sourcefile(void) ++{ ++ rb_thread_t *th = GET_THREAD(); ++ rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp); ++ ++ if (cfp) { ++ return cfp->iseq->filename; ++ } ++ else { ++ return Qnil; ++ } ++} ++#endif ++ + static VALUE + rb_newobj_from_heap(rb_objspace_t *objspace) + { +@@ -1022,9 +1595,11 @@ rb_newobj_from_heap(rb_objspace_t *objspace) + + MEMZERO((void*)obj, RVALUE, 1); + #ifdef GC_DEBUG +- RANY(obj)->file = rb_sourcefile(); ++ RANY(obj)->file = _rb_sourcefile(); + RANY(obj)->line = rb_sourceline(); + #endif ++ live_objects++; ++ allocated_objects++; + + return obj; + } +@@ -1525,6 +2100,12 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) + { + register RVALUE *obj = RANY(ptr); + ++#ifdef GC_DEBUG ++ if (obj->file && obj->file != Qnil && is_pointer_to_heap(objspace, (void*)obj->file)) { ++ gc_mark(objspace, obj->file, lev); ++ } ++#endif ++ + goto marking; /* skip */ + + again: +@@ -1534,6 +2115,12 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev) + if (obj->as.basic.flags & FL_MARK) return; /* already marked */ + obj->as.basic.flags |= FL_MARK; + ++#ifdef GC_DEBUG ++ if (obj->file && obj->file != Qnil && is_pointer_to_heap(objspace, (void*)obj->file)) { ++ gc_mark(objspace, obj->file, lev); ++ } ++#endif ++ + marking: + if (FL_TEST(obj, FL_EXIVAR)) { + rb_mark_generic_ivar(ptr); +@@ -1851,6 +2438,39 @@ free_unused_heaps(rb_objspace_t *objspace) + } + } + ++static const char* obj_type(VALUE type) ++{ ++ switch (type) { ++ case T_NIL : return "NIL"; ++ case T_OBJECT : return "OBJECT"; ++ case T_CLASS : return "CLASS"; ++ case T_ICLASS : return "ICLASS"; ++ case T_MODULE : return "MODULE"; ++ case T_FLOAT : return "FLOAT"; ++ case T_COMPLEX: return "COMPLEX"; ++ case T_RATIONAL: return "RATIONAL"; ++ case T_STRING : return "STRING"; ++ case T_REGEXP : return "REGEXP"; ++ case T_ARRAY : return "ARRAY"; ++ case T_FIXNUM : return "FIXNUM"; ++ case T_HASH : return "HASH"; ++ case T_STRUCT : return "STRUCT"; ++ case T_BIGNUM : return "BIGNUM"; ++ case T_FILE : return "FILE"; ++ ++ case T_TRUE : return "TRUE"; ++ case T_FALSE : return "FALSE"; ++ case T_DATA : return "DATA"; ++ case T_MATCH : return "MATCH"; ++ case T_SYMBOL : return "SYMBOL"; ++ case T_ZOMBIE : return "ZOMBIE"; ++ ++ case T_UNDEF : return "UNDEF"; ++ case T_NODE : return "NODE"; ++ default: return "____"; ++ } ++} ++ + static void + 
gc_sweep(rb_objspace_t *objspace) + { +@@ -1859,12 +2479,27 @@ gc_sweep(rb_objspace_t *objspace) + size_t i; + size_t live = 0, free_min = 0, do_heap_free = 0; + ++ long max_blocks_to_free = heaps_used - (heap_min_slots / HEAP_OBJ_LIMIT); ++ int freed_blocks = 0; ++ ++ unsigned long processed = 0; ++ unsigned long freelist_size = 0; ++ unsigned long zombies = 0; ++ unsigned long free_counts[T_MASK]; ++ unsigned long live_counts[T_MASK]; ++ int do_gc_stats = gc_statistics & verbose_gc_stats; ++ ++ if (do_gc_stats) { ++ MEMZERO((void*)free_counts, unsigned long, T_MASK); ++ MEMZERO((void*)live_counts, unsigned long, T_MASK); ++ } ++ + do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65); + free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2); + +- if (free_min < FREE_MIN) { ++ if (free_min < heap_free_min) { + do_heap_free = heaps_used * HEAP_OBJ_LIMIT; +- free_min = FREE_MIN; ++ free_min = heap_free_min; + } + + freelist = 0; +@@ -1881,10 +2516,15 @@ gc_sweep(rb_objspace_t *objspace) + p = heaps[i].slot; pend = p + heaps[i].limit; + while (p < pend) { + if (!(p->as.basic.flags & FL_MARK)) { ++ if (do_gc_stats && !p->as.basic.flags) { ++ /* slot was free before GC */ ++ freelist_size++; ++ } + if (p->as.basic.flags && + ((deferred = obj_free(objspace, (VALUE)p)) || + ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) { + if (!deferred) { ++ if (do_gc_stats) zombies++; + p->as.free.flags = T_ZOMBIE; + RDATA(p)->dfree = 0; + } +@@ -1894,6 +2534,10 @@ gc_sweep(rb_objspace_t *objspace) + final_num++; + } + else { ++ if (do_gc_stats) { ++ VALUE obt = p->as.basic.flags & T_MASK; ++ if (obt) free_counts[obt]++; ++ } + add_freelist(objspace, p); + free_num++; + } +@@ -1901,16 +2545,23 @@ gc_sweep(rb_objspace_t *objspace) + else if (BUILTIN_TYPE(p) == T_ZOMBIE) { + /* objects to be finalized */ + /* do nothing remain marked */ ++ if (do_gc_stats) zombies++; + } + else { + RBASIC(p)->flags &= ~FL_MARK; + live++; ++ if (do_gc_stats) { ++ live_counts[p->as.basic.flags & T_MASK]++; ++ } + } + p++; + } +- if (final_num + free_num == heaps[i].limit && freed > do_heap_free) { ++ if (final_num + free_num == heaps[i].limit && freed > do_heap_free && freed_blocks < max_blocks_to_free) { + RVALUE *pp; + ++ freed_blocks += 1; ++ heap_size -= final_num + free_num; ++ + for (pp = final_list; pp != final; pp = pp->as.free.next) { + RDATA(pp)->dmark = (void (*)())(VALUE)&heaps[i]; + pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */ +@@ -1922,11 +2573,13 @@ gc_sweep(rb_objspace_t *objspace) + else { + freed += free_num; + } ++ processed += heaps[i].limit; + } + GC_PROF_SET_MALLOC_INFO; + if (malloc_increase > malloc_limit) { + malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)live / (live + freed)); +- if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT; ++ if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit; ++ + } + malloc_increase = 0; + if (freed < free_min) { +@@ -1934,6 +2587,25 @@ gc_sweep(rb_objspace_t *objspace) + heaps_increment(objspace); + } + during_gc = 0; ++ live_objects = live; ++ ++ /* log gc stats if requested */ ++ if (do_gc_stats) { ++ fprintf(gc_data_file, "objects processed: %.7lu\n", (unsigned long)processed); ++ fprintf(gc_data_file, "live objects : %.7lu\n", (unsigned long)live); ++ fprintf(gc_data_file, "freelist objects : %.7lu\n", (unsigned long)freelist_size); ++ fprintf(gc_data_file, "freed objects : %.7lu\n", (unsigned long)freed); ++ fprintf(gc_data_file, "zombies : %.7lu\n", (unsigned long)zombies); ++ 
for(i=0; i<T_MASK; i++) { ++ if (free_counts[i]>0 || live_counts[i]>0) { ++ fprintf(gc_data_file, ++ "kept %.7lu / freed %.7lu objects of type %s\n", ++ (unsigned long)live_counts[i], (unsigned long)free_counts[i], obj_type((int)i)); ++ } ++ } ++ rb_gc_dump(); ++ fflush(gc_data_file); ++ } + + /* clear finalization list */ + if (final_list) { +@@ -2140,6 +2812,7 @@ void rb_gc_mark_encodings(void); + static int + garbage_collect(rb_objspace_t *objspace) + { ++ struct timeval gctv1, gctv2; + struct gc_list *list; + rb_thread_t *th = GET_THREAD(); + INIT_GC_PROF_PARAMS; +@@ -2162,6 +2835,14 @@ garbage_collect(rb_objspace_t *objspace) + during_gc++; + objspace->count++; + ++ if (gc_statistics) { ++ gc_collections++; ++ gettimeofday(&gctv1, NULL); ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "Garbage collection started\n"); ++ } ++ } ++ + GC_PROF_TIMER_START; + GC_PROF_MARK_TIMER_START; + SET_STACK_END; +@@ -2216,6 +2897,19 @@ garbage_collect(rb_objspace_t *objspace) + + GC_PROF_TIMER_STOP; + if (GC_NOTIFY) printf("end garbage_collect()\n"); ++ ++ if (gc_statistics) { ++ unsigned LONG_LONG musecs_used; ++ gettimeofday(&gctv2, NULL); ++ musecs_used = ((unsigned LONG_LONG)(gctv2.tv_sec - gctv1.tv_sec) * 1000000) + (gctv2.tv_usec - gctv1.tv_usec); ++ gc_time_accumulator += musecs_used; ++ ++ if (verbose_gc_stats) { ++ fprintf(gc_data_file, "GC time: %lu msec\n", (unsigned long)(musecs_used / 1000)); ++ fflush(gc_data_file); ++ } ++ } ++ + return TRUE; + } + +@@ -2298,6 +2992,7 @@ Init_stack(volatile VALUE *addr) + void + Init_heap(void) + { ++ set_gc_parameters(&rb_objspace); + init_heap(&rb_objspace); + } + +@@ -2982,6 +3677,49 @@ count_objects(int argc, VALUE *argv, VALUE os) + return hash; + } + ++/* call-seq: ++ * ObjectSpace.live_objects => number ++ * ++ * Returns the count of objects currently allocated in the system. This goes ++ * down after the garbage collector runs. ++ */ ++static ++VALUE os_live_objects(VALUE self) ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ return ULONG2NUM(live_objects); ++} ++ ++unsigned long rb_os_live_objects() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ return live_objects; ++} ++ ++/* call-seq: ++ * ObjectSpace.allocated_objects => number ++ * ++ * Returns the count of objects allocated since the Ruby interpreter has ++ * started. This number can only increase.
To know how many objects are ++ * currently allocated, use ObjectSpace::live_objects ++ */ ++static ++VALUE os_allocated_objects(VALUE self) ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++#if defined(HAVE_LONG_LONG) ++ return ULL2NUM(allocated_objects); ++#else ++ return ULONG2NUM(allocated_objects); ++#endif ++} ++ ++unsigned LONG_LONG rb_os_allocated_objects() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ return allocated_objects; ++} ++ + /* + * call-seq: + * GC.count -> Integer +@@ -3188,6 +3926,28 @@ Init_GC(void) + rb_define_singleton_method(rb_mGC, "count", gc_count, 0); + rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0); + ++ rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0); ++ rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0); ++ rb_define_singleton_method(rb_mGC, "stats_enabled?", rb_gc_stats_enabled, 0); ++ rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0); ++ rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); ++ rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); ++ rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0); ++ rb_define_const(rb_mGC, "HEAP_SLOT_SIZE", INT2FIX(sizeof(RVALUE))); ++ ++ rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1); ++ rb_define_singleton_method(rb_mGC, "log_file", rb_gc_log_file, -1); ++ rb_define_singleton_method(rb_mGC, "enable_trace", rb_gc_enable_trace, 0); ++ rb_define_singleton_method(rb_mGC, "disable_trace", rb_gc_disable_trace, 0); ++ rb_define_singleton_method(rb_mGC, "trace_enabled?", rb_gc_trace_enabled, 0); ++ ++ rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0); ++ rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0); ++ rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0); ++#ifdef GC_DEBUG ++ rb_define_singleton_method(rb_mGC, "dump_file_and_line_info", rb_gc_dump_file_and_line_info, -1); ++#endif ++ + rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler"); + rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0); + rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0); +@@ -3201,6 +3961,9 @@ Init_GC(void) + rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1); + rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0); + ++ rb_define_module_function(rb_mObSpace, "live_objects", os_live_objects, 0); ++ rb_define_module_function(rb_mObSpace, "allocated_objects", os_allocated_objects, 0); ++ + rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1); + rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1); + +diff --git a/sample/test.rb b/sample/test.rb +old mode 100644 +new mode 100755 diff --git a/patches/ruby/1.9.2/p318/railsexpress/02-display-more-detailed-stack-trace.patch b/patches/ruby/1.9.2/p318/railsexpress/02-display-more-detailed-stack-trace.patch new file mode 100644 index 0000000000..232b7ff6ce --- /dev/null +++ b/patches/ruby/1.9.2/p318/railsexpress/02-display-more-detailed-stack-trace.patch @@ -0,0 +1,15 @@ +diff --git a/eval_error.c b/eval_error.c +index 41fcbb0..9e065f9 100644 +--- a/eval_error.c ++++ b/eval_error.c +@@ -166,8 +166,8 @@ error_print(void) + int skip = eclass == rb_eSysStackError; + + #define TRACE_MAX (TRACE_HEAD+TRACE_TAIL+5) +-#define TRACE_HEAD 8 +-#define TRACE_TAIL 5 ++#define TRACE_HEAD 100 ++#define TRACE_TAIL 100 + + for (i = 1; i < len; i++) { + if (TYPE(ptr[i]) == 
T_STRING) { diff --git a/patches/ruby/1.9.2/p318/railsexpress/03-fork-support-for-gc-logging.patch b/patches/ruby/1.9.2/p318/railsexpress/03-fork-support-for-gc-logging.patch new file mode 100644 index 0000000000..7126c9103e --- /dev/null +++ b/patches/ruby/1.9.2/p318/railsexpress/03-fork-support-for-gc-logging.patch @@ -0,0 +1,68 @@ +diff --git a/gc.c b/gc.c +index 27d9177..7673687 100644 +--- a/gc.c ++++ b/gc.c +@@ -1193,6 +1193,34 @@ rb_gc_log_file(int argc, VALUE *argv, VALUE self) + } + + /* ++ * Called from process.c before a fork. Flushes the gc log file to ++ * avoid writing the buffered output twice (once in the parent, and ++ * once in the child). ++ */ ++void ++rb_gc_before_fork() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ fflush(gc_data_file); ++} ++ ++/* ++ * Called from process.c after a fork in the child process. Turns off ++ * logging, disables GC stats and resets all gc counters and timing ++ * information. ++ */ ++void ++rb_gc_after_fork() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ rb_gc_disable_stats(); ++ rb_gc_clear_stats(); ++ rb_gc_disable_trace(); ++ gc_data_file = stderr; ++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, Qnil); ++} ++ ++/* + * call-seq: + * GC.log String => String + * +diff --git a/include/ruby/intern.h b/include/ruby/intern.h +index f0cff67..a374c7e 100644 +--- a/include/ruby/intern.h ++++ b/include/ruby/intern.h +@@ -390,6 +390,8 @@ void rb_gc_call_finalizer_at_exit(void); + VALUE rb_gc_enable(void); + VALUE rb_gc_disable(void); + VALUE rb_gc_start(void); ++void rb_gc_before_fork _((void)); ++void rb_gc_after_fork _((void)); + #define Init_stack(addr) ruby_init_stack(addr) + /* hash.c */ + void st_foreach_safe(struct st_table *, int (*)(ANYARGS), st_data_t); +diff --git a/process.c b/process.c +index c180492..d20d44c 100644 +--- a/process.c ++++ b/process.c +@@ -2617,9 +2617,11 @@ rb_f_fork(VALUE obj) + rb_pid_t pid; + + rb_secure(2); ++ rb_gc_before_fork(); + + switch (pid = rb_fork(0, 0, 0, Qnil)) { + case 0: ++ rb_gc_after_fork(); + rb_thread_atfork(); + if (rb_block_given_p()) { + int status; diff --git a/patches/ruby/1.9.2/p318/railsexpress/04-track-live-dataset-size.patch b/patches/ruby/1.9.2/p318/railsexpress/04-track-live-dataset-size.patch new file mode 100644 index 0000000000..9f28390919 --- /dev/null +++ b/patches/ruby/1.9.2/p318/railsexpress/04-track-live-dataset-size.patch @@ -0,0 +1,69 @@ +diff --git a/gc.c b/gc.c +index 7673687..4d9ca88 100644 +--- a/gc.c ++++ b/gc.c +@@ -248,7 +248,6 @@ getrusage_time(void) + } while(0) + #endif + +- + #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__) + #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */ + #endif +@@ -355,6 +354,7 @@ typedef struct rb_objspace { + unsigned LONG_LONG gc_num_allocations; + unsigned long live_objects; + unsigned LONG_LONG allocated_objects; ++ unsigned long heap_slots_live_after_last_gc; + } rb_objspace_t; + + #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE +@@ -388,6 +388,7 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress; + #define gc_num_allocations objspace->gc_num_allocations + #define live_objects objspace->live_objects + #define allocated_objects objspace->allocated_objects ++#define heap_slots_live_after_last_gc objspace->heap_slots_live_after_last_gc + #define finalizer_table objspace->final.table + #define deferred_final_list objspace->final.deferred + #define mark_stack objspace->markstack.buffer +@@ -1410,6 +1411,24 @@ rb_gc_time() + #endif + } + ++/* ++ * call-seq: ++ * 
GC.heap_slots_live_after_last_gc => Integer ++ * ++ * Returns the number of heap slots which were live after the last garbage collection. ++ * ++ * GC.heap_slots_live_after_last_gc #=> 231223 ++ * ++ */ ++VALUE ++rb_gc_heap_slots_live_after_last_gc() ++{ ++ rb_objspace_t *objspace = &rb_objspace; ++ return ULONG2NUM(heap_slots_live_after_last_gc); ++} ++ ++ ++ + VALUE rb_mGC; + + void +@@ -2616,6 +2635,7 @@ gc_sweep(rb_objspace_t *objspace) + } + during_gc = 0; + live_objects = live; ++ heap_slots_live_after_last_gc = live; + + /* log gc stats if requested */ + if (do_gc_stats) { +@@ -3961,6 +3981,7 @@ Init_GC(void) + rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0); + rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0); + rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0); ++ rb_define_singleton_method(rb_mGC, "heap_slots_live_after_last_gc", rb_gc_heap_slots_live_after_last_gc, 0); + rb_define_const(rb_mGC, "HEAP_SLOT_SIZE", INT2FIX(sizeof(RVALUE))); + + rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1); diff --git a/patchsets/ruby/1.8.7/head/railsexpress b/patchsets/ruby/1.8.7/head/railsexpress new file mode 100644 index 0000000000..23a8bba83c --- /dev/null +++ b/patchsets/ruby/1.8.7/head/railsexpress @@ -0,0 +1,17 @@ +railsexpress/01-ignore-generated-files.patch +railsexpress/02-fix-tests-for-osx.patch +railsexpress/03-sigvtalrm-fix.patch +railsexpress/04-railsbench-gc-patch.patch +railsexpress/05-display-full-stack-trace.patch +railsexpress/06-better-source-file-tracing.patch +railsexpress/07-heap-dump-support.patch +railsexpress/08-fork-support-for-gc-logging.patch +railsexpress/09-track-malloc-size.patch +railsexpress/10-track-object-allocation.patch +railsexpress/11-expose-heap-slots.patch +railsexpress/12-fix-heap-size-growth-logic.patch +railsexpress/13-heap-slot-size.patch +railsexpress/14-add-trace-stats-enabled-methods.patch +railsexpress/15-track-live-dataset-size.patch +railsexpress/16-add-object-size-information-to-heap-dump.patch +railsexpress/17-caller-for-all-threads.patch diff --git a/patchsets/ruby/1.8.7/p358/railsexpress b/patchsets/ruby/1.8.7/p358/railsexpress new file mode 100644 index 0000000000..23a8bba83c --- /dev/null +++ b/patchsets/ruby/1.8.7/p358/railsexpress @@ -0,0 +1,17 @@ +railsexpress/01-ignore-generated-files.patch +railsexpress/02-fix-tests-for-osx.patch +railsexpress/03-sigvtalrm-fix.patch +railsexpress/04-railsbench-gc-patch.patch +railsexpress/05-display-full-stack-trace.patch +railsexpress/06-better-source-file-tracing.patch +railsexpress/07-heap-dump-support.patch +railsexpress/08-fork-support-for-gc-logging.patch +railsexpress/09-track-malloc-size.patch +railsexpress/10-track-object-allocation.patch +railsexpress/11-expose-heap-slots.patch +railsexpress/12-fix-heap-size-growth-logic.patch +railsexpress/13-heap-slot-size.patch +railsexpress/14-add-trace-stats-enabled-methods.patch +railsexpress/15-track-live-dataset-size.patch +railsexpress/16-add-object-size-information-to-heap-dump.patch +railsexpress/17-caller-for-all-threads.patch diff --git a/patchsets/ruby/1.9.2/p318/railsexpress b/patchsets/ruby/1.9.2/p318/railsexpress new file mode 100644 index 0000000000..3e7ae4d7ca --- /dev/null +++ b/patchsets/ruby/1.9.2/p318/railsexpress @@ -0,0 +1,4 @@ +railsexpress/01-railsbench-gc-patch.patch +railsexpress/02-display-more-detailed-stack-trace.patch +railsexpress/03-fork-support-for-gc-logging.patch +railsexpress/04-track-live-dataset-size.patch
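A note on usage, since the hunks above show only the C side: everything the statistics patches define is driven from Ruby through the GC and ObjectSpace modules. The following is a minimal sketch, assuming an interpreter built with one of the patchsets listed above (none of these methods exist on a stock Ruby); the workload and printed values are illustrative.

    GC.enable_stats unless GC.stats_enabled?
    before = ObjectSpace.live_objects

    100_000.times { "x" * 64 }              # allocate some short-lived strings
    GC.start

    puts GC.collections                     # collections since stats were enabled
    puts GC.time                            # time spent in GC, in microseconds
    puts GC.allocated_size                  # bytes malloc'ed since stats were enabled
    puts GC.num_allocations                 # number of malloc calls in that period
    puts GC.heap_slots                      # slots currently managed by the interpreter
    puts GC.heap_slots_live_after_last_gc   # slots that survived the last collection
    puts GC.heap_slots * GC::HEAP_SLOT_SIZE # rough heap size in bytes, one RVALUE per slot
    puts ObjectSpace.live_objects - before  # net growth in live objects
    GC.clear_stats                          # reset counters; GC.disable_stats turns them off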
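The tuning knobs read by set_gc_parameters() are plain environment variables, consulted once at interpreter startup, so they are normally set by a wrapper script or process supervisor rather than from inside the program. Below is a sketch of a launcher driving a patched ruby as a child process; every value shown is illustrative rather than a recommendation, and the -e payload is just a placeholder.

    # Each variable below is read by set_gc_parameters() at boot.
    env = {
      "RUBY_HEAP_MIN_SLOTS"           => "600000",     # initial number of heap slots
      "RUBY_HEAP_FREE_MIN"            => "100000",     # slots that must be free after GC, else the heap grows
      "RUBY_HEAP_SLOTS_INCREMENT"     => "50000",      # size of the first additional heap allocation
      "RUBY_HEAP_SLOTS_GROWTH_FACTOR" => "1.2",        # multiplier applied to later increments
      "RUBY_GC_MALLOC_LIMIT"          => "60000000",   # bytes malloc'ed before a GC is forced
      "RUBY_GC_STATS"                 => "1",          # verbose per-collection logging from the start
      "RUBY_GC_DATA_FILE"             => "/tmp/gc.log" # log destination; defaults to stderr
    }
    system(env, "ruby", "-e", "GC.start; puts GC.heap_slots")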
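Because forked children share the parent's log handle (which is why the fork-support patch flushes it in rb_gc_before_fork and resets it in rb_gc_after_fork), the log destination is also steerable at runtime through GC.log_file. A short sketch of the call pattern documented above; the path is illustrative.

    GC.log_file "/tmp/gc.log"  #=> true when the file could be opened for writing
    GC.log_file                #=> "/tmp/gc.log"; with no argument, returns the current name
    GC.enable_trace            # per-collection details now go to the log file
    GC.log "before import"     # interleave an arbitrary marker line
    GC.start
    GC.dump                    # one line per heap block: index and slot count
    GC.log_file nil            # close the file and restore logging to stderr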
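The object-size information added above surfaces through GC.dump_file_and_line_info, which exists only when the build was configured with --enable-gcdebug (so GC_DEBUG is defined and every RVALUE carries its allocation file and line). A sketch; the dump path is illustrative.

    # Requires a ruby configured with --enable-gcdebug.
    GC.dump_file_and_line_info("/tmp/heap.dump", true)  # true: include class names
    # Each line of the dump describes one live object:
    #   TYPE:file:line[:class[:size]]
    # where size is a string's length, an array's length, or a hash's
    # entry count, per the switch statement added in this patch.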
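Finally, caller_for_all_threads is a global function rather than a GC method, and only the 1.8.7 patchsets above include it. It snapshots every thread while rb_thread_critical is held and returns a hash mapping each thread to a caller-style backtrace. A usage sketch; the worker thread is illustrative.

    worker = Thread.new { loop { sleep 1 } }
    sleep 0.1  # give the worker time to reach its sleep

    caller_for_all_threads.each do |thread, frames|
      puts thread.inspect                        # e.g. #<Thread:0x... sleep>
      frames.each { |frame| puts "  #{frame}" }  # "file:42:in `method'" strings
    end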