Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

merge railsexpress patches, closes #1119

  • Loading branch information...
commit c97e36d6dab232fd806f78fc6a1e9da41e3ccd44 2 parents 336e0f3 + bcb075d
@mpapis mpapis authored
Showing with 3,724 additions and 95 deletions.
  1. +101 −0 patches/ruby/1.8.7/p370/railsexpress/01-ignore-generated-files.patch
  2. +139 −0 patches/ruby/1.8.7/p370/railsexpress/02-fix-tests-for-osx.patch
  3. +25 −0 patches/ruby/1.8.7/p370/railsexpress/03-sigvtalrm-fix.patch
  4. +1,875 −0 patches/ruby/1.8.7/p370/railsexpress/04-railsbench-gc-patch.patch
  5. +15 −0 patches/ruby/1.8.7/p370/railsexpress/05-display-full-stack-trace.patch
  6. +13 −0 patches/ruby/1.8.7/p370/railsexpress/06-better-source-file-tracing.patch
  7. +159 −0 patches/ruby/1.8.7/p370/railsexpress/07-heap-dump-support.patch
  8. +249 −0 patches/ruby/1.8.7/p370/railsexpress/08-fork-support-for-gc-logging.patch
  9. +120 −0 patches/ruby/1.8.7/p370/railsexpress/09-track-malloc-size.patch
  10. +111 −0 patches/ruby/1.8.7/p370/railsexpress/10-track-object-allocation.patch
  11. +70 −0 patches/ruby/1.8.7/p370/railsexpress/11-expose-heap-slots.patch
  12. +53 −0 patches/ruby/1.8.7/p370/railsexpress/12-fix-heap-size-growth-logic.patch
  13. +12 −0 patches/ruby/1.8.7/p370/railsexpress/13-heap-slot-size.patch
  14. +66 −0 patches/ruby/1.8.7/p370/railsexpress/14-add-trace-stats-enabled-methods.patch
  15. +52 −0 patches/ruby/1.8.7/p370/railsexpress/15-track-live-dataset-size.patch
  16. +51 −0 patches/ruby/1.8.7/p370/railsexpress/16-add-object-size-information-to-heap-dump.patch
  17. +229 −0 patches/ruby/1.8.7/p370/railsexpress/17-caller-for-all-threads.patch
  18. +26 −0 patches/ruby/1.8.7/p370/railsexpress/18-fix-zlib-deflate.patch
  19. +28 −0 patches/ruby/1.8.7/p370/railsexpress/19-fix-broken-seek-on-osx.patch
  20. +24 −0 patches/ruby/1.9.3/head/railsexpress/01-avoid-using-broken-ffi-headers.patch
  21. +0 −24 patches/ruby/1.9.3/head/railsexpress/01-revert-f6b49243eb0c21bea1c4198cdd52a549e6ead075.patch
  22. +0 −13 patches/ruby/1.9.3/head/railsexpress/02-backport-c2086cc7ff1142b14c95c.patch
  23. +2 −2 patches/ruby/1.9.3/head/railsexpress/{03-railsbench-gc.patch → 02-railsbench-gc.patch}
  24. 0  ...railsexpress/{04-display-more-detailed-stack-trace.patch → 03-display-more-detailed-stack-trace.patch}
  25. +1 −1  .../1.9.3/head/railsexpress/{05-fork-support-for-gc-logging.patch → 04-fork-support-for-gc-logging.patch}
  26. 0  ...hes/ruby/1.9.3/head/railsexpress/{06-track-live-dataset-size.patch → 05-track-live-dataset-size.patch}
  27. +13 −0 patches/ruby/1.9.3/head/railsexpress/06-webrick_204_304_keep_alive_fix.patch
  28. +84 −0 patches/ruby/1.9.3/head/railsexpress/07-export-a-few-more-symbols-for-ruby-prof.patch
  29. +13 −0 patches/ruby/1.9.3/p125/railsexpress/09-webrick_204_304_keep_alive_fix.patch
  30. +36 −43 patches/ruby/1.9.3/p194/railsexpress/03-railsbench-gc.patch
  31. +2 −2 patches/ruby/1.9.3/p194/railsexpress/05-fork-support-for-gc-logging.patch
  32. +4 −4 patches/ruby/1.9.3/p194/railsexpress/06-track-live-dataset-size.patch
  33. +13 −0 patches/ruby/1.9.3/p194/railsexpress/07-webrick_204_304_keep_alive_fix.patch
  34. +84 −0 patches/ruby/1.9.3/p194/railsexpress/08-export-a-few-more-symbols-for-ruby-prof.patch
  35. +24 −0 patches/ruby/1.9.3/p194/railsexpress/09-avoid-using-broken-ffi-headers.patch
  36. +19 −0 patchsets/ruby/1.8.7/p370/railsexpress
  37. +7 −6 patchsets/ruby/1.9.3/head/railsexpress
  38. +1 −0  patchsets/ruby/1.9.3/p125/railsexpress
  39. +3 −0  patchsets/ruby/1.9.3/p194/railsexpress
View
101 patches/ruby/1.8.7/p370/railsexpress/01-ignore-generated-files.patch
@@ -0,0 +1,101 @@
+diff --git a/.gitignore b/.gitignore
+new file mode 100644
+index 0000000..00c347a
+--- /dev/null
++++ b/.gitignore
+@@ -0,0 +1,95 @@
++.ext
++.installed.list
++.rbconfig.time
++Makefile
++autom4te.cache/
++config.h
++config.status
++configure
++ext/Win32API/Makefile
++ext/bigdecimal/Makefile
++ext/curses/Makefile
++ext/dbm/Makefile
++ext/digest/Makefile
++ext/digest/bubblebabble/Makefile
++ext/digest/md5/Makefile
++ext/digest/rmd160/Makefile
++ext/digest/sha1/Makefile
++ext/digest/sha2/Makefile
++ext/dl/Makefile
++ext/dl/call.func
++ext/dl/callback.func
++ext/dl/cbtable.func
++ext/dl/dlconfig.h
++ext/dl/dlconfig.rb
++ext/enumerator/Makefile
++ext/etc/Makefile
++ext/fcntl/Makefile
++ext/gdbm/Makefile
++ext/iconv/Makefile
++ext/io/wait/Makefile
++ext/nkf/Makefile
++ext/openssl/Makefile
++ext/openssl/extconf.h
++ext/pty/Makefile
++ext/racc/cparse/Makefile
++ext/readline/Makefile
++ext/sdbm/Makefile
++ext/socket/Makefile
++ext/stringio/Makefile
++ext/strscan/Makefile
++ext/syck/Makefile
++ext/syslog/Makefile
++ext/thread/Makefile
++ext/tk/Makefile
++ext/tk/tkutil/Makefile
++ext/win32ole/Makefile
++ext/win32ole/.document
++ext/zlib/Makefile
++largefile.h
++miniruby
++parse.c
++rbconfig.rb
++ruby
++enc.mk
++ext/bigdecimal/extconf.h
++ext/continuation/
++ext/coverage/
++ext/curses/extconf.h
++ext/dbm/extconf.h
++ext/digest/bubblebabble/extconf.h
++ext/digest/extconf.h
++ext/digest/md5/extconf.h
++ext/digest/rmd160/extconf.h
++ext/digest/sha1/extconf.h
++ext/digest/sha2/extconf.h
++ext/dl/callback.h
++ext/dl/extconf.h
++ext/etc/extconf.h
++ext/fcntl/extconf.h
++ext/fiber/
++ext/iconv/extconf.h
++ext/io/wait/extconf.h
++ext/json/
++ext/nkf/extconf.h
++ext/pty/extconf.h
++ext/racc/cparse/extconf.h
++ext/readline/extconf.h
++ext/ripper/
++ext/sdbm/extconf.h
++ext/socket/constants.h
++ext/socket/extconf.h
++ext/stringio/extconf.h
++ext/strscan/extconf.h
++ext/syck/extconf.h
++ext/syslog/extconf.h
++ext/tk/extconf.h
++ext/tk/tkutil/extconf.h
++ext/zlib/extconf.h
++miniprelude.c
++prelude.c
++revision.h
++*.dylib
++*.log
++*.dSYM
++patches-ruby*
View
139 patches/ruby/1.8.7/p370/railsexpress/02-fix-tests-for-osx.patch
@@ -0,0 +1,139 @@
+diff --git a/test/drb/drbtest.rb b/test/drb/drbtest.rb
+index bc16ab1..c12b168 100644
+--- a/test/drb/drbtest.rb
++++ b/test/drb/drbtest.rb
+@@ -22,7 +22,7 @@ class DRbService
+ %w(ut_drb.rb ut_array.rb ut_port.rb ut_large.rb ut_safe1.rb ut_eval.rb).each do |nm|
+ add_service_command(nm)
+ end
+- @server = @@server = DRb::DRbServer.new('druby://localhost:0', @@manager, {})
++ @server = @@server = DRb::DRbServer.new('druby://127.0.0.1:0', @@manager, {})
+ @@manager.uri = @@server.uri
+ def self.manager
+ @@manager
+@@ -79,16 +79,16 @@ module DRbCore
+ end
+
+ def test_00_DRbObject
+- ro = DRbObject.new(nil, 'druby://localhost:12345')
+- assert_equal('druby://localhost:12345', ro.__drburi)
++ ro = DRbObject.new(nil, 'druby://127.0.0.1:12345')
++ assert_equal('druby://127.0.0.1:12345', ro.__drburi)
+ assert_equal(nil, ro.__drbref)
+
+- ro = DRbObject.new_with_uri('druby://localhost:12345')
+- assert_equal('druby://localhost:12345', ro.__drburi)
++ ro = DRbObject.new_with_uri('druby://127.0.0.1:12345')
++ assert_equal('druby://127.0.0.1:12345', ro.__drburi)
+ assert_equal(nil, ro.__drbref)
+
+- ro = DRbObject.new_with_uri('druby://localhost:12345?foobar')
+- assert_equal('druby://localhost:12345', ro.__drburi)
++ ro = DRbObject.new_with_uri('druby://127.0.0.1:12345?foobar')
++ assert_equal('druby://127.0.0.1:12345', ro.__drburi)
+ assert_equal(DRb::DRbURIOption.new('foobar'), ro.__drbref)
+ end
+
+diff --git a/test/drb/ut_drb.rb b/test/drb/ut_drb.rb
+index f5720cf..265713d 100644
+--- a/test/drb/ut_drb.rb
++++ b/test/drb/ut_drb.rb
+@@ -154,7 +154,7 @@ if __FILE__ == $0
+
+ DRb::DRbServer.default_argc_limit(8)
+ DRb::DRbServer.default_load_limit(4096)
+- DRb.start_service('druby://localhost:0', DRbEx.new)
++ DRb.start_service('druby://127.0.0.1:0', DRbEx.new)
+ es = DRb::ExtServ.new(ARGV.shift, ARGV.shift)
+ DRb.thread.join
+ end
+diff --git a/test/drb/ut_drb_drbssl.rb b/test/drb/ut_drb_drbssl.rb
+index 0a2191e..bca3012 100644
+--- a/test/drb/ut_drb_drbssl.rb
++++ b/test/drb/ut_drb_drbssl.rb
+@@ -18,7 +18,7 @@ if __FILE__ == $0
+
+ DRb::DRbServer.default_argc_limit(8)
+ DRb::DRbServer.default_load_limit(4096)
+- DRb.start_service('drbssl://localhost:0', DRbEx.new, config)
++ DRb.start_service('drbssl://127.0.0.1:0', DRbEx.new, config)
+ es = DRb::ExtServ.new(ARGV.shift, ARGV.shift)
+ DRb.thread.join
+ end
+diff --git a/test/drb/ut_eval.rb b/test/drb/ut_eval.rb
+index 4df963e..9127939 100644
+--- a/test/drb/ut_eval.rb
++++ b/test/drb/ut_eval.rb
+@@ -3,7 +3,7 @@ require 'drb/extserv'
+
+ class EvalAttack
+ def initialize
+- @four = DRb::DRbServer.new('druby://localhost:0', self, {:safe_level => 4})
++ @four = DRb::DRbServer.new('druby://127.0.0.1:0', self, {:safe_level => 4})
+ end
+
+ def four
+@@ -25,7 +25,7 @@ if __FILE__ == $0
+
+ $SAFE = 1
+
+- DRb.start_service('druby://localhost:0', EvalAttack.new, {:safe_level => 2})
++ DRb.start_service('druby://127.0.0.1:0', EvalAttack.new, {:safe_level => 2})
+ es = DRb::ExtServ.new(ARGV.shift, ARGV.shift)
+ DRb.thread.join
+ end
+diff --git a/test/drb/ut_large.rb b/test/drb/ut_large.rb
+index d6717c5..0aefd1b 100644
+--- a/test/drb/ut_large.rb
++++ b/test/drb/ut_large.rb
+@@ -31,7 +31,7 @@ if __FILE__ == $0
+
+ DRb::DRbServer.default_argc_limit(3)
+ DRb::DRbServer.default_load_limit(100000)
+- DRb.start_service('druby://localhost:0', DRbLarge.new)
++ DRb.start_service('druby://127.0.0.1:0', DRbLarge.new)
+ es = DRb::ExtServ.new(ARGV.shift, ARGV.shift)
+ DRb.thread.join
+ end
+diff --git a/test/drb/ut_safe1.rb b/test/drb/ut_safe1.rb
+index 4df8e1e..5e7fe82 100644
+--- a/test/drb/ut_safe1.rb
++++ b/test/drb/ut_safe1.rb
+@@ -8,7 +8,7 @@ if __FILE__ == $0
+ it
+ end
+
+- DRb.start_service('druby://localhost:0', [1, 2, 'III', 4, "five", 6],
++ DRb.start_service('druby://127.0.0.1:0', [1, 2, 'III', 4, "five", 6],
+ {:safe_level => 1})
+ es = DRb::ExtServ.new(ARGV.shift, ARGV.shift)
+ DRb.thread.join
+diff --git a/test/net/pop/test_pop.rb b/test/net/pop/test_pop.rb
+index c8aa9a8..129ca08 100644
+--- a/test/net/pop/test_pop.rb
++++ b/test/net/pop/test_pop.rb
+@@ -3,10 +3,14 @@ require 'test/unit'
+ require 'digest/md5'
+
+ class TestPOP < Test::Unit::TestCase
++ def localhost
++ '127.0.0.1'
++ end
++
+ def setup
+ @users = {'user' => 'pass' }
+ @ok_user = 'user'
+- @stamp_base = "#{$$}.#{Time.now.to_i}@localhost"
++ @stamp_base = "#{$$}.#{Time.now.to_i}@#{localhost}"
+ end
+
+ def test_pop_auth_ok
+@@ -64,7 +68,7 @@ class TestPOP < Test::Unit::TestCase
+ end
+
+ def pop_test(apop=false)
+- host = 'localhost'
++ host = localhost
+ server = TCPServer.new(host, 0)
+ port = server.addr[1]
+ thread = Thread.start do
View
25 patches/ruby/1.8.7/p370/railsexpress/03-sigvtalrm-fix.patch
@@ -0,0 +1,25 @@
+diff --git a/eval.c b/eval.c
+index c2049c7..f49044a 100644
+--- a/eval.c
++++ b/eval.c
+@@ -12483,6 +12483,11 @@ rb_thread_start_0(fn, arg, th)
+ curr_thread->next = th;
+ th->priority = curr_thread->priority;
+ th->thgroup = curr_thread->thgroup;
++#if defined(HAVE_SETITIMER) || defined(_THREAD_SAFE)
++ if (!thread_init) {
++ rb_thread_start_timer();
++ }
++#endif
+ }
+ START_TIMER();
+
+@@ -13211,7 +13216,9 @@ rb_thread_atfork()
+ main_thread = curr_thread;
+ curr_thread->next = curr_thread;
+ curr_thread->prev = curr_thread;
+- STOP_TIMER();
++#if defined(HAVE_SETITIMER) || defined(_THREAD_SAFE)
++ rb_thread_stop_timer();
++#endif
+ }
View
1,875 patches/ruby/1.8.7/p370/railsexpress/04-railsbench-gc-patch.patch
@@ -0,0 +1,1875 @@
+diff --git a/gc.c b/gc.c
+index fa45cd1..ab71d22 100644
+--- a/gc.c
++++ b/gc.c
+@@ -22,8 +22,16 @@
+ #include <setjmp.h>
+ #include <sys/types.h>
+
++#ifdef _WIN32
++#include <string.h>
++#else
++#include <strings.h>
++#endif
++
+ #ifdef HAVE_SYS_TIME_H
+ #include <sys/time.h>
++#elif defined(_WIN32)
++#include <time.h>
+ #endif
+
+ #ifdef HAVE_SYS_RESOURCE_H
+@@ -42,7 +50,6 @@ void rb_io_fptr_finalize _((struct rb_io_t*));
+ #ifdef __CYGWIN__
+ int _setjmp(), _longjmp();
+ #endif
+-
+ /* Make alloca work the best possible way. */
+ #ifdef __GNUC__
+ # ifndef atarist
+@@ -86,12 +93,12 @@ rb_memerror()
+ rb_thread_t th = rb_curr_thread;
+
+ if (!nomem_error ||
+- (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
+- fprintf(stderr, "[FATAL] failed to allocate memory\n");
+- exit(1);
++ (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
++ fprintf(stderr, "[FATAL] failed to allocate memory\n");
++ exit(1);
+ }
+ if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
+- rb_exc_jump(nomem_error);
++ rb_exc_jump(nomem_error);
+ }
+ rb_thread_raised_set(th, RAISED_NOMEMORY);
+ rb_exc_raise(nomem_error);
+@@ -139,7 +146,7 @@ ruby_xmalloc(size)
+ void *mem;
+
+ if (size < 0) {
+- rb_raise(rb_eNoMemError, "negative allocation size (or too big)");
++ rb_raise(rb_eNoMemError, "negative allocation size (or too big)");
+ }
+ if (size == 0) size = 1;
+
+@@ -148,11 +155,11 @@ ruby_xmalloc(size)
+ }
+ RUBY_CRITICAL(mem = malloc(size));
+ if (!mem) {
+- garbage_collect();
+- RUBY_CRITICAL(mem = malloc(size));
+- if (!mem) {
+- rb_memerror();
+- }
++ garbage_collect();
++ RUBY_CRITICAL(mem = malloc(size));
++ if (!mem) {
++ rb_memerror();
++ }
+ }
+ malloc_increase += size;
+
+@@ -179,17 +186,17 @@ ruby_xrealloc(ptr, size)
+ void *mem;
+
+ if (size < 0) {
+- rb_raise(rb_eArgError, "negative re-allocation size");
++ rb_raise(rb_eArgError, "negative re-allocation size");
+ }
+ if (!ptr) return xmalloc(size);
+ if (size == 0) size = 1;
+ if (ruby_gc_stress) garbage_collect();
+ RUBY_CRITICAL(mem = realloc(ptr, size));
+ if (!mem) {
+- garbage_collect();
+- RUBY_CRITICAL(mem = realloc(ptr, size));
+- if (!mem) {
+- rb_memerror();
++ garbage_collect();
++ RUBY_CRITICAL(mem = realloc(ptr, size));
++ if (!mem) {
++ rb_memerror();
+ }
+ }
+ malloc_increase += size;
+@@ -202,11 +209,20 @@ ruby_xfree(x)
+ void *x;
+ {
+ if (x)
+- RUBY_CRITICAL(free(x));
++ RUBY_CRITICAL(free(x));
+ }
+
++#if HAVE_LONG_LONG
++#define GC_TIME_TYPE LONG_LONG
++#else
++#define GC_TIME_TYPE long
++#endif
++
+ extern int ruby_in_compile;
+ static int dont_gc;
++static int gc_statistics = 0;
++static GC_TIME_TYPE gc_time = 0;
++static int gc_collections = 0;
+ static int during_gc;
+ static int need_call_final = 0;
+ static st_table *finalizer_table = 0;
+@@ -241,7 +257,7 @@ rb_gc_enable()
+ * Disables garbage collection, returning <code>true</code> if garbage
+ * collection was already disabled.
+ *
+- * GC.disable #=> false
++ * GC.disable #=> false or true
+ * GC.disable #=> true
+ *
+ */
+@@ -255,6 +271,104 @@ rb_gc_disable()
+ return old;
+ }
+
++/*
++ * call-seq:
++ * GC.enable_stats => true or false
++ *
++ * Enables garbage collection statistics, returning <code>true</code> if garbage
++ * collection statistics was already enabled.
++ *
++ * GC.enable_stats #=> false or true
++ * GC.enable_stats #=> true
++ *
++ */
++
++VALUE
++rb_gc_enable_stats()
++{
++ int old = gc_statistics;
++ gc_statistics = Qtrue;
++ return old;
++}
++
++/*
++ * call-seq:
++ * GC.disable_stats => true or false
++ *
++ * Disables garbage collection statistics, returning <code>true</code> if garbage
++ * collection statistics was already disabled.
++ *
++ * GC.disable_stats #=> false or true
++ * GC.disable_stats #=> true
++ *
++ */
++
++VALUE
++rb_gc_disable_stats()
++{
++ int old = gc_statistics;
++ gc_statistics = Qfalse;
++ return old;
++}
++
++/*
++ * call-seq:
++ * GC.clear_stats => nil
++ *
++ * Clears garbage collection statistics, returning nil. This resets the number
++ * of collections (GC.collections) and the time used (GC.time) to 0.
++ *
++ * GC.clear_stats #=> nil
++ *
++ */
++
++VALUE
++rb_gc_clear_stats()
++{
++ gc_collections = 0;
++ gc_time = 0;
++ return Qnil;
++}
++
++/*
++ * call-seq:
++ * GC.collections => Integer
++ *
++ * Returns the number of garbage collections performed while GC statistics collection
++ * was enabled.
++ *
++ * GC.collections #=> 35
++ *
++ */
++
++VALUE
++rb_gc_collections()
++{
++ return INT2NUM(gc_collections);
++}
++
++/*
++ * call-seq:
++ * GC.time => Integer
++ *
++ * Returns the time spent during garbage collection while GC statistics collection
++ * was enabled (in micro seconds).
++ *
++ * GC.time #=> 20000
++ *
++ */
++
++VALUE
++rb_gc_time()
++{
++#if HAVE_LONG_LONG
++ return LL2NUM(gc_time);
++#else
++ return LONG2NUM(gc_time);
++#endif
++}
++
++
+ VALUE rb_mGC;
+
+ static struct gc_list {
+@@ -281,19 +395,19 @@ rb_gc_unregister_address(addr)
+ struct gc_list *tmp = global_List;
+
+ if (tmp->varptr == addr) {
+- global_List = tmp->next;
+- RUBY_CRITICAL(free(tmp));
+- return;
++ global_List = tmp->next;
++ RUBY_CRITICAL(free(tmp));
++ return;
+ }
+ while (tmp->next) {
+- if (tmp->next->varptr == addr) {
+- struct gc_list *t = tmp->next;
++ if (tmp->next->varptr == addr) {
++ struct gc_list *t = tmp->next;
+
+- tmp->next = tmp->next->next;
+- RUBY_CRITICAL(free(t));
+- break;
+- }
+- tmp = tmp->next;
++ tmp->next = tmp->next->next;
++ RUBY_CRITICAL(free(t));
++ break;
++ }
++ tmp = tmp->next;
+ }
+ }
+
+@@ -312,26 +426,26 @@ rb_global_variable(var)
+
+ typedef struct RVALUE {
+ union {
+- struct {
+- unsigned long flags; /* always 0 for freed obj */
+- struct RVALUE *next;
+- } free;
+- struct RBasic basic;
+- struct RObject object;
+- struct RClass klass;
+- struct RFloat flonum;
+- struct RString string;
+- struct RArray array;
+- struct RRegexp regexp;
+- struct RHash hash;
+- struct RData data;
+- struct RStruct rstruct;
+- struct RBignum bignum;
+- struct RFile file;
+- struct RNode node;
+- struct RMatch match;
+- struct RVarmap varmap;
+- struct SCOPE scope;
++ struct {
++ unsigned long flags; /* always 0 for freed obj */
++ struct RVALUE *next;
++ } free;
++ struct RBasic basic;
++ struct RObject object;
++ struct RClass klass;
++ struct RFloat flonum;
++ struct RString string;
++ struct RArray array;
++ struct RRegexp regexp;
++ struct RHash hash;
++ struct RData data;
++ struct RStruct rstruct;
++ struct RBignum bignum;
++ struct RFile file;
++ struct RNode node;
++ struct RMatch match;
++ struct RVarmap varmap;
++ struct SCOPE scope;
+ } as;
+ #ifdef GC_DEBUG
+ char *file;
+@@ -346,7 +460,7 @@ typedef struct RVALUE {
+ static RVALUE *freelist = 0;
+ static RVALUE *deferred_final_list = 0;
+
+-#define HEAPS_INCREMENT 10
++static int heaps_increment = 10;
+ static struct heaps_slot {
+ void *membase;
+ RVALUE *slot;
+@@ -355,45 +469,197 @@ static struct heaps_slot {
+ static int heaps_length = 0;
+ static int heaps_used = 0;
+
+-#define HEAP_MIN_SLOTS 10000
+-static int heap_slots = HEAP_MIN_SLOTS;
++static int heap_min_slots = 10000;
++static int heap_slots = 10000;
+
+-#define FREE_MIN 4096
++static int heap_free_min = 4096;
++static int heap_slots_increment = 10000;
++static double heap_slots_growth_factor = 1.8;
++
++static long initial_malloc_limit = GC_MALLOC_LIMIT;
++
++static int verbose_gc_stats = Qfalse;
++
++static FILE* gc_data_file = NULL;
+
+ static RVALUE *himem, *lomem;
+
++static void set_gc_parameters()
++{
++ char *gc_stats_ptr, *min_slots_ptr, *free_min_ptr, *heap_slots_incr_ptr,
++ *heap_incr_ptr, *malloc_limit_ptr, *gc_heap_file_ptr, *heap_slots_growth_factor_ptr;
++
++ gc_data_file = stderr;
++
++ gc_stats_ptr = getenv("RUBY_GC_STATS");
++ if (gc_stats_ptr != NULL) {
++ int gc_stats_i = atoi(gc_stats_ptr);
++ if (gc_stats_i > 0) {
++ verbose_gc_stats = Qtrue;
++ }
++ }
++
++ gc_heap_file_ptr = getenv("RUBY_GC_DATA_FILE");
++ if (gc_heap_file_ptr != NULL) {
++ FILE* data_file = fopen(gc_heap_file_ptr, "w");
++ if (data_file != NULL) {
++ gc_data_file = data_file;
++ }
++ else {
++ fprintf(stderr,
++ "can't open gc log file %s for writing, using default\n", gc_heap_file_ptr);
++ }
++ }
++
++ min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
++ if (min_slots_ptr != NULL) {
++ int min_slots_i = atoi(min_slots_ptr);
++ if (verbose_gc_stats) {
++ fprintf(gc_data_file, "RUBY_HEAP_MIN_SLOTS=%s\n", min_slots_ptr);
++ }
++ if (min_slots_i > 0) {
++ heap_slots = min_slots_i;
++ heap_min_slots = min_slots_i;
++ }
++ }
++
++ free_min_ptr = getenv("RUBY_HEAP_FREE_MIN");
++ if (free_min_ptr != NULL) {
++ int free_min_i = atoi(free_min_ptr);
++ if (verbose_gc_stats) {
++ fprintf(gc_data_file, "RUBY_HEAP_FREE_MIN=%s\n", free_min_ptr);
++ }
++ if (free_min_i > 0) {
++ heap_free_min = free_min_i;
++ }
++ }
++
++ heap_incr_ptr = getenv("RUBY_HEAP_INCREMENT");
++ if (heap_incr_ptr != NULL) {
++ int heap_incr_i = atoi(heap_incr_ptr);
++ if (verbose_gc_stats) {
++ fprintf(gc_data_file, "RUBY_HEAP_INCREMENT=%s\n", heap_incr_ptr);
++ }
++ if (heap_incr_i > 0) {
++ heaps_increment = heap_incr_i;
++ }
++ }
++
++ heap_slots_incr_ptr = getenv("RUBY_HEAP_SLOTS_INCREMENT");
++ if (heap_slots_incr_ptr != NULL) {
++ int heap_slots_incr_i = atoi(heap_slots_incr_ptr);
++ if (verbose_gc_stats) {
++ fprintf(gc_data_file, "RUBY_HEAP_SLOTS_INCREMENT=%s\n", heap_slots_incr_ptr);
++ }
++ if (heap_slots_incr_i > 0) {
++ heap_slots_increment = heap_slots_incr_i;
++ }
++ }
++
++ heap_slots_growth_factor_ptr = getenv("RUBY_HEAP_SLOTS_GROWTH_FACTOR");
++ if (heap_slots_growth_factor_ptr != NULL) {
++ double heap_slots_growth_factor_d = atoi(heap_slots_growth_factor_ptr);
++ if (verbose_gc_stats) {
++ fprintf(gc_data_file, "RUBY_HEAP_SLOTS_GROWTH_FACTOR=%s\n", heap_slots_growth_factor_ptr);
++ }
++ if (heap_slots_growth_factor_d > 0) {
++ heap_slots_growth_factor = heap_slots_growth_factor_d;
++ }
++ }
++
++ malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
++ if (malloc_limit_ptr != NULL) {
++ int malloc_limit_i = atol(malloc_limit_ptr);
++ if (verbose_gc_stats) {
++ fprintf(gc_data_file, "RUBY_GC_MALLOC_LIMIT=%s\n", malloc_limit_ptr);
++ }
++ if (malloc_limit_i > 0) {
++ initial_malloc_limit = malloc_limit_i;
++ }
++ }
++}
++
++/*
++ * call-seq:
++ * GC.dump => nil
++ *
++ * dumps information about the current GC data structures to the GC log file
++ *
++ * GC.dump #=> nil
++ *
++ */
++
++VALUE
++rb_gc_dump()
++{
++ int i;
++
++ for (i = 0; i < heaps_used; i++) {
++ int heap_size = heaps[i].limit;
++ fprintf(gc_data_file, "HEAP[%2d]: size=%7d\n", i, heap_size);
++ }
++
++ return Qnil;
++}
++
++/*
++ * call-seq:
++ * GC.log String => String
++ *
++ * Logs string to the GC data file and returns it.
++ *
++ * GC.log "manual GC call" #=> "manual GC call"
++ *
++ */
++
++VALUE
++rb_gc_log(self, original_str)
++ VALUE self, original_str;
++{
++ if (original_str == Qnil) {
++ fprintf(gc_data_file, "\n");
++ }
++ else {
++ VALUE str = StringValue(original_str);
++ char *p = RSTRING(str)->ptr;
++ fprintf(gc_data_file, "%s\n", p);
++ }
++ return original_str;
++}
++
++
+ static void
+ add_heap()
+ {
+ RVALUE *p, *pend;
+
+ if (heaps_used == heaps_length) {
+- /* Realloc heaps */
+- struct heaps_slot *p;
+- int length;
+-
+- heaps_length += HEAPS_INCREMENT;
+- length = heaps_length*sizeof(struct heaps_slot);
+- RUBY_CRITICAL(
+- if (heaps_used > 0) {
+- p = (struct heaps_slot *)realloc(heaps, length);
+- if (p) heaps = p;
+- }
+- else {
+- p = heaps = (struct heaps_slot *)malloc(length);
+- });
+- if (p == 0) rb_memerror();
++ /* Realloc heaps */
++ struct heaps_slot *p;
++ int length;
++
++ heaps_length += heaps_increment;
++ length = heaps_length*sizeof(struct heaps_slot);
++ RUBY_CRITICAL(
++ if (heaps_used > 0) {
++ p = (struct heaps_slot *)realloc(heaps, length);
++ if (p) heaps = p;
++ }
++ else {
++ p = heaps = (struct heaps_slot *)malloc(length);
++ });
++ if (p == 0) rb_memerror();
+ }
+
+ for (;;) {
+- RUBY_CRITICAL(p = (RVALUE*)malloc(sizeof(RVALUE)*(heap_slots+1)));
+- if (p == 0) {
+- if (heap_slots == HEAP_MIN_SLOTS) {
+- rb_memerror();
+- }
+- heap_slots = HEAP_MIN_SLOTS;
+- continue;
+- }
++ RUBY_CRITICAL(p = (RVALUE*)malloc(sizeof(RVALUE)*(heap_slots+1)));
++ if (p == 0) {
++ if (heap_slots == heap_min_slots) {
++ rb_memerror();
++ }
++ heap_slots = heap_min_slots;
++ continue;
++ }
+ heaps[heaps_used].membase = p;
+ if ((VALUE)p % sizeof(RVALUE) == 0)
+ heap_slots += 1;
+@@ -401,25 +667,26 @@ add_heap()
+ p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
+ heaps[heaps_used].slot = p;
+ heaps[heaps_used].limit = heap_slots;
+- break;
++ break;
+ }
+ pend = p + heap_slots;
+ if (lomem == 0 || lomem > p) lomem = p;
+ if (himem < pend) himem = pend;
+ heaps_used++;
+- heap_slots *= 1.8;
+- if (heap_slots <= 0) heap_slots = HEAP_MIN_SLOTS;
++ heap_slots += heap_slots_increment;
++ heap_slots_increment *= heap_slots_growth_factor;
++ if (heap_slots <= 0) heap_slots = heap_min_slots;
+
+ while (p < pend) {
+- p->as.free.flags = 0;
+- p->as.free.next = freelist;
+- freelist = p;
+- p++;
++ p->as.free.flags = 0;
++ p->as.free.next = freelist;
++ freelist = p;
++ p++;
+ }
+ }
+ #define RANY(o) ((RVALUE*)(o))
+
+-int
++int
+ rb_during_gc()
+ {
+ return during_gc;
+@@ -431,7 +698,7 @@ rb_newobj()
+ VALUE obj;
+
+ if (during_gc)
+- rb_bug("object allocation during garbage collection phase");
++ rb_bug("object allocation during garbage collection phase");
+
+ if (ruby_gc_stress || !freelist) garbage_collect();
+
+@@ -580,13 +847,13 @@ rb_source_filename(f)
+ st_data_t name;
+
+ if (!st_lookup(source_filenames, (st_data_t)f, &name)) {
+- long len = strlen(f) + 1;
+- char *ptr = ALLOC_N(char, len + 1);
+- name = (st_data_t)ptr;
+- *ptr++ = 0;
+- MEMCPY(ptr, f, char, len);
+- st_add_direct(source_filenames, (st_data_t)ptr, name);
+- return ptr;
++ long len = strlen(f) + 1;
++ char *ptr = ALLOC_N(char, len + 1);
++ name = (st_data_t)ptr;
++ *ptr++ = 0;
++ MEMCPY(ptr, f, char, len);
++ st_add_direct(source_filenames, (st_data_t)ptr, name);
++ return ptr;
+ }
+ return (char *)name + 1;
+ }
+@@ -596,7 +863,7 @@ mark_source_filename(f)
+ char *f;
+ {
+ if (f) {
+- f[-1] = 1;
++ f[-1] = 1;
+ }
+ }
+
+@@ -605,12 +872,12 @@ sweep_source_filename(key, value)
+ char *key, *value;
+ {
+ if (*value) {
+- *value = 0;
+- return ST_CONTINUE;
++ *value = 0;
++ return ST_CONTINUE;
+ }
+ else {
+- free(value);
+- return ST_DELETE;
++ free(value);
++ return ST_DELETE;
+ }
+ }
+
+@@ -625,14 +892,14 @@ gc_mark_all()
+
+ init_mark_stack();
+ for (i = 0; i < heaps_used; i++) {
+- p = heaps[i].slot; pend = p + heaps[i].limit;
+- while (p < pend) {
+- if ((p->as.basic.flags & FL_MARK) &&
+- (p->as.basic.flags != FL_MARK)) {
+- gc_mark_children((VALUE)p, 0);
+- }
+- p++;
+- }
++ p = heaps[i].slot; pend = p + heaps[i].limit;
++ while (p < pend) {
++ if ((p->as.basic.flags & FL_MARK) &&
++ (p->as.basic.flags != FL_MARK)) {
++ gc_mark_children((VALUE)p, 0);
++ }
++ p++;
++ }
+ }
+ }
+
+@@ -647,8 +914,8 @@ gc_mark_rest()
+
+ init_mark_stack();
+ while(p != tmp_arry){
+- p--;
+- gc_mark_children(*p, 0);
++ p--;
++ gc_mark_children(*p, 0);
+ }
+ }
+
+@@ -665,9 +932,9 @@ is_pointer_to_heap(ptr)
+
+ /* check if p looks like a pointer */
+ for (i=0; i < heaps_used; i++) {
+- heap_org = heaps[i].slot;
+- if (heap_org <= p && p < heap_org + heaps[i].limit)
+- return Qtrue;
++ heap_org = heaps[i].slot;
++ if (heap_org <= p && p < heap_org + heaps[i].limit)
++ return Qtrue;
+ }
+ return Qfalse;
+ }
+@@ -680,10 +947,10 @@ mark_locations_array(x, n)
+ VALUE v;
+ while (n--) {
+ v = *x;
+- if (is_pointer_to_heap((void *)v)) {
+- gc_mark(v, 0);
+- }
+- x++;
++ if (is_pointer_to_heap((void *)v)) {
++ gc_mark(v, 0);
++ }
++ x++;
+ }
+ }
+
+@@ -780,7 +1047,7 @@ rb_gc_mark_maybe(obj)
+ VALUE obj;
+ {
+ if (is_pointer_to_heap((void *)obj)) {
+- gc_mark(obj, 0);
++ gc_mark(obj, 0);
+ }
+ }
+
+@@ -828,7 +1095,7 @@ gc_mark_children(ptr, lev)
+ {
+ register RVALUE *obj = RANY(ptr);
+
+- goto marking; /* skip */
++ goto marking; /* skip */
+
+ again:
+ obj = RANY(ptr);
+@@ -839,148 +1106,148 @@ gc_mark_children(ptr, lev)
+
+ marking:
+ if (FL_TEST(obj, FL_EXIVAR)) {
+- rb_mark_generic_ivar(ptr);
++ rb_mark_generic_ivar(ptr);
+ }
+
+ switch (obj->as.basic.flags & T_MASK) {
+ case T_NIL:
+ case T_FIXNUM:
+- rb_bug("rb_gc_mark() called for broken object");
+- break;
++ rb_bug("rb_gc_mark() called for broken object");
++ break;
+
+ case T_NODE:
+- mark_source_filename(obj->as.node.nd_file);
+- switch (nd_type(obj)) {
+- case NODE_IF: /* 1,2,3 */
+- case NODE_FOR:
+- case NODE_ITER:
+- case NODE_CREF:
+- case NODE_WHEN:
+- case NODE_MASGN:
+- case NODE_RESCUE:
+- case NODE_RESBODY:
+- case NODE_CLASS:
+- gc_mark((VALUE)obj->as.node.u2.node, lev);
+- /* fall through */
+- case NODE_BLOCK: /* 1,3 */
+- case NODE_ARRAY:
+- case NODE_DSTR:
+- case NODE_DXSTR:
+- case NODE_DREGX:
+- case NODE_DREGX_ONCE:
+- case NODE_FBODY:
+- case NODE_ENSURE:
+- case NODE_CALL:
+- case NODE_DEFS:
+- case NODE_OP_ASGN1:
+- gc_mark((VALUE)obj->as.node.u1.node, lev);
+- /* fall through */
+- case NODE_SUPER: /* 3 */
+- case NODE_FCALL:
+- case NODE_DEFN:
+- case NODE_NEWLINE:
+- ptr = (VALUE)obj->as.node.u3.node;
+- goto again;
+-
+- case NODE_WHILE: /* 1,2 */
+- case NODE_UNTIL:
+- case NODE_AND:
+- case NODE_OR:
+- case NODE_CASE:
+- case NODE_SCLASS:
+- case NODE_DOT2:
+- case NODE_DOT3:
+- case NODE_FLIP2:
+- case NODE_FLIP3:
+- case NODE_MATCH2:
+- case NODE_MATCH3:
+- case NODE_OP_ASGN_OR:
+- case NODE_OP_ASGN_AND:
+- case NODE_MODULE:
+- case NODE_ALIAS:
+- case NODE_VALIAS:
+- case NODE_ARGS:
+- gc_mark((VALUE)obj->as.node.u1.node, lev);
+- /* fall through */
+- case NODE_METHOD: /* 2 */
+- case NODE_NOT:
+- case NODE_GASGN:
+- case NODE_LASGN:
+- case NODE_DASGN:
+- case NODE_DASGN_CURR:
+- case NODE_IASGN:
+- case NODE_CVDECL:
+- case NODE_CVASGN:
+- case NODE_COLON3:
+- case NODE_OPT_N:
+- case NODE_EVSTR:
+- case NODE_UNDEF:
+- ptr = (VALUE)obj->as.node.u2.node;
+- goto again;
+-
+- case NODE_HASH: /* 1 */
+- case NODE_LIT:
+- case NODE_STR:
+- case NODE_XSTR:
+- case NODE_DEFINED:
+- case NODE_MATCH:
+- case NODE_RETURN:
+- case NODE_BREAK:
+- case NODE_NEXT:
+- case NODE_YIELD:
+- case NODE_COLON2:
+- case NODE_SPLAT:
+- case NODE_TO_ARY:
+- case NODE_SVALUE:
+- ptr = (VALUE)obj->as.node.u1.node;
+- goto again;
+-
+- case NODE_SCOPE: /* 2,3 */
+- case NODE_BLOCK_PASS:
+- case NODE_CDECL:
+- gc_mark((VALUE)obj->as.node.u3.node, lev);
+- ptr = (VALUE)obj->as.node.u2.node;
+- goto again;
+-
+- case NODE_ZARRAY: /* - */
+- case NODE_ZSUPER:
+- case NODE_CFUNC:
+- case NODE_VCALL:
+- case NODE_GVAR:
+- case NODE_LVAR:
+- case NODE_DVAR:
+- case NODE_IVAR:
+- case NODE_CVAR:
+- case NODE_NTH_REF:
+- case NODE_BACK_REF:
+- case NODE_REDO:
+- case NODE_RETRY:
+- case NODE_SELF:
+- case NODE_NIL:
+- case NODE_TRUE:
+- case NODE_FALSE:
+- case NODE_ATTRSET:
+- case NODE_BLOCK_ARG:
+- case NODE_POSTEXE:
+- break;
+- case NODE_ALLOCA:
+- mark_locations_array((VALUE*)obj->as.node.u1.value,
+- obj->as.node.u3.cnt);
+- ptr = (VALUE)obj->as.node.u2.node;
+- goto again;
+-
+- default: /* unlisted NODE */
+- if (is_pointer_to_heap(obj->as.node.u1.node)) {
+- gc_mark((VALUE)obj->as.node.u1.node, lev);
+- }
+- if (is_pointer_to_heap(obj->as.node.u2.node)) {
+- gc_mark((VALUE)obj->as.node.u2.node, lev);
+- }
+- if (is_pointer_to_heap(obj->as.node.u3.node)) {
+- gc_mark((VALUE)obj->as.node.u3.node, lev);
+- }
+- }
+- return; /* no need to mark class. */
++ mark_source_filename(obj->as.node.nd_file);
++ switch (nd_type(obj)) {
++ case NODE_IF: /* 1,2,3 */
++ case NODE_FOR:
++ case NODE_ITER:
++ case NODE_CREF:
++ case NODE_WHEN:
++ case NODE_MASGN:
++ case NODE_RESCUE:
++ case NODE_RESBODY:
++ case NODE_CLASS:
++ gc_mark((VALUE)obj->as.node.u2.node, lev);
++ /* fall through */
++ case NODE_BLOCK: /* 1,3 */
++ case NODE_ARRAY:
++ case NODE_DSTR:
++ case NODE_DXSTR:
++ case NODE_DREGX:
++ case NODE_DREGX_ONCE:
++ case NODE_FBODY:
++ case NODE_ENSURE:
++ case NODE_CALL:
++ case NODE_DEFS:
++ case NODE_OP_ASGN1:
++ gc_mark((VALUE)obj->as.node.u1.node, lev);
++ /* fall through */
++ case NODE_SUPER: /* 3 */
++ case NODE_FCALL:
++ case NODE_DEFN:
++ case NODE_NEWLINE:
++ ptr = (VALUE)obj->as.node.u3.node;
++ goto again;
++
++ case NODE_WHILE: /* 1,2 */
++ case NODE_UNTIL:
++ case NODE_AND:
++ case NODE_OR:
++ case NODE_CASE:
++ case NODE_SCLASS:
++ case NODE_DOT2:
++ case NODE_DOT3:
++ case NODE_FLIP2:
++ case NODE_FLIP3:
++ case NODE_MATCH2:
++ case NODE_MATCH3:
++ case NODE_OP_ASGN_OR:
++ case NODE_OP_ASGN_AND:
++ case NODE_MODULE:
++ case NODE_ALIAS:
++ case NODE_VALIAS:
++ case NODE_ARGS:
++ gc_mark((VALUE)obj->as.node.u1.node, lev);
++ /* fall through */
++ case NODE_METHOD: /* 2 */
++ case NODE_NOT:
++ case NODE_GASGN:
++ case NODE_LASGN:
++ case NODE_DASGN:
++ case NODE_DASGN_CURR:
++ case NODE_IASGN:
++ case NODE_CVDECL:
++ case NODE_CVASGN:
++ case NODE_COLON3:
++ case NODE_OPT_N:
++ case NODE_EVSTR:
++ case NODE_UNDEF:
++ ptr = (VALUE)obj->as.node.u2.node;
++ goto again;
++
++ case NODE_HASH: /* 1 */
++ case NODE_LIT:
++ case NODE_STR:
++ case NODE_XSTR:
++ case NODE_DEFINED:
++ case NODE_MATCH:
++ case NODE_RETURN:
++ case NODE_BREAK:
++ case NODE_NEXT:
++ case NODE_YIELD:
++ case NODE_COLON2:
++ case NODE_SPLAT:
++ case NODE_TO_ARY:
++ case NODE_SVALUE:
++ ptr = (VALUE)obj->as.node.u1.node;
++ goto again;
++
++ case NODE_SCOPE: /* 2,3 */
++ case NODE_BLOCK_PASS:
++ case NODE_CDECL:
++ gc_mark((VALUE)obj->as.node.u3.node, lev);
++ ptr = (VALUE)obj->as.node.u2.node;
++ goto again;
++
++ case NODE_ZARRAY: /* - */
++ case NODE_ZSUPER:
++ case NODE_CFUNC:
++ case NODE_VCALL:
++ case NODE_GVAR:
++ case NODE_LVAR:
++ case NODE_DVAR:
++ case NODE_IVAR:
++ case NODE_CVAR:
++ case NODE_NTH_REF:
++ case NODE_BACK_REF:
++ case NODE_REDO:
++ case NODE_RETRY:
++ case NODE_SELF:
++ case NODE_NIL:
++ case NODE_TRUE:
++ case NODE_FALSE:
++ case NODE_ATTRSET:
++ case NODE_BLOCK_ARG:
++ case NODE_POSTEXE:
++ break;
++ case NODE_ALLOCA:
++ mark_locations_array((VALUE*)obj->as.node.u1.value,
++ obj->as.node.u3.cnt);
++ ptr = (VALUE)obj->as.node.u2.node;
++ goto again;
++
++ default: /* unlisted NODE */
++ if (is_pointer_to_heap(obj->as.node.u1.node)) {
++ gc_mark((VALUE)obj->as.node.u1.node, lev);
++ }
++ if (is_pointer_to_heap(obj->as.node.u2.node)) {
++ gc_mark((VALUE)obj->as.node.u2.node, lev);
++ }
++ if (is_pointer_to_heap(obj->as.node.u3.node)) {
++ gc_mark((VALUE)obj->as.node.u3.node, lev);
++ }
++ }
++ return; /* no need to mark class. */
+ }
+
+ gc_mark(obj->as.basic.klass, lev);
+@@ -988,92 +1255,92 @@ gc_mark_children(ptr, lev)
+ case T_ICLASS:
+ case T_CLASS:
+ case T_MODULE:
+- mark_tbl(obj->as.klass.m_tbl, lev);
+- mark_tbl(obj->as.klass.iv_tbl, lev);
+- ptr = obj->as.klass.super;
+- goto again;
++ mark_tbl(obj->as.klass.m_tbl, lev);
++ mark_tbl(obj->as.klass.iv_tbl, lev);
++ ptr = obj->as.klass.super;
++ goto again;
+
+ case T_ARRAY:
+- if (FL_TEST(obj, ELTS_SHARED)) {
+- ptr = obj->as.array.aux.shared;
+- goto again;
+- }
+- else {
+- long i, len = obj->as.array.len;
+- VALUE *ptr = obj->as.array.ptr;
++ if (FL_TEST(obj, ELTS_SHARED)) {
++ ptr = obj->as.array.aux.shared;
++ goto again;
++ }
++ else {
++ long i, len = obj->as.array.len;
++ VALUE *ptr = obj->as.array.ptr;
+
+- for (i=0; i < len; i++) {
+- gc_mark(*ptr++, lev);
+- }
+- }
+- break;
++ for (i=0; i < len; i++) {
++ gc_mark(*ptr++, lev);
++ }
++ }
++ break;
+
+ case T_HASH:
+- mark_hash(obj->as.hash.tbl, lev);
+- ptr = obj->as.hash.ifnone;
+- goto again;
++ mark_hash(obj->as.hash.tbl, lev);
++ ptr = obj->as.hash.ifnone;
++ goto again;
+
+ case T_STRING:
+ #define STR_ASSOC FL_USER3 /* copied from string.c */
+- if (FL_TEST(obj, ELTS_SHARED|STR_ASSOC)) {
+- ptr = obj->as.string.aux.shared;
+- goto again;
+- }
+- break;
++ if (FL_TEST(obj, ELTS_SHARED|STR_ASSOC)) {
++ ptr = obj->as.string.aux.shared;
++ goto again;
++ }
++ break;
+
+ case T_DATA:
+- if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
+- break;
++ if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
++ break;
+
+ case T_OBJECT:
+- mark_tbl(obj->as.object.iv_tbl, lev);
+- break;
++ mark_tbl(obj->as.object.iv_tbl, lev);
++ break;
+
+ case T_FILE:
+ case T_REGEXP:
+ case T_FLOAT:
+ case T_BIGNUM:
+ case T_BLKTAG:
+- break;
++ break;
+
+ case T_MATCH:
+- if (obj->as.match.str) {
+- ptr = obj->as.match.str;
+- goto again;
+- }
+- break;
++ if (obj->as.match.str) {
++ ptr = obj->as.match.str;
++ goto again;
++ }
++ break;
+
+ case T_VARMAP:
+- gc_mark(obj->as.varmap.val, lev);
+- ptr = (VALUE)obj->as.varmap.next;
+- goto again;
++ gc_mark(obj->as.varmap.val, lev);
++ ptr = (VALUE)obj->as.varmap.next;
++ goto again;
+
+ case T_SCOPE:
+- if (obj->as.scope.local_vars && (obj->as.scope.flags & SCOPE_MALLOC)) {
+- int n = obj->as.scope.local_tbl[0]+1;
+- VALUE *vars = &obj->as.scope.local_vars[-1];
++ if (obj->as.scope.local_vars && (obj->as.scope.flags & SCOPE_MALLOC)) {
++ int n = obj->as.scope.local_tbl[0]+1;
++ VALUE *vars = &obj->as.scope.local_vars[-1];
+
+- while (n--) {
+- gc_mark(*vars++, lev);
+- }
+- }
+- break;
++ while (n--) {
++ gc_mark(*vars++, lev);
++ }
++ }
++ break;
+
+ case T_STRUCT:
+- {
+- long len = obj->as.rstruct.len;
+- VALUE *ptr = obj->as.rstruct.ptr;
++ {
++ long len = obj->as.rstruct.len;
++ VALUE *ptr = obj->as.rstruct.ptr;
+
+- while (len--) {
+- gc_mark(*ptr++, lev);
+- }
+- }
+- break;
++ while (len--) {
++ gc_mark(*ptr++, lev);
++ }
++ }
++ break;
+
+ default:
+- rb_bug("rb_gc_mark(): unknown data type 0x%lx(0x%lx) %s",
+- obj->as.basic.flags & T_MASK, obj,
+- is_pointer_to_heap(obj) ? "corrupted object" : "non object");
++ rb_bug("rb_gc_mark(): unknown data type 0x%lx(0x%lx) %s",
++ obj->as.basic.flags & T_MASK, obj,
++ is_pointer_to_heap(obj) ? "corrupted object" : "non object");
+ }
+ }
+
+@@ -1102,22 +1369,55 @@ finalize_list(p)
+ }
+ }
+
++static char* obj_type(int tp)
++{
++ switch (tp) {
++ case T_NIL : return "NIL";
++ case T_OBJECT : return "OBJECT";
++ case T_CLASS : return "CLASS";
++ case T_ICLASS : return "ICLASS";
++ case T_MODULE : return "MODULE";
++ case T_FLOAT : return "FLOAT";
++ case T_STRING : return "STRING";
++ case T_REGEXP : return "REGEXP";
++ case T_ARRAY : return "ARRAY";
++ case T_FIXNUM : return "FIXNUM";
++ case T_HASH : return "HASH";
++ case T_STRUCT : return "STRUCT";
++ case T_BIGNUM : return "BIGNUM";
++ case T_FILE : return "FILE";
++
++ case T_TRUE : return "TRUE";
++ case T_FALSE : return "FALSE";
++ case T_DATA : return "DATA";
++ case T_MATCH : return "MATCH";
++ case T_SYMBOL : return "SYMBOL";
++
++ case T_BLKTAG : return "BLKTAG";
++ case T_UNDEF : return "UNDEF";
++ case T_VARMAP : return "VARMAP";
++ case T_SCOPE : return "SCOPE";
++ case T_NODE : return "NODE";
++ default: return "____";
++ }
++}
++
+ static void
+ free_unused_heaps()
+ {
+ int i, j;
+
+ for (i = j = 1; j < heaps_used; i++) {
+- if (heaps[i].limit == 0) {
+- free(heaps[i].membase);
+- heaps_used--;
+- }
+- else {
+- if (i != j) {
+- heaps[j] = heaps[i];
+- }
+- j++;
+- }
++ if (heaps[i].limit == 0) {
++ free(heaps[i].membase);
++ heaps_used--;
++ }
++ else {
++ if (i != j) {
++ heaps[j] = heaps[i];
++ }
++ j++;
++ }
+ }
+ }
+
+@@ -1134,24 +1434,33 @@ gc_sweep()
+ unsigned long live = 0;
+ unsigned long free_min = 0;
+
++ unsigned long really_freed = 0;
++ int free_counts[256];
++ int live_counts[256];
++ int do_gc_stats = gc_statistics & verbose_gc_stats;
++
+ for (i = 0; i < heaps_used; i++) {
+ free_min += heaps[i].limit;
+ }
+ free_min = free_min * 0.2;
+- if (free_min < FREE_MIN)
+- free_min = FREE_MIN;
++ if (free_min < heap_free_min)
++ free_min = heap_free_min;
++
++ if (do_gc_stats) {
++ for (i = 0 ; i< 256; i++) { free_counts[i] = live_counts[i] = 0; }
++ }
+
+ if (ruby_in_compile && ruby_parser_stack_on_heap()) {
+- /* should not reclaim nodes during compilation
++ /* should not reclaim nodes during compilation
+ if yacc's semantic stack is not allocated on machine stack */
+- for (i = 0; i < heaps_used; i++) {
+- p = heaps[i].slot; pend = p + heaps[i].limit;
+- while (p < pend) {
+- if (!(p->as.basic.flags&FL_MARK) && BUILTIN_TYPE(p) == T_NODE)
+- gc_mark((VALUE)p, 0);
+- p++;
+- }
+- }
++ for (i = 0; i < heaps_used; i++) {
++ p = heaps[i].slot; pend = p + heaps[i].limit;
++ while (p < pend) {
++ if (!(p->as.basic.flags&FL_MARK) && BUILTIN_TYPE(p) == T_NODE)
++ gc_mark((VALUE)p, 0);
++ p++;
++ }
++ }
+ }
+
+ mark_source_filename(ruby_sourcefile);
+@@ -1172,7 +1481,7 @@ gc_sweep()
+ while (p < pend) {
+ if (!(p->as.basic.flags & FL_MARK)) {
+ if (p->as.basic.flags &&
+- ((deferred = obj_free((VALUE)p)) ||
++ (((do_gc_stats && really_freed++), deferred = obj_free((VALUE)p)) ||
+ ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) {
+ if (!deferred) {
+ p->as.free.flags = T_DEFERRED;
+@@ -1183,6 +1492,12 @@ gc_sweep()
+ final_list = p;
+ }
+ else {
++ if (do_gc_stats) {
++ int obt = p->as.basic.flags & T_MASK;
++ if (obt) {
++ free_counts[obt]++;
++ }
++ }
+ add_freelist(p);
+ }
+ n++;
+@@ -1194,6 +1509,9 @@ gc_sweep()
+ else {
+ RBASIC(p)->flags &= ~FL_MARK;
+ live++;
++ if (do_gc_stats) {
++ live_counts[RANY((VALUE)p)->as.basic.flags & T_MASK]++;
++ }
+ }
+ p++;
+ }
+@@ -1211,15 +1529,29 @@ gc_sweep()
+ }
+ }
+ if (malloc_increase > malloc_limit) {
+- malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed);
+- if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
++ malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed);
++ if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
+ }
+ malloc_increase = 0;
+ if (freed < free_min) {
+- add_heap();
++ add_heap();
+ }
+ during_gc = 0;
+
++ if (do_gc_stats) {
++	fprintf(gc_data_file, "objects processed: %.7lu\n", live+freed);
++	fprintf(gc_data_file, "live objects : %.7lu\n", live);
++	fprintf(gc_data_file, "freelist objects : %.7lu\n", freed - really_freed);
++	fprintf(gc_data_file, "freed objects : %.7lu\n", really_freed);
++ for(i=0; i<256; i++) {
++ if (free_counts[i]>0 || live_counts[i]>0) {
++ fprintf(gc_data_file,
++ "kept %.7d / freed %.7d objects of type %s\n",
++ live_counts[i], free_counts[i], obj_type(i));
++ }
++ }
++ }
++
+ /* clear finalization list */
+ if (final_list) {
+ deferred_final_list = final_list;
+@@ -1260,51 +1592,51 @@ obj_free(obj)
+ case T_FIXNUM:
+ case T_TRUE:
+ case T_FALSE:
+- rb_bug("obj_free() called for broken object");
+- break;
++ rb_bug("obj_free() called for broken object");
++ break;
+ }
+
+ if (FL_TEST(obj, FL_EXIVAR)) {
+- rb_free_generic_ivar((VALUE)obj);
++ rb_free_generic_ivar((VALUE)obj);
+ }
+
+ switch (BUILTIN_TYPE(obj)) {
+ case T_OBJECT:
+- if (RANY(obj)->as.object.iv_tbl) {
+- st_free_table(RANY(obj)->as.object.iv_tbl);
+- }
+- break;
++ if (RANY(obj)->as.object.iv_tbl) {
++ st_free_table(RANY(obj)->as.object.iv_tbl);
++ }
++ break;
+ case T_MODULE:
+ case T_CLASS:
+- rb_clear_cache_by_class((VALUE)obj);
+- st_free_table(RANY(obj)->as.klass.m_tbl);
+- if (RANY(obj)->as.object.iv_tbl) {
+- st_free_table(RANY(obj)->as.object.iv_tbl);
+- }
+- break;
++ rb_clear_cache_by_class((VALUE)obj);
++ st_free_table(RANY(obj)->as.klass.m_tbl);
++ if (RANY(obj)->as.object.iv_tbl) {
++ st_free_table(RANY(obj)->as.object.iv_tbl);
++ }
++ break;
+ case T_STRING:
+- if (RANY(obj)->as.string.ptr && !FL_TEST(obj, ELTS_SHARED)) {
+- RUBY_CRITICAL(free(RANY(obj)->as.string.ptr));
+- }
+- break;
++ if (RANY(obj)->as.string.ptr && !FL_TEST(obj, ELTS_SHARED)) {
++ RUBY_CRITICAL(free(RANY(obj)->as.string.ptr));
++ }
++ break;
+ case T_ARRAY:
+- if (RANY(obj)->as.array.ptr && !FL_TEST(obj, ELTS_SHARED)) {
+- RUBY_CRITICAL(free(RANY(obj)->as.array.ptr));
+- }
+- break;
++ if (RANY(obj)->as.array.ptr && !FL_TEST(obj, ELTS_SHARED)) {
++ RUBY_CRITICAL(free(RANY(obj)->as.array.ptr));
++ }
++ break;
+ case T_HASH:
+- if (RANY(obj)->as.hash.tbl) {
+- st_free_table(RANY(obj)->as.hash.tbl);
+- }
+- break;
++ if (RANY(obj)->as.hash.tbl) {
++ st_free_table(RANY(obj)->as.hash.tbl);
++ }
++ break;
+ case T_REGEXP:
+- if (RANY(obj)->as.regexp.ptr) {
+- re_free_pattern(RANY(obj)->as.regexp.ptr);
+- }
+- if (RANY(obj)->as.regexp.str) {
+- RUBY_CRITICAL(free(RANY(obj)->as.regexp.str));
+- }
+- break;
++ if (RANY(obj)->as.regexp.ptr) {
++ re_free_pattern(RANY(obj)->as.regexp.ptr);
++ }
++ if (RANY(obj)->as.regexp.str) {
++ RUBY_CRITICAL(free(RANY(obj)->as.regexp.str));
++ }
++ break;
+ case T_DATA:
+ if (DATA_PTR(obj)) {
+ if ((long)RANY(obj)->as.data.dfree == -1) {
+@@ -1317,11 +1649,11 @@ obj_free(obj)
+ }
+ break;
+ case T_MATCH:
+- if (RANY(obj)->as.match.regs) {
+- re_free_registers(RANY(obj)->as.match.regs);
+- RUBY_CRITICAL(free(RANY(obj)->as.match.regs));
+- }
+- break;
++ if (RANY(obj)->as.match.regs) {
++ re_free_registers(RANY(obj)->as.match.regs);
++ RUBY_CRITICAL(free(RANY(obj)->as.match.regs));
++ }
++ break;
+ case T_FILE:
+ if (RANY(obj)->as.file.fptr) {
+ struct rb_io_t *fptr = RANY(obj)->as.file.fptr;
+@@ -1332,19 +1664,19 @@ obj_free(obj)
+ }
+ break;
+ case T_ICLASS:
+- /* iClass shares table with the module */
+- break;
++ /* iClass shares table with the module */
++ break;
+
+ case T_FLOAT:
+ case T_VARMAP:
+ case T_BLKTAG:
+- break;
++ break;
+
+ case T_BIGNUM:
+- if (RANY(obj)->as.bignum.digits) {
+- RUBY_CRITICAL(free(RANY(obj)->as.bignum.digits));
+- }
+- break;
++ if (RANY(obj)->as.bignum.digits) {
++ RUBY_CRITICAL(free(RANY(obj)->as.bignum.digits));
++ }
++ break;
+ case T_NODE:
+ switch (nd_type(obj)) {
+ case NODE_SCOPE:
+@@ -1359,7 +1691,7 @@ obj_free(obj)
+ break; /* no need to free iv_tbl */
+
+ case T_SCOPE:
+- if (RANY(obj)->as.scope.local_vars &&
++ if (RANY(obj)->as.scope.local_vars &&
+ RANY(obj)->as.scope.flags != SCOPE_ALLOCA) {
+ VALUE *vars = RANY(obj)->as.scope.local_vars-1;
+ if (!(RANY(obj)->as.scope.flags & SCOPE_CLONE) && vars[0] == 0)
+@@ -1370,14 +1702,14 @@ obj_free(obj)
+ break;
+
+ case T_STRUCT:
+- if (RANY(obj)->as.rstruct.ptr) {
+- RUBY_CRITICAL(free(RANY(obj)->as.rstruct.ptr));
+- }
+- break;
++ if (RANY(obj)->as.rstruct.ptr) {
++ RUBY_CRITICAL(free(RANY(obj)->as.rstruct.ptr));
++ }
++ break;
+
+ default:
+- rb_bug("gc_sweep(): unknown data type 0x%lx(0x%lx)",
+- RANY(obj)->as.basic.flags & T_MASK, obj);
++ rb_bug("gc_sweep(): unknown data type 0x%lx(0x%lx)",
++ RANY(obj)->as.basic.flags & T_MASK, obj);
+ }
+
+ return 0;
+@@ -1407,18 +1739,18 @@ _rb_setjmp:\n\
+ typedef unsigned long rb_jmp_buf[6];
+ __asm__ (".align 4\n\
+ _rb_setjmp:\n\
+- pushl %ebp\n\
+- movl %esp,%ebp\n\
+- movl 8(%ebp),%ebp\n\
+- movl %eax,(%ebp)\n\
+- movl %ebx,4(%ebp)\n\
+- movl %ecx,8(%ebp)\n\
+- movl %edx,12(%ebp)\n\
+- movl %esi,16(%ebp)\n\
+- movl %edi,20(%ebp)\n\
+- popl %ebp\n\
+- xorl %eax,%eax\n\
+- ret");
++ pushl %ebp\n\
++ movl %esp,%ebp\n\
++ movl 8(%ebp),%ebp\n\
++ movl %eax,(%ebp)\n\
++ movl %ebx,4(%ebp)\n\
++ movl %ecx,8(%ebp)\n\
++ movl %edx,12(%ebp)\n\
++ movl %esi,16(%ebp)\n\
++ movl %edi,20(%ebp)\n\
++ popl %ebp\n\
++ xorl %eax,%eax\n\
++ ret");
+ #endif
+ #endif
+ int rb_setjmp (rb_jmp_buf);
+@@ -1431,41 +1763,50 @@ garbage_collect()
+ struct gc_list *list;
+ struct FRAME * volatile frame; /* gcc 2.7.2.3 -O2 bug?? */
+ jmp_buf save_regs_gc_mark;
++ struct timeval gctv1, gctv2;
+ SET_STACK_END;
+
+ #ifdef HAVE_NATIVETHREAD
+ if (!is_ruby_native_thread()) {
+- rb_bug("cross-thread violation on rb_gc()");
++ rb_bug("cross-thread violation on rb_gc()");
+ }
+ #endif
+ if (dont_gc || during_gc) {
+- if (!freelist) {
+- add_heap();
+- }
+- return;
++ if (!freelist) {
++ add_heap();
++ }
++ return;
+ }
+ if (during_gc) return;
+ during_gc++;
+
++ if (gc_statistics) {
++ gc_collections++;
++ gettimeofday(&gctv1, NULL);
++ if (verbose_gc_stats) {
++ fprintf(gc_data_file, "Garbage collection started\n");
++ }
++ }
++
+ init_mark_stack();
+
+ gc_mark((VALUE)ruby_current_node, 0);
+
+ /* mark frame stack */
+ for (frame = ruby_frame; frame; frame = frame->prev) {
+- rb_gc_mark_frame(frame);
+- if (frame->tmp) {
+- struct FRAME *tmp = frame->tmp;
+- while (tmp) {
+- rb_gc_mark_frame(tmp);
+- tmp = tmp->prev;
+- }
+- }
++ rb_gc_mark_frame(frame);
++ if (frame->tmp) {
++ struct FRAME *tmp = frame->tmp;
++ while (tmp) {
++ rb_gc_mark_frame(tmp);
++ tmp = tmp->prev;
++ }
++ }
+ }
+ gc_mark((VALUE)ruby_scope, 0);
+ gc_mark((VALUE)ruby_dyna_vars, 0);
+ if (finalizer_table) {
+- mark_tbl(finalizer_table, 0);
++ mark_tbl(finalizer_table, 0);
+ }
+
+ FLUSH_REGISTER_WINDOWS;
+@@ -1478,9 +1819,9 @@ garbage_collect()
+ rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1);
+ #else
+ if ((VALUE*)STACK_END < rb_gc_stack_start)
+- rb_gc_mark_locations((VALUE*)STACK_END, rb_gc_stack_start);
++ rb_gc_mark_locations((VALUE*)STACK_END, rb_gc_stack_start);
+ else
+- rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1);
++ rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END + 1);
+ #endif
+ #ifdef __ia64
+ /* mark backing store (flushed register window on the stack) */
+@@ -1489,13 +1830,13 @@ garbage_collect()
+ #endif
+ #if defined(__human68k__) || defined(__mc68000__)
+ rb_gc_mark_locations((VALUE*)((char*)STACK_END + 2),
+- (VALUE*)((char*)rb_gc_stack_start + 2));
++ (VALUE*)((char*)rb_gc_stack_start + 2));
+ #endif
+ rb_gc_mark_threads();
+
+ /* mark protected global variables */
+ for (list = global_List; list; list = list->next) {
+- rb_gc_mark_maybe(*list->varptr);
++ rb_gc_mark_maybe(*list->varptr);
+ }
+ rb_mark_end_proc();
+ rb_gc_mark_global_tbl();
+@@ -1510,18 +1851,30 @@ garbage_collect()
+
+ /* gc_mark objects whose marking are not completed*/
+ do {
+- while (!MARK_STACK_EMPTY) {
+- if (mark_stack_overflow){
+- gc_mark_all();
+- }
+- else {
+- gc_mark_rest();
+- }
+- }
+- rb_gc_abort_threads();
++ while (!MARK_STACK_EMPTY) {
++ if (mark_stack_overflow){
++ gc_mark_all();
++ }
++ else {
++ gc_mark_rest();
++ }
++ }
++ rb_gc_abort_threads();
+ } while (!MARK_STACK_EMPTY);
+
+ gc_sweep();
++
++ if (gc_statistics) {
++ GC_TIME_TYPE musecs_used;
++ gettimeofday(&gctv2, NULL);
++ musecs_used = ((GC_TIME_TYPE)(gctv2.tv_sec - gctv1.tv_sec) * 1000000) + (gctv2.tv_usec - gctv1.tv_usec);
++ gc_time += musecs_used;
++
++ if (verbose_gc_stats) {
++	    fprintf(gc_data_file, "GC time: %d msec\n", (int)(musecs_used / 1000));
++ fflush(gc_data_file);
++ }
++ }
+ }
+
+ void
+@@ -1588,8 +1941,8 @@ Init_stack(addr)
+ memset(&m, 0, sizeof(m));
+ VirtualQuery(&m, &m, sizeof(m));
+ rb_gc_stack_start =
+- STACK_UPPER((VALUE *)&m, (VALUE *)m.BaseAddress,
+- (VALUE *)((char *)m.BaseAddress + m.RegionSize) - 1);
++ STACK_UPPER((VALUE *)&m, (VALUE *)m.BaseAddress,
++ (VALUE *)((char *)m.BaseAddress + m.RegionSize) - 1);
+ #elif defined(STACK_END_ADDRESS)
+ {
+ extern void *STACK_END_ADDRESS;
+@@ -1599,24 +1952,24 @@ Init_stack(addr)
+ if (!addr) addr = (void *)&addr;
+ STACK_UPPER(&addr, addr, ++addr);
+ if (rb_gc_stack_start) {
+- if (STACK_UPPER(&addr,
+- rb_gc_stack_start > addr,
+- rb_gc_stack_start < addr))
+- rb_gc_stack_start = addr;
+- return;
++ if (STACK_UPPER(&addr,
++ rb_gc_stack_start > addr,
++ rb_gc_stack_start < addr))
++ rb_gc_stack_start = addr;
++ return;
+ }
+ rb_gc_stack_start = addr;
+ #endif
+ #ifdef HAVE_GETRLIMIT
+ {
+- struct rlimit rlim;
++ struct rlimit rlim;
+
+- if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
+- unsigned int space = rlim.rlim_cur/5;
++ if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
++ unsigned int space = rlim.rlim_cur/5;
+
+- if (space > 1024*1024) space = 1024*1024;
+- STACK_LEVEL_MAX = (rlim.rlim_cur - space) / sizeof(VALUE);
+- }
++ if (space > 1024*1024) space = 1024*1024;
++ STACK_LEVEL_MAX = (rlim.rlim_cur - space) / sizeof(VALUE);
++ }
+ }
+ #endif
+ }
+@@ -1652,16 +2005,16 @@ void ruby_init_stack(VALUE *addr
+ }
+ #elif defined _WIN32
+ {
+- MEMORY_BASIC_INFORMATION mi;
+- DWORD size;
+- DWORD space;
+-
+- if (VirtualQuery(&mi, &mi, sizeof(mi))) {
+- size = (char *)mi.BaseAddress - (char *)mi.AllocationBase;
+- space = size / 5;
+- if (space > 1024*1024) space = 1024*1024;
+- STACK_LEVEL_MAX = (size - space) / sizeof(VALUE);
+- }
++ MEMORY_BASIC_INFORMATION mi;
++ DWORD size;
++ DWORD space;
++
++ if (VirtualQuery(&mi, &mi, sizeof(mi))) {
++ size = (char *)mi.BaseAddress - (char *)mi.AllocationBase;
++ space = size / 5;
++ if (space > 1024*1024) space = 1024*1024;
++ STACK_LEVEL_MAX = (size - space) / sizeof(VALUE);
++ }
+ }
+ #endif
+ }
+@@ -1701,8 +2054,9 @@ void
+ Init_heap()
+ {
+ if (!rb_gc_stack_start) {
+- Init_stack(0);
++ Init_stack(0);
+ }
++ set_gc_parameters();
+ add_heap();
+ }
+
+@@ -1715,7 +2069,7 @@ os_obj_of(of)
+ volatile VALUE v;
+
+ for (i = 0; i < heaps_used; i++) {
+- RVALUE *p, *pend;
++ RVALUE *p, *pend;
+
+ p = heaps[i].slot; pend = p + heaps[i].limit;
+ for (;p < pend; p++) {
+@@ -1808,8 +2162,8 @@ add_final(os, block)
+ {
+ rb_warn("ObjectSpace::add_finalizer is deprecated; use define_finalizer");
+ if (!rb_respond_to(block, rb_intern("call"))) {
+- rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
+- rb_obj_classname(block));
++ rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
++ rb_obj_classname(block));
+ }
+ rb_ary_push(finalizers, block);
+ return block;
+@@ -1864,7 +2218,7 @@ undefine_final(os, obj)
+ VALUE os, obj;
+ {
+ if (finalizer_table) {
+- st_delete(finalizer_table, (st_data_t*)&obj, 0);
++ st_delete(finalizer_table, (st_data_t*)&obj, 0);
+ }
+ return obj;
+ }
+@@ -1888,11 +2242,11 @@ define_final(argc, argv, os)
+
+ rb_scan_args(argc, argv, "11", &obj, &block);
+ if (argc == 1) {
+- block = rb_block_proc();
++ block = rb_block_proc();
+ }
+ else if (!rb_respond_to(block, rb_intern("call"))) {
+- rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
+- rb_obj_classname(block));
++ rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
++ rb_obj_classname(block));
+ }
+ need_call_final = 1;
+ if (!FL_ABLE(obj)) {
+@@ -1905,10 +2259,10 @@ define_final(argc, argv, os)
+ OBJ_FREEZE(block);
+
+ if (!finalizer_table) {
+- finalizer_table = st_init_numtable();
++ finalizer_table = st_init_numtable();
+ }
+ if (st_lookup(finalizer_table, obj, &table)) {
+- rb_ary_push(table, block);
++ rb_ary_push(table, block);
+ }
+ else {
+ table = rb_ary_new3(1, block);
+@@ -1927,7 +2281,7 @@ rb_gc_copy_finalizer(dest, obj)
+ if (!finalizer_table) return;
+ if (!FL_TEST(obj, FL_FINALIZE)) return;
+ if (st_lookup(finalizer_table, obj, &table)) {
+- st_insert(finalizer_table, dest, table);
++ st_insert(finalizer_table, dest, table);
+ }
+ RBASIC(dest)->flags |= FL_FINALIZE;
+ }
+@@ -1957,18 +2311,18 @@ run_final(obj)
+ args[1] = 0;
+ args[2] = (VALUE)ruby_safe_level;
+ for (i=0; i<RARRAY(finalizers)->len; i++) {
+- args[0] = RARRAY(finalizers)->ptr[i];
+- if (!args[1]) args[1] = rb_ary_new3(1, objid);
+- rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status);
++ args[0] = RARRAY(finalizers)->ptr[i];
++ if (!args[1]) args[1] = rb_ary_new3(1, objid);
++ rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status);
+ }
+ if (finalizer_table && st_delete(finalizer_table, (st_data_t*)&obj, &table)) {
+- for (i=0; i<RARRAY(table)->len; i++) {
+- VALUE final = RARRAY(table)->ptr[i];
+- args[0] = RARRAY(final)->ptr[1];
+- if (!args[1]) args[1] = rb_ary_new3(1, objid);
+- args[2] = FIX2INT(RARRAY(final)->ptr[0]);
+- rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status);
+- }
++ for (i=0; i<RARRAY(table)->len; i++) {
++ VALUE final = RARRAY(table)->ptr[i];
++ args[0] = RARRAY(final)->ptr[1];
++ if (!args[1]) args[1] = rb_ary_new3(1, objid);
++ args[2] = FIX2INT(RARRAY(final)->ptr[0]);
++ rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status);
++ }
+ }
+ rb_thread_critical = critical_save;
+ }
+@@ -1980,8 +2334,8 @@ rb_gc_finalize_deferred()
+
+ deferred_final_list = 0;
+ if (p) {
+- finalize_list(p);
+- free_unused_heaps();
++ finalize_list(p);
++ free_unused_heaps();
+ }
+ }
+
+@@ -2061,7 +2415,7 @@ id2ref(obj, objid)
+ if (ptr == Qfalse) return Qfalse;
+ if (ptr == Qnil) return Qnil;
+ if (FIXNUM_P(ptr)) return (VALUE)ptr;
+- ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */
++ ptr = objid ^ FIXNUM_FLAG; /* unset FIXNUM_FLAG */
+
+ if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
+ ID symid = ptr / sizeof(RVALUE);
+@@ -2075,7 +2429,7 @@ id2ref(obj, objid)
+ rb_raise(rb_eRangeError, "0x%lx is not id value", p0);
+ }
+ if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
+- rb_raise(rb_eRangeError, "0x%lx is recycled object", p0);
++ rb_raise(rb_eRangeError, "0x%lx is recycled object", p0);
+ }
+ return (VALUE)ptr;
+ }
+@@ -2166,6 +2520,14 @@ Init_GC()
+ rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
+ rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
+
++ rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0);
++ rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0);
++ rb_define_singleton_method(rb_mGC, "clear_stats", rb_gc_clear_stats, 0);
++ rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0);
++ rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0);
++ rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0);
++ rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1);
++
+ rb_mObSpace = rb_define_module("ObjectSpace");
+ rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
+ rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
+@@ -2188,7 +2550,7 @@ Init_GC()
+
+ rb_global_variable(&nomem_error);
+ nomem_error = rb_exc_new3(rb_eNoMemError,
+- rb_obj_freeze(rb_str_new2("failed to allocate memory")));
++ rb_obj_freeze(rb_str_new2("failed to allocate memory")));
+ OBJ_TAINT(nomem_error);
+ OBJ_FREEZE(nomem_error);
View
15 patches/ruby/1.8.7/p370/railsexpress/05-display-full-stack-trace.patch
@@ -0,0 +1,15 @@
+diff --git a/eval.c b/eval.c
+index f49044a..213e78c 100644
+--- a/eval.c
++++ b/eval.c
+@@ -1335,8 +1335,8 @@ error_print()
+ int truncate = eclass == rb_eSysStackError;
+
+ #define TRACE_MAX (TRACE_HEAD+TRACE_TAIL+5)
+-#define TRACE_HEAD 8
+-#define TRACE_TAIL 5
++#define TRACE_HEAD 100
++#define TRACE_TAIL 100
+
+ ep = RARRAY(errat);
+ for (i=1; i<ep->len; i++) {
View
13 patches/ruby/1.8.7/p370/railsexpress/06-better-source-file-tracing.patch
@@ -0,0 +1,13 @@
+diff --git a/eval.c b/eval.c
+index 213e78c..c37136f 100644
+--- a/eval.c
++++ b/eval.c
+@@ -1171,7 +1171,7 @@ static VALUE trace_func = 0;
+ static int tracing = 0;
+ static void call_trace_func _((rb_event_t,NODE*,VALUE,ID,VALUE));
+
+-#if 0
++#if 1
+ #define SET_CURRENT_SOURCE() (ruby_sourcefile = ruby_current_node->nd_file, \
+ ruby_sourceline = nd_line(ruby_current_node))
+ #else
View
159 patches/ruby/1.8.7/p370/railsexpress/07-heap-dump-support.patch
@@ -0,0 +1,159 @@
+diff --git a/configure.in b/configure.in
+index 62b34a8..4be088c 100644
+--- a/configure.in
++++ b/configure.in
+@@ -1595,6 +1595,14 @@ fi
+ LDFLAGS="-L. $LDFLAGS"
+ AC_SUBST(ARCHFILE)
+
++dnl enable gc debugging
++AC_ARG_ENABLE(gcdebug,
++ [ --enable-gcdebug build garbage collector with debugging enabled. ],
++ [enable_gcdebug=$enableval])
++if test "$enable_gcdebug" = 'yes'; then
++ AC_DEFINE(GC_DEBUG, 1)
++fi
++
+ dnl build rdoc index if requested
+ RDOCTARGET=""
+ AC_ARG_ENABLE(install-doc,
+diff --git a/gc.c b/gc.c
+index ab71d22..9ad716f 100644
+--- a/gc.c
++++ b/gc.c
+@@ -411,7 +411,6 @@ rb_gc_unregister_address(addr)
+ }
+ }
+
+-#undef GC_DEBUG
+
+ void
+ rb_global_variable(var)
+@@ -602,6 +601,85 @@ rb_gc_dump()
+ return Qnil;
+ }
+
++
++static char* obj_type(int tp);
++
++#ifdef GC_DEBUG
++/*
++ * call-seq:
++ * GC.dump_file_and_line_info(String, boolean) => nil
++ *
++ * dumps information on which currently allocated object was created by which file and on which line
++ *
++ * GC.dump_file_and_line_info(String, boolean) #=> nil
++ *
++ * The second parameter specifies whether class names should be included in the dump.
++ * Note that including class names will allocate additional string objects on the heap.
++ *
++ */
++
++VALUE
++rb_gc_dump_file_and_line_info(int argc, VALUE *argv)
++{
++ VALUE filename, str, include_classnames = Qnil;
++ char *fname = NULL;
++ char *klass = NULL;
++ FILE* f = NULL;
++ int i,n = 0;
++
++ rb_scan_args(argc, argv, "11", &filename, &include_classnames);
++
++ str = StringValue(filename);
++ fname = RSTRING(str)->ptr;
++ f = fopen(fname, "w");
++
++ for (i = 0; i < heaps_used; i++) {
++ RVALUE *p, *pend;
++
++ p = heaps[i].slot; pend = p + heaps[i].limit;
++ for (;p < pend; p++) {
++ if (p->as.basic.flags) {
++ fprintf(f, "%s:%s:%d", obj_type(p->as.basic.flags & T_MASK), p->file, p->line);
++ // rb_obj_classname will create objects on the heap, we need a better solution
++ if (include_classnames == Qtrue) {
++ /* write the class */
++ fprintf(f, ":");
++ switch (TYPE(p)) {
++ case T_NONE:
++ fprintf(f, "__none__");
++ break;
++ case T_BLKTAG:
++ fprintf(f, "__blktag__");
++ break;
++ case T_UNDEF:
++ fprintf(f, "__undef__");
++ break;
++ case T_VARMAP:
++ fprintf(f, "__varmap__");
++ break;
++ case T_SCOPE:
++ fprintf(f, "__scope__");
++ break;
++ case T_NODE:
++ fprintf(f, "__node__");
++ break;
++ default:
++ if (!p->as.basic.klass) {
++ fprintf(f, "__unknown__");
++ } else {
++                  fprintf(f, "%s", rb_obj_classname((VALUE)p));
++ }
++ }
++ }
++ fprintf(f, "\n");
++ }
++ }
++ }
++ fclose(f);
++ return Qnil;
++}
++#endif
++
+ /*
+ * call-seq:
+ * GC.log String => String
+@@ -1066,6 +1144,11 @@ gc_mark(ptr, lev)
+ if (obj->as.basic.flags & FL_MARK) return; /* already marked */
+ obj->as.basic.flags |= FL_MARK;
+
++#ifdef GC_DEBUG
++ /* mark our new reference point for sourcefile objects */
++ mark_source_filename(RANY(obj)->file);
++#endif
++
+ if (lev > GC_LEVEL_MAX || (lev == 0 && ruby_stack_check())) {
+ if (!mark_stack_overflow) {
+ if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
+@@ -1104,6 +1187,11 @@ gc_mark_children(ptr, lev)
+ if (obj->as.basic.flags & FL_MARK) return; /* already marked */
+ obj->as.basic.flags |= FL_MARK;
+
++#ifdef GC_DEBUG
++ /* mark our new reference point for sourcefile objects */
++ mark_source_filename(RANY(obj)->file);
++#endif
++
+ marking:
+ if (FL_TEST(obj, FL_EXIVAR)) {
+ rb_mark_generic_ivar(ptr);
+@@ -1550,6 +1638,7 @@ gc_sweep()
+ live_counts[i], free_counts[i], obj_type(i));
+ }
+ }
++ fflush(gc_data_file);
+ }
+
+ /* clear finalization list */
+@@ -2526,6 +2615,9 @@ Init_GC()
+ rb_define_singleton_method(rb_mGC, "collections", rb_gc_collections, 0);
+ rb_define_singleton_method(rb_mGC, "time", rb_gc_time, 0);
+ rb_define_singleton_method(rb_mGC, "dump", rb_gc_dump, 0);
++#ifdef GC_DEBUG
++ rb_define_singleton_method(rb_mGC, "dump_file_and_line_info", rb_gc_dump_file_and_line_info, -1);
++#endif
+ rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1);
+
+ rb_mObSpace = rb_define_module("ObjectSpace");
View
249 patches/ruby/1.8.7/p370/railsexpress/08-fork-support-for-gc-logging.patch
@@ -0,0 +1,249 @@
+diff --git a/gc.c b/gc.c
+index 9ad716f..a3cbe91 100644
+--- a/gc.c
++++ b/gc.c
+@@ -223,6 +223,8 @@ static int dont_gc;
+ static int gc_statistics = 0;
+ static GC_TIME_TYPE gc_time = 0;
+ static int gc_collections = 0;
++static int verbose_gc_stats = Qfalse;
++static FILE* gc_data_file = NULL;
+ static int during_gc;
+ static int need_call_final = 0;
+ static st_table *finalizer_table = 0;
+@@ -368,9 +370,148 @@ rb_gc_time()
+ #endif
+ }
+
+-
+ VALUE rb_mGC;
+
++/*
++ * call-seq:
++ * GC.enable_trace => true or false
++ *
++ * Enables garbage collection tracing, returning <code>true</code> if garbage
++ * collection tracing was already enabled.
++ *
++ * GC.enable_trace #=> false or true
++ * GC.enable_trace #=> true
++ *
++ */
++
++VALUE
++rb_gc_enable_trace()
++{
++ int old = verbose_gc_stats;
++ verbose_gc_stats = Qtrue;
++ return old;
++}
++
++/*
++ * call-seq:
++ * GC.disable_trace => true or false
++ *
++ * Disables garbage collection tracing, returning <code>true</code> if garbage
++ * collection tracing was already disabled.
++ *
++ * GC.disable_trace #=> false or true
++ * GC.disable_trace #=> true
++ *
++ */
++
++VALUE
++rb_gc_disable_trace()
++{
++ int old = verbose_gc_stats;
++ verbose_gc_stats = Qfalse;
++ return old;
++}
++
++char* GC_LOGFILE_IVAR = "@gc_logfile_name";
++
++/*
++ * call-seq:
++ * GC.log_file(filename=nil, mode="w") => boolean
++ *
++ * Changes the GC data log file. Closes the currently open logfile.
++ * Returns true if the file was successfully opened for
++ * writing. Returns false if the file could not be opened for
++ * writing. Returns the name of the current logfile (or nil) if no
++ * parameter is given. Restores logging to stderr when given nil as
++ * an argument.
++ *
++ * GC.log_file #=> nil
++ * GC.log_file "/tmp/gc.log" #=> true
++ * GC.log_file #=> "/tmp/gc.log"
++ * GC.log_file nil #=> true
++ *
++ */
++
++VALUE
++rb_gc_log_file(int argc, VALUE *argv, VALUE self)
++{
++ VALUE filename = Qnil;
++ VALUE mode_str = Qnil;
++ FILE* f = NULL;
++ char* mode = "w";
++
++ VALUE current_logfile_name = rb_iv_get(rb_mGC, GC_LOGFILE_IVAR);
++
++ if (argc==0)
++ return current_logfile_name;
++
++ rb_scan_args(argc, argv, "02", &filename, &mode_str);
++
++ if (filename == Qnil) {
++ /* close current logfile and reset logfile to stderr */
++ if (gc_data_file != stderr) {
++ fclose(gc_data_file);
++ gc_data_file = stderr;
++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, Qnil);
++ }
++ return Qtrue;
++ }
++
++ /* we have a real logfile name */
++ filename = StringValue(filename);
++
++ if (rb_equal(current_logfile_name, filename) == Qtrue) {
++ /* do nothing if we get the file name we're already logging to */
++ return Qtrue;
++ }
++
++ /* get mode for file opening */
++ if (mode_str != Qnil)
++ {
++ mode = RSTRING(StringValue(mode_str))->ptr;
++ }
++
++ /* try to open file in given mode */
++ if (f = fopen(RSTRING(filename)->ptr, mode)) {
++ if (gc_data_file != stderr) {
++ fclose(gc_data_file);
++ }
++ gc_data_file = f;
++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, filename);
++ } else {
++ return Qfalse;
++ }
++ return Qtrue;
++}
++
++
++/*
++ * Called from process.c before a fork. Flushes the gc log file to
++ * avoid writing the buffered output twice (once in the parent, and
++ * once in the child).
++ */
++void
++rb_gc_before_fork()
++{
++ /* flush gc log file */
++ fflush(gc_data_file);
++}
++
++/*
++ * Called from process.c after a fork in the child process. Turns off
++ * logging, disables GC stats and resets all gc counters and timing
++ * information.
++ */
++void
++rb_gc_after_fork()
++{
++ rb_gc_disable_stats();
++ rb_gc_clear_stats();
++ rb_gc_disable_trace();
++ gc_data_file = stderr;
++ rb_iv_set(rb_mGC, GC_LOGFILE_IVAR, Qnil);
++}
++
+ static struct gc_list {
+ VALUE *varptr;
+ struct gc_list *next;
+@@ -477,10 +618,6 @@ static double heap_slots_growth_factor = 1.8;
+
+ static long initial_malloc_limit = GC_MALLOC_LIMIT;
+
+-static int verbose_gc_stats = Qfalse;
+-
+-static FILE* gc_data_file = NULL;
+-
+ static RVALUE *himem, *lomem;
+
+ static void set_gc_parameters()
+@@ -496,6 +633,8 @@ static void set_gc_parameters()
+ if (gc_stats_i > 0) {
+ verbose_gc_stats = Qtrue;
+ }
++ /* child processes should not inherit RUBY_GC_STATS */
++ unsetenv("RUBY_GC_STATS");
+ }
+
+ gc_heap_file_ptr = getenv("RUBY_GC_DATA_FILE");
+@@ -508,6 +647,8 @@ static void set_gc_parameters()
+ fprintf(stderr,
+ "can't open gc log file %s for writing, using default\n", gc_heap_file_ptr);
+ }
++ /* child processes should not inherit RUBY_GC_DATA_FILE to avoid clobbering */
++ unsetenv("RUBY_GC_DATA_FILE");
+ }
+
+ min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
+@@ -2619,6 +2760,9 @@ Init_GC()
+ rb_define_singleton_method(rb_mGC, "dump_file_and_line_info", rb_gc_dump_file_and_line_info, -1);
+ #endif
+ rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1);
++ rb_define_singleton_method(rb_mGC, "log_file", rb_gc_log_file, -1);
++ rb_define_singleton_method(rb_mGC, "enable_trace", rb_gc_enable_trace, 0);
++ rb_define_singleton_method(rb_mGC, "disable_trace", rb_gc_disable_trace, 0);
+
+ rb_mObSpace = rb_define_module("ObjectSpace");
+ rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
+diff --git a/intern.h b/intern.h
+index 950ae9d..99696f1 100644
+--- a/intern.h
++++ b/intern.h
+@@ -270,6 +270,8 @@ void rb_gc_call_finalizer_at_exit _((void));
+ VALUE rb_gc_enable _((void));
+ VALUE rb_gc_disable _((void));
+ VALUE rb_gc_start _((void));
++void rb_gc_before_fork _((void));
++void rb_gc_after_fork _((void));
+ /* hash.c */
+ void st_foreach_safe _((struct st_table *, int (*)(ANYARGS), unsigned long));
+ void rb_hash_foreach _((VALUE, int (*)(ANYARGS), VALUE));
+diff --git a/process.c b/process.c
+index 8f6285d..ea28cb8 100644
+--- a/process.c
++++ b/process.c
+@@ -1330,6 +1330,8 @@ rb_f_fork(obj)
+ fflush(stderr);
+ #endif
+
++ rb_gc_before_fork();
++
+ before_exec();
+ pid = fork();
+ after_exec();
+@@ -1339,6 +1341,7 @@ rb_f_fork(obj)
+ #ifdef linux
+ after_exec();
+ #endif
++ rb_gc_after_fork();
+ rb_thread_atfork();
+ if (rb_block_given_p()) {
+ int status;
+@@ -1574,10 +1577,12 @@ rb_f_system(argc, argv)
+
+ chfunc = signal(SIGCHLD, SIG_DFL);
+ retry:
++ rb_gc_before_fork();
+ before_exec();
+ pid = fork();
+ if (pid == 0) {
+ /* child process */
++ rb_gc_after_fork();
+ rb_thread_atfork();
+ rb_protect(proc_exec_args, (VALUE)&earg, NULL);
+ _exit(127);
View
120 patches/ruby/1.8.7/p370/railsexpress/09-track-malloc-size.patch
@@ -0,0 +1,120 @@
+diff --git a/gc.c b/gc.c
+index a3cbe91..30a1219 100644
+--- a/gc.c
++++ b/gc.c
+@@ -79,6 +79,17 @@ void *alloca ();
+
+ static unsigned long malloc_increase = 0;
+ static unsigned long malloc_limit = GC_MALLOC_LIMIT;
++
++#ifdef HAVE_LONG_LONG
++static unsigned LONG_LONG gc_allocated_size = 0;
++static unsigned LONG_LONG gc_num_allocations = 0;
++#else
++static unsigned long gc_allocated_size = 0;
++static unsigned long gc_num_allocations = 0;
++#endif
++static int gc_statistics = 0;