Merge pull request #1414 from skaes/patches-for-193p362

railsexpress patches for latest 1.9.3 release (p362)
commit 85a830a3f4eb52a8874b5f1180f4a64a2695ce3f (2 parents: b7a836f + 24887d1)
authored by @richo
Showing with 13,227 additions and 277 deletions.
  1. +3 −2 patches/ruby/1.9.3/head/railsexpress/01-fix-make-clean.patch
  2. +133 −132 patches/ruby/1.9.3/head/railsexpress/02-railsbench-gc.patch
  3. +4 −4 patches/ruby/1.9.3/head/railsexpress/03-display-more-detailed-stack-trace.patch
  4. +10 −10 patches/ruby/1.9.3/head/railsexpress/04-fork-support-for-gc-logging.patch
  5. +8 −8 patches/ruby/1.9.3/head/railsexpress/05-track-live-dataset-size.patch
  6. +21 −21 patches/ruby/1.9.3/head/railsexpress/07-export-a-few-more-symbols-for-ruby-prof.patch
  7. +20 −20 patches/ruby/1.9.3/head/railsexpress/08-thread-variables.patch
  8. +80 −80 patches/ruby/1.9.3/head/railsexpress/09-faster-loading.patch
  9. +2,308 −0 patches/ruby/1.9.3/head/railsexpress/10-falcon-st-opt.patch
  10. +2,588 −0 patches/ruby/1.9.3/head/railsexpress/11-falcon-sparse-array.patch
  11. +301 −0 patches/ruby/1.9.3/head/railsexpress/12-falcon-array-queue.patch
  12. +13 −0 patches/ruby/1.9.3/p362/railsexpress/01-fix-make-clean.patch
  13. +1,235 −0 patches/ruby/1.9.3/p362/railsexpress/02-railsbench-gc.patch
  14. +15 −0 patches/ruby/1.9.3/p362/railsexpress/03-display-more-detailed-stack-trace.patch
  15. +68 −0 patches/ruby/1.9.3/p362/railsexpress/04-fork-support-for-gc-logging.patch
  16. +45 −0 patches/ruby/1.9.3/p362/railsexpress/05-track-live-dataset-size.patch
  17. +13 −0 patches/ruby/1.9.3/p362/railsexpress/06-webrick_204_304_keep_alive_fix.patch
  18. +84 −0 patches/ruby/1.9.3/p362/railsexpress/07-export-a-few-more-symbols-for-ruby-prof.patch
  19. +319 −0 patches/ruby/1.9.3/p362/railsexpress/08-thread-variables.patch
  20. +747 −0 patches/ruby/1.9.3/p362/railsexpress/09-faster-loading.patch
  21. +2,308 −0 patches/ruby/1.9.3/p362/railsexpress/10-falcon-st-opt.patch
  22. +2,588 −0 patches/ruby/1.9.3/p362/railsexpress/11-falcon-sparse-array.patch
  23. +301 −0 patches/ruby/1.9.3/p362/railsexpress/12-falcon-array-queue.patch
  24. +3 −0  patchsets/ruby/1.9.3/head/railsexpress
  25. +12 −0 patchsets/ruby/1.9.3/p362/railsexpress
5 patches/ruby/1.9.3/head/railsexpress/01-fix-make-clean.patch
@@ -1,12 +1,13 @@
diff --git a/lib/mkmf.rb b/lib/mkmf.rb
-index f4f84a6..f73f7ed 100644
+index 08053a9..361fd08 100644
--- a/lib/mkmf.rb
+++ b/lib/mkmf.rb
@@ -2191,7 +2191,7 @@ def init_mkmf(config = CONFIG, rbconfig = RbConfig::CONFIG)
$LOCAL_LIBS = ""
-
+
$cleanfiles = config_string('CLEANFILES') {|s| Shellwords.shellwords(s)} || []
- $cleanfiles << "mkmf.log"
+ $cleanfiles << "mkmf.log .*.time"
$distcleanfiles = config_string('DISTCLEANFILES') {|s| Shellwords.shellwords(s)} || []
$distcleandirs = config_string('DISTCLEANDIRS') {|s| Shellwords.shellwords(s)} || []
+
265 patches/ruby/1.9.3/head/railsexpress/02-railsbench-gc.patch
@@ -1,26 +1,26 @@
diff --git a/configure.in b/configure.in
-index b006a01..af5acc4 100644
+index c64d981..6099a92 100644
--- a/configure.in
+++ b/configure.in
-@@ -2473,6 +2473,10 @@ if test "$EXEEXT" = .exe; then
+@@ -2416,6 +2416,10 @@ if test "$EXEEXT" = .exe; then
AC_SUBST(EXECUTABLE_EXTS)
fi
-
+
+dnl enable gc debugging
+AC_ARG_ENABLE(gcdebug,
+ AS_HELP_STRING([--enable-gcdebug], [build garbage collector with debugging enabled]),
+ [AC_DEFINE(GC_DEBUG,1)])
dnl }
dnl build section {
-
+
diff --git a/gc.c b/gc.c
-index e65d0ec..169dfab 100644
+index e38930d..c728587 100644
--- a/gc.c
+++ b/gc.c
-@@ -96,6 +96,15 @@ ruby_gc_params_t initial_params = {
+@@ -97,6 +97,15 @@ ruby_gc_params_t initial_params = {
#endif
};
-
+
+#ifndef HAVE_LONG_LONG
+#define LONG_LONG long
+#endif
@@ -31,10 +31,10 @@ index e65d0ec..169dfab 100644
+static double heap_slots_growth_factor = 1.8;
+
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
-
- #define MARK_STACK_MAX 1024
-@@ -291,7 +300,7 @@ typedef struct RVALUE {
- struct RComplex complex;
+
+ #if SIZEOF_LONG == SIZEOF_VOIDP
+@@ -301,7 +310,7 @@ typedef struct RVALUE {
+ struct RComplex complex;
} as;
#ifdef GC_DEBUG
- const char *file;
@@ -42,10 +42,10 @@ index e65d0ec..169dfab 100644
int line;
#endif
} RVALUE;
-@@ -345,11 +354,25 @@ typedef struct rb_objspace {
- size_t free_min;
- size_t final_num;
- size_t do_heap_free;
+@@ -371,11 +380,25 @@ typedef struct rb_objspace {
+ size_t free_min;
+ size_t final_num;
+ size_t do_heap_free;
+ unsigned long max_blocks_to_free;
+ unsigned long freed_blocks;
} heap;
@@ -60,15 +60,15 @@ index e65d0ec..169dfab 100644
+ unsigned long live_after_last_mark_phase;
+ } stats;
+ struct {
- int dont_gc;
- int dont_lazy_sweep;
- int during_gc;
+ int dont_gc;
+ int dont_lazy_sweep;
+ int during_gc;
+ int gc_statistics;
+ int verbose_gc_stats;
} flags;
struct {
- st_table *table;
-@@ -370,6 +393,14 @@ typedef struct rb_objspace {
+ st_table *table;
+@@ -392,6 +415,14 @@ typedef struct rb_objspace {
struct gc_list *global_list;
size_t count;
int gc_stress;
@@ -81,9 +81,9 @@ index e65d0ec..169dfab 100644
+ unsigned long live_objects;
+ unsigned LONG_LONG allocated_objects;
} rb_objspace_t;
-
+
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
-@@ -392,6 +423,16 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
+@@ -414,6 +445,16 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
#define heaps_freed objspace->heap.freed
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
@@ -99,8 +99,8 @@ index e65d0ec..169dfab 100644
+#define allocated_objects objspace->allocated_objects
#define finalizer_table objspace->final.table
#define deferred_final_list objspace->final.deferred
- #define mark_stack objspace->markstack.buffer
-@@ -402,6 +443,14 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
+ #define global_List objspace->global_list
+@@ -421,6 +462,14 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
#define initial_malloc_limit initial_params.initial_malloc_limit
#define initial_heap_min_slots initial_params.initial_heap_min_slots
#define initial_free_min initial_params.initial_free_min
@@ -112,10 +112,10 @@ index e65d0ec..169dfab 100644
+#define freed_objects objspace->stats.freed_objects
+#define gc_time_accumulator_before_gc objspace->stats.gc_time_accumulator_before_gc
+#define live_after_last_mark_phase objspace->stats.live_after_last_mark_phase
-
+
static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
-
-@@ -423,24 +472,59 @@ static void initial_expand_heap(rb_objspace_t *objspace);
+
+@@ -443,24 +492,59 @@ static void init_mark_stack(mark_stack_t *stack);
void
rb_gc_set_params(void)
{
@@ -125,9 +125,9 @@ index e65d0ec..169dfab 100644
+ rb_objspace_t *objspace = &rb_objspace;
+
+ gc_data_file = stderr;
-
+
if (rb_safe_level() > 0) return;
-
+
- malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
- if (malloc_limit_ptr != NULL) {
- int malloc_limit_i = atoi(malloc_limit_ptr);
@@ -161,15 +161,15 @@ index e65d0ec..169dfab 100644
+ if (verbose_gc_stats) {
+ fprintf(gc_data_file, "RUBY_GC_MALLOC_LIMIT=%s\n", envp);
+ }
- if (RTEST(ruby_verbose))
- fprintf(stderr, "malloc_limit=%d (%d)\n",
- malloc_limit_i, initial_malloc_limit);
- if (malloc_limit_i > 0) {
- initial_malloc_limit = malloc_limit_i;
+ if (RTEST(ruby_verbose))
+ fprintf(stderr, "malloc_limit=%d (%d)\n",
+ malloc_limit_i, initial_malloc_limit);
+ if (malloc_limit_i > 0) {
+ initial_malloc_limit = malloc_limit_i;
+ // malloc_limit = initial_malloc_limit;
- }
+ }
}
-
+
- heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
- if (heap_min_slots_ptr != NULL) {
- int heap_min_slots_i = atoi(heap_min_slots_ptr);
@@ -179,13 +179,13 @@ index e65d0ec..169dfab 100644
+ if (verbose_gc_stats) {
+ fprintf(gc_data_file, "RUBY_HEAP_MIN_SLOTS=%s\n", envp);
+ }
- if (RTEST(ruby_verbose))
- fprintf(stderr, "heap_min_slots=%d (%d)\n",
- heap_min_slots_i, initial_heap_min_slots);
-@@ -450,15 +534,42 @@ rb_gc_set_params(void)
- }
+ if (RTEST(ruby_verbose))
+ fprintf(stderr, "heap_min_slots=%d (%d)\n",
+ heap_min_slots_i, initial_heap_min_slots);
+@@ -470,15 +554,42 @@ rb_gc_set_params(void)
+ }
}
-
+
- free_min_ptr = getenv("RUBY_FREE_MIN");
- if (free_min_ptr != NULL) {
- int free_min_i = atoi(free_min_ptr);
@@ -196,11 +196,11 @@ index e65d0ec..169dfab 100644
+ if (verbose_gc_stats) {
+ fprintf(gc_data_file, "RUBY_HEAP_FREE_MIN=%s\n", envp);
+ }
- if (RTEST(ruby_verbose))
- fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
- if (free_min_i > 0) {
- initial_free_min = free_min_i;
- }
+ if (RTEST(ruby_verbose))
+ fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
+ if (free_min_i > 0) {
+ initial_free_min = free_min_i;
+ }
}
+
+ envp = getenv("RUBY_HEAP_SLOTS_INCREMENT");
@@ -226,12 +226,12 @@ index e65d0ec..169dfab 100644
+
+ fflush(gc_data_file);
}
-
+
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
-@@ -753,6 +864,11 @@ vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
+@@ -775,6 +886,11 @@ vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
mem = (size_t *)mem + 1;
#endif
-
+
+ if (gc_statistics) {
+ gc_allocated_size += size;
+ gc_num_allocations += 1;
@@ -239,11 +239,11 @@ index e65d0ec..169dfab 100644
+
return mem;
}
-
-@@ -813,6 +929,13 @@ vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
+
+@@ -835,6 +951,13 @@ vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
mem = (size_t *)mem + 1;
#endif
-
+
+ /* TODO: we can't count correctly unless we store old size on heap
+ if (gc_statistics) {
+ gc_allocated_size += size;
@@ -253,19 +253,19 @@ index e65d0ec..169dfab 100644
+
return mem;
}
-
-@@ -894,7 +1017,6 @@ ruby_xfree(void *x)
- vm_xfree(&rb_objspace, x);
+
+@@ -916,7 +1039,6 @@ ruby_xfree(void *x)
+ vm_xfree(&rb_objspace, x);
}
-
+
-
/*
* call-seq:
* GC.enable -> true or false
-@@ -940,6 +1062,455 @@ rb_gc_disable(void)
+@@ -962,6 +1084,455 @@ rb_gc_disable(void)
return old ? Qtrue : Qfalse;
}
-
+
+/*
+ * call-seq:
+ * GC.enable_stats => true or false
@@ -716,9 +716,9 @@ index e65d0ec..169dfab 100644
+}
+
VALUE rb_mGC;
-
+
void
-@@ -1011,6 +1582,12 @@ allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
+@@ -1033,6 +1604,12 @@ allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
static void
assign_heap_slot(rb_objspace_t *objspace)
{
@@ -731,27 +731,27 @@ index e65d0ec..169dfab 100644
RVALUE *p, *pend, *membase;
struct heaps_slot *slot;
size_t hi, lo, mid;
-@@ -1072,6 +1649,7 @@ assign_heap_slot(rb_objspace_t *objspace)
+@@ -1094,6 +1671,7 @@ assign_heap_slot(rb_objspace_t *objspace)
if (lomem == 0 || lomem > p) lomem = p;
if (himem < pend) himem = pend;
heaps_used++;
+ heap_size += objs;
-
+
while (p < pend) {
- p->as.free.flags = 0;
-@@ -1127,7 +1705,7 @@ initial_expand_heap(rb_objspace_t *objspace)
+ p->as.free.flags = 0;
+@@ -1150,7 +1728,7 @@ initial_expand_heap(rb_objspace_t *objspace)
static void
set_heaps_increment(rb_objspace_t *objspace)
{
- size_t next_heaps_length = (size_t)(heaps_used * 1.8);
+ size_t next_heaps_length = (size_t)(heaps_used * heap_slots_growth_factor);
-
+
if (next_heaps_length == heaps_used) {
next_heaps_length++;
-@@ -1160,6 +1738,22 @@ rb_during_gc(void)
-
+@@ -1183,6 +1761,22 @@ rb_during_gc(void)
+
#define RANY(o) ((RVALUE*)(o))
-
+
+#ifdef GC_DEBUG
+static VALUE
+_rb_sourcefile(void)
@@ -771,8 +771,8 @@ index e65d0ec..169dfab 100644
VALUE
rb_newobj(void)
{
-@@ -1191,9 +1785,11 @@ rb_newobj(void)
-
+@@ -1214,9 +1808,11 @@ rb_newobj(void)
+
MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
- RANY(obj)->file = rb_sourcefile();
@@ -782,12 +782,12 @@ index e65d0ec..169dfab 100644
+ live_objects++;
+ allocated_objects++;
GC_PROF_INC_LIVE_NUM;
-
+
return obj;
-@@ -1660,6 +2256,12 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
+@@ -1768,6 +2364,12 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
{
register RVALUE *obj = RANY(ptr);
-
+
+#ifdef GC_DEBUG
+ if (obj->file && obj->file != Qnil && is_pointer_to_heap(objspace, (void*)obj->file)) {
+ gc_mark(objspace, obj->file, lev);
@@ -795,12 +795,12 @@ index e65d0ec..169dfab 100644
+#endif
+
goto marking; /* skip */
-
+
again:
-@@ -1670,6 +2272,12 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
+@@ -1778,6 +2380,12 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
obj->as.basic.flags |= FL_MARK;
objspace->heap.live_num++;
-
+
+#ifdef GC_DEBUG
+ if (obj->file && obj->file != Qnil && is_pointer_to_heap(objspace, (void*)obj->file)) {
+ gc_mark(objspace, obj->file, lev);
@@ -809,11 +809,11 @@ index e65d0ec..169dfab 100644
+
marking:
if (FL_TEST(obj, FL_EXIVAR)) {
- rb_mark_generic_ivar(ptr);
-@@ -2012,6 +2620,25 @@ free_unused_heaps(rb_objspace_t *objspace)
+ rb_mark_generic_ivar(ptr);
+@@ -2120,6 +2728,25 @@ free_unused_heaps(rb_objspace_t *objspace)
}
}
-
+
+static inline unsigned long
+elapsed_musecs(struct timeval since)
+{
@@ -836,7 +836,7 @@ index e65d0ec..169dfab 100644
static void
slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
{
-@@ -2019,14 +2646,23 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
+@@ -2127,14 +2754,23 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
RVALUE *p, *pend;
RVALUE *free = freelist, *final = deferred_final_list;
int deferred;
@@ -844,7 +844,7 @@ index e65d0ec..169dfab 100644
+
+ struct timeval tv1;
+ if (gc_statistics) gettimeofday(&tv1, NULL);
-
+
p = sweep_slot->slot; pend = p + sweep_slot->limit;
while (p < pend) {
if (!(p->as.basic.flags & FL_MARK)) {
@@ -854,13 +854,13 @@ index e65d0ec..169dfab 100644
+ }
if (p->as.basic.flags &&
((deferred = obj_free(objspace, (VALUE)p)) ||
- (FL_TEST(p, FL_FINALIZE)))) {
+ (FL_TEST(p, FL_FINALIZE)))) {
if (!deferred) {
+ if (do_gc_stats) zombies++;
p->as.free.flags = T_ZOMBIE;
RDATA(p)->dfree = 0;
}
-@@ -2036,6 +2672,10 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
+@@ -2144,6 +2780,10 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
final_num++;
}
else {
@@ -871,7 +871,7 @@ index e65d0ec..169dfab 100644
add_freelist(objspace, p);
free_num++;
}
-@@ -2043,13 +2683,22 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
+@@ -2151,13 +2791,22 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
/* objects to be finalized */
/* do nothing remain marked */
@@ -894,8 +894,8 @@ index e65d0ec..169dfab 100644
+ final_num + free_num == sweep_slot->limit &&
objspace->heap.free_num > objspace->heap.do_heap_free) {
RVALUE *pp;
-
-@@ -2060,6 +2709,8 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
+
+@@ -2168,6 +2817,8 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
sweep_slot->limit = final_num;
freelist = free; /* cancel this page from freelist */
unlink_heap_slot(objspace, sweep_slot);
@@ -904,7 +904,7 @@ index e65d0ec..169dfab 100644
}
else {
objspace->heap.free_num += free_num;
-@@ -2072,6 +2723,10 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
+@@ -2180,6 +2831,10 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
RUBY_VM_SET_FINALIZER_INTERRUPT(th);
}
}
@@ -913,9 +913,9 @@ index e65d0ec..169dfab 100644
+ gc_time_accumulator += elapsed_musecs(tv1);
+ }
}
-
+
static int
-@@ -2092,6 +2747,21 @@ ready_to_gc(rb_objspace_t *objspace)
+@@ -2200,6 +2855,21 @@ ready_to_gc(rb_objspace_t *objspace)
static void
before_gc_sweep(rb_objspace_t *objspace)
{
@@ -937,7 +937,7 @@ index e65d0ec..169dfab 100644
freelist = 0;
objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
-@@ -2111,8 +2781,13 @@ before_gc_sweep(rb_objspace_t *objspace)
+@@ -2219,8 +2889,13 @@ before_gc_sweep(rb_objspace_t *objspace)
static void
after_gc_sweep(rb_objspace_t *objspace)
{
@@ -945,15 +945,15 @@ index e65d0ec..169dfab 100644
+ struct timeval tv1;
+
GC_PROF_SET_MALLOC_INFO;
-
+
+ if (gc_statistics) gettimeofday(&tv1, NULL);
+
if (objspace->heap.free_num < objspace->heap.free_min) {
set_heaps_increment(objspace);
heaps_increment(objspace);
-@@ -2125,6 +2800,29 @@ after_gc_sweep(rb_objspace_t *objspace)
+@@ -2233,6 +2908,29 @@ after_gc_sweep(rb_objspace_t *objspace)
malloc_increase = 0;
-
+
free_unused_heaps(objspace);
+
+ if (gc_statistics) {
@@ -979,12 +979,12 @@ index e65d0ec..169dfab 100644
+ }
+ }
}
-
+
static int
-@@ -2158,9 +2856,11 @@ rest_sweep(rb_objspace_t *objspace)
-
+@@ -2266,9 +2964,11 @@ rest_sweep(rb_objspace_t *objspace)
+
static void gc_marks(rb_objspace_t *objspace);
-
+
+/* only called from rb_new_obj */
static int
gc_lazy_sweep(rb_objspace_t *objspace)
@@ -992,8 +992,8 @@ index e65d0ec..169dfab 100644
+ struct timeval gctv1;
int res;
INIT_GC_PROF_PARAMS;
-
-@@ -2182,7 +2882,6 @@ gc_lazy_sweep(rb_objspace_t *objspace)
+
+@@ -2290,7 +2990,6 @@ gc_lazy_sweep(rb_objspace_t *objspace)
GC_PROF_TIMER_STOP(Qfalse);
return res;
}
@@ -1001,7 +1001,7 @@ index e65d0ec..169dfab 100644
}
else {
if (heaps_increment(objspace)) {
-@@ -2190,6 +2889,18 @@ gc_lazy_sweep(rb_objspace_t *objspace)
+@@ -2298,6 +2997,18 @@ gc_lazy_sweep(rb_objspace_t *objspace)
return TRUE;
}
}
@@ -1017,13 +1017,13 @@ index e65d0ec..169dfab 100644
+ }
+ */
+ }
-
+
gc_marks(objspace);
-
-@@ -2198,6 +2909,10 @@ gc_lazy_sweep(rb_objspace_t *objspace)
- set_heaps_increment(objspace);
+
+@@ -2306,6 +3017,10 @@ gc_lazy_sweep(rb_objspace_t *objspace)
+ set_heaps_increment(objspace);
}
-
+
+ if (gc_statistics) {
+ gc_time_accumulator += elapsed_musecs(gctv1);
+ }
@@ -1031,18 +1031,18 @@ index e65d0ec..169dfab 100644
GC_PROF_SWEEP_TIMER_START;
if(!(res = lazy_sweep(objspace))) {
after_gc_sweep(objspace);
-@@ -2209,6 +2924,7 @@ gc_lazy_sweep(rb_objspace_t *objspace)
+@@ -2317,6 +3032,7 @@ gc_lazy_sweep(rb_objspace_t *objspace)
GC_PROF_SWEEP_TIMER_STOP;
-
+
GC_PROF_TIMER_STOP(Qtrue);
+
return res;
}
-
-@@ -2435,9 +3151,15 @@ gc_marks(rb_objspace_t *objspace)
+
+@@ -2543,9 +3259,15 @@ gc_marks(rb_objspace_t *objspace)
rb_thread_t *th = GET_THREAD();
GC_PROF_MARK_TIMER_START;
-
+
+ /*
+ if (gc_statistics & verbose_gc_stats) {
+ fprintf(gc_data_file, "Marking objects\n");
@@ -1053,29 +1053,29 @@ index e65d0ec..169dfab 100644
objspace->count++;
-
+ live_objects = 0;
-
+
SET_STACK_END;
-
-@@ -2477,11 +3199,15 @@ gc_marks(rb_objspace_t *objspace)
- }
- }
+
+@@ -2577,11 +3299,15 @@ gc_marks(rb_objspace_t *objspace)
+ gc_mark_stacked_objects(objspace);
+
GC_PROF_MARK_TIMER_STOP;
+
+ live_after_last_mark_phase = objspace->heap.live_num;
}
-
+
static int
garbage_collect(rb_objspace_t *objspace)
{
+ struct timeval gctv1;
+
INIT_GC_PROF_PARAMS;
-
+
if (GC_NOTIFY) printf("start garbage_collect()\n");
-@@ -2497,15 +3223,31 @@ garbage_collect(rb_objspace_t *objspace)
-
+@@ -2597,15 +3323,31 @@ garbage_collect(rb_objspace_t *objspace)
+
rest_sweep(objspace);
-
+
+ if (gc_statistics) {
+ gc_time_accumulator_before_gc = gc_time_accumulator;
+ gc_collections++;
@@ -1089,7 +1089,7 @@ index e65d0ec..169dfab 100644
+
during_gc++;
gc_marks(objspace);
-
+
+ if (gc_statistics) {
+ gc_time_accumulator += elapsed_musecs(gctv1);
+ }
@@ -1097,17 +1097,17 @@ index e65d0ec..169dfab 100644
GC_PROF_SWEEP_TIMER_START;
gc_sweep(objspace);
GC_PROF_SWEEP_TIMER_STOP;
-
+
GC_PROF_TIMER_STOP(Qtrue);
if (GC_NOTIFY) printf("end garbage_collect()\n");
+
return TRUE;
}
-
-@@ -2994,6 +3736,39 @@ rb_gc_call_finalizer_at_exit(void)
+
+@@ -3094,6 +3836,39 @@ rb_gc_call_finalizer_at_exit(void)
rb_objspace_call_finalizer(&rb_objspace);
}
-
+
+static const char* obj_type(VALUE type)
+{
+ switch (type) {
@@ -1144,10 +1144,10 @@ index e65d0ec..169dfab 100644
static void
rb_objspace_call_finalizer(rb_objspace_t *objspace)
{
-@@ -3307,6 +4082,49 @@ count_objects(int argc, VALUE *argv, VALUE os)
+@@ -3408,6 +4183,49 @@ count_objects(int argc, VALUE *argv, VALUE os)
return hash;
}
-
+
+/* call-seq:
+ * ObjectSpace.live_objects => number
+ *
@@ -1194,10 +1194,10 @@ index e65d0ec..169dfab 100644
/*
* call-seq:
* GC.count -> Integer
-@@ -3599,6 +4417,28 @@ Init_GC(void)
+@@ -3700,6 +4518,28 @@ Init_GC(void)
rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
-
+
+ rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0);
+ rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0);
+ rb_define_singleton_method(rb_mGC, "stats_enabled?", rb_gc_stats_enabled, 0);
@@ -1223,12 +1223,13 @@ index e65d0ec..169dfab 100644
rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
-@@ -3612,6 +4452,9 @@ Init_GC(void)
+@@ -3713,6 +4553,9 @@ Init_GC(void)
rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
-
+
+ rb_define_module_function(rb_mObSpace, "live_objects", os_live_objects, 0);
+ rb_define_module_function(rb_mObSpace, "allocated_objects", os_allocated_objects, 0);
+
rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);
+
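The Init_GC hunk above registers several new singleton methods on GC (enable_stats, disable_stats, stats_enabled?, clear_stats, log, plus the allocation counters wired up in later patches) and two module functions on ObjectSpace (live_objects, allocated_objects). A hedged Ruby sketch of how a patched 1.9.3 might be exercised; names not visible in the diff are marked as assumptions:

```ruby
# Minimal sketch against a railsexpress-patched 1.9.3. Method names are taken
# from the rb_define_* calls in the diff above; GC.collections and GC.time are
# assumed from the gc_collections / gc_time_accumulator counters and may differ.
GC.enable_stats
GC.clear_stats

10_000.times { "some string work" * 10 }

if GC.stats_enabled?
  puts "allocated bytes:   #{GC.allocated_size}"
  puts "allocations:       #{GC.num_allocations}"
  puts "collections:       #{GC.collections}"        # assumed accessor
  puts "gc time (usec):    #{GC.time}"               # assumed accessor
  puts "live objects:      #{ObjectSpace.live_objects}"
  puts "allocated objects: #{ObjectSpace.allocated_objects}"
end

GC.disable_stats
```

The rb_gc_set_params hunk also shows the patch reading RUBY_GC_MALLOC_LIMIT, RUBY_HEAP_MIN_SLOTS, RUBY_HEAP_FREE_MIN and RUBY_HEAP_SLOTS_INCREMENT from the environment at startup, so heap sizing can be tuned without code changes.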
8 patches/ruby/1.9.3/head/railsexpress/03-display-more-detailed-stack-trace.patch
@@ -4,12 +4,12 @@ index fd06adf..69c3b48 100644
+++ b/eval_error.c
@@ -164,8 +164,8 @@ error_print(void)
int skip = eclass == rb_eSysStackError;
-
+
#define TRACE_MAX (TRACE_HEAD+TRACE_TAIL+5)
-#define TRACE_HEAD 8
-#define TRACE_TAIL 5
+#define TRACE_HEAD 100
+#define TRACE_TAIL 100
-
- for (i = 1; i < len; i++) {
- if (TYPE(ptr[i]) == T_STRING) {
+
+ for (i = 1; i < len; i++) {
+ if (TYPE(ptr[i]) == T_STRING) {
20 patches/ruby/1.9.3/head/railsexpress/04-fork-support-for-gc-logging.patch
@@ -1,10 +1,10 @@
diff --git a/gc.c b/gc.c
-index 169dfab..629a73a 100644
+index c728587..6b6c9f0 100644
--- a/gc.c
+++ b/gc.c
-@@ -1322,6 +1322,34 @@ rb_gc_log_file(int argc, VALUE *argv, VALUE self)
+@@ -1344,6 +1344,34 @@ rb_gc_log_file(int argc, VALUE *argv, VALUE self)
}
-
+
/*
+ * Called from process.c before a fork. Flushes the gc log file to
+ * avoid writing the buffered output twice (once in the parent, and
@@ -38,10 +38,10 @@ index 169dfab..629a73a 100644
* GC.log String => String
*
diff --git a/include/ruby/intern.h b/include/ruby/intern.h
-index 927b536..9da266e 100644
+index 9745afd..7c3d970 100644
--- a/include/ruby/intern.h
+++ b/include/ruby/intern.h
-@@ -425,6 +425,8 @@ void rb_gc_call_finalizer_at_exit(void);
+@@ -426,6 +426,8 @@ void rb_gc_call_finalizer_at_exit(void);
VALUE rb_gc_enable(void);
VALUE rb_gc_disable(void);
VALUE rb_gc_start(void);
@@ -56,13 +56,13 @@ index 99cfc69..8bee602 100644
+++ b/process.c
@@ -2804,9 +2804,11 @@ rb_f_fork(VALUE obj)
rb_pid_t pid;
-
+
rb_secure(2);
+ rb_gc_before_fork();
-
+
switch (pid = rb_fork(0, 0, 0, Qnil)) {
case 0:
+ rb_gc_after_fork();
- rb_thread_atfork();
- if (rb_block_given_p()) {
- int status;
+ rb_thread_atfork();
+ if (rb_block_given_p()) {
+ int status;
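The process.c hunk wraps rb_fork with rb_gc_before_fork / rb_gc_after_fork so that buffered GC log output is flushed in the parent rather than duplicated into the child. Nothing new is exposed at the Ruby level; below is a hedged sketch of the situation the hooks protect, assuming GC.log_file and GC.log are the Ruby bindings of rb_gc_log_file and rb_gc_log from the GC patch:

```ruby
# Hedged sketch: GC.log_file / GC.log are assumed to be the Ruby-level
# bindings of rb_gc_log_file / rb_gc_log shown in the railsbench GC patch.
GC.log_file("/tmp/gc.log")    # gc_data_file now points at a buffered FILE*
GC.log("before fork")         # sits in the stdio buffer, not yet on disk

pid = fork do
  # Without the pre-fork flush, the child would inherit the unflushed buffer
  # and "before fork" would be written to /tmp/gc.log twice.
  GC.log("in child")
end
Process.wait(pid)
```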
16 patches/ruby/1.9.3/head/railsexpress/05-track-live-dataset-size.patch
@@ -1,19 +1,19 @@
diff --git a/gc.c b/gc.c
-index 629a73a..8ab287e 100644
+index 6b6c9f0..c995568 100644
--- a/gc.c
+++ b/gc.c
-@@ -270,7 +270,6 @@ getrusage_time(void)
+@@ -280,7 +280,6 @@ getrusage_time(void)
#define GC_PROF_DEC_LIVE_NUM
#endif
-
+
-
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
#endif
-@@ -1539,6 +1538,24 @@ rb_gc_time()
+@@ -1561,6 +1560,24 @@ rb_gc_time()
#endif
}
-
+
+/*
+ * call-seq:
+ * GC.heap_slots_live_after_last_gc => Integer
@@ -33,13 +33,13 @@ index 629a73a..8ab287e 100644
+
+
VALUE rb_mGC;
-
+
void
-@@ -4452,6 +4469,7 @@ Init_GC(void)
+@@ -4553,6 +4570,7 @@ Init_GC(void)
rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0);
rb_define_singleton_method(rb_mGC, "num_allocations", rb_gc_num_allocations, 0);
rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0);
+ rb_define_singleton_method(rb_mGC, "heap_slots_live_after_last_gc", rb_gc_heap_slots_live_after_last_gc, 0);
rb_define_const(rb_mGC, "HEAP_SLOT_SIZE", INT2FIX(sizeof(RVALUE)));
-
+
rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1);
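Patch 05 adds GC.heap_slots_live_after_last_gc alongside the GC.heap_slots and GC::HEAP_SLOT_SIZE bindings visible in the Init_GC hunk. A hedged sketch of turning those counters into a rough live-data-set estimate; how the numbers are meant to be combined is an assumption:

```ruby
# Rough live-dataset estimate on a patched interpreter. GC.heap_slots is
# assumed to return the total number of allocated heap slots.
GC.start
live  = GC.heap_slots_live_after_last_gc
total = GC.heap_slots
puts "live data set: ~#{live * GC::HEAP_SLOT_SIZE / 1024} KiB"
puts "heap capacity: ~#{total * GC::HEAP_SLOT_SIZE / 1024} KiB"
```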
42 patches/ruby/1.9.3/head/railsexpress/07-export-a-few-more-symbols-for-ruby-prof.patch
@@ -1,83 +1,83 @@
diff --git a/gc.c b/gc.c
-index 8ab287e..a77ef2c 100644
+index c995568..52b2c56 100644
--- a/gc.c
+++ b/gc.c
-@@ -1029,6 +1029,7 @@ ruby_xfree(void *x)
+@@ -1051,6 +1051,7 @@ ruby_xfree(void *x)
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_enable(void)
{
-@@ -1051,6 +1052,7 @@ rb_gc_enable(void)
+@@ -1073,6 +1074,7 @@ rb_gc_enable(void)
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_disable(void)
{
-@@ -1073,6 +1075,7 @@ rb_gc_disable(void)
+@@ -1095,6 +1097,7 @@ rb_gc_disable(void)
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_enable_stats()
{
-@@ -1094,6 +1097,7 @@ rb_gc_enable_stats()
+@@ -1116,6 +1119,7 @@ rb_gc_enable_stats()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_disable_stats()
{
-@@ -1113,6 +1117,7 @@ rb_gc_disable_stats()
+@@ -1135,6 +1139,7 @@ rb_gc_disable_stats()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_stats_enabled()
{
-@@ -1132,6 +1137,7 @@ rb_gc_stats_enabled()
+@@ -1154,6 +1159,7 @@ rb_gc_stats_enabled()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_clear_stats()
{
-@@ -1199,6 +1205,7 @@ rb_gc_num_allocations()
+@@ -1221,6 +1227,7 @@ rb_gc_num_allocations()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_enable_trace()
{
-@@ -1220,6 +1227,7 @@ rb_gc_enable_trace()
+@@ -1242,6 +1249,7 @@ rb_gc_enable_trace()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_disable_trace()
{
-@@ -1239,6 +1247,7 @@ rb_gc_disable_trace()
+@@ -1261,6 +1269,7 @@ rb_gc_disable_trace()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_trace_enabled()
{
-@@ -1267,6 +1276,7 @@ const char* GC_LOGFILE_IVAR = "@gc_logfile_name";
+@@ -1289,6 +1298,7 @@ const char* GC_LOGFILE_IVAR = "@gc_logfile_name";
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_log_file(int argc, VALUE *argv, VALUE self)
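Patch 07 only marks the GC statistics entry points RUBY_FUNC_EXPORTED so that an external C extension, in practice ruby-prof, can resolve them at link time; no new Ruby methods are defined. A hedged sketch of the kind of profiling run that depends on these exports (the measure-mode constants vary between ruby-prof versions):

```ruby
# Hedged sketch: older ruby-prof releases offer memory/GC measure modes that
# only work when the interpreter exports these patched GC hooks.
require 'ruby-prof'

RubyProf.measure_mode = RubyProf::MEMORY
result = RubyProf.profile do
  10_000.times { Object.new }
end
RubyProf::FlatPrinter.new(result).print(STDOUT)
```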
40 patches/ruby/1.9.3/head/railsexpress/08-thread-variables.patch
@@ -5,7 +5,7 @@ index 30fec33..9f6c172 100644
@@ -103,6 +103,16 @@ with all sufficient information, see the ChangeLog file.
* String#prepend
* String#byteslice
-
+
+ * Thread
+ * added method:
+ * added Thread#thread_variable_get for getting thread local variables
@@ -26,7 +26,7 @@ index e8cb3b1..9ff491d 100644
@@ -27,6 +27,79 @@ class TestThread < Test::Unit::TestCase
end
end
-
+
+ def test_main_thread_variable_in_enumerator
+ assert_equal Thread.main, Thread.current
+
@@ -104,7 +104,7 @@ index e8cb3b1..9ff491d 100644
m = Mutex.new
r = 0
diff --git a/thread.c b/thread.c
-index eb0be9f..a8721c4 100644
+index 5393e4b..6a915d6 100644
--- a/thread.c
+++ b/thread.c
@@ -2112,7 +2112,9 @@ rb_thread_local_aset(VALUE thread, ID id, VALUE val)
@@ -116,10 +116,10 @@ index eb0be9f..a8721c4 100644
+ * thread-local variables, please see <code>Thread#thread_variable_set</code>
+ * and <code>Thread#thread_variable_get</code>.
*/
-
+
static VALUE
@@ -2123,6 +2125,80 @@ rb_thread_aset(VALUE self, VALUE id, VALUE val)
-
+
/*
* call-seq:
+ * thr.thread_variable_get(key) -> obj or nil
@@ -200,19 +200,19 @@ index eb0be9f..a8721c4 100644
*
* Returns <code>true</code> if the given string (or symbol) exists as a
@@ -2993,6 +3069,9 @@ rb_gc_save_machine_context(rb_thread_t *th)
-
+
/*
*
+ * For thread-local variables, please see <code>Thread#thread_local_get</code>
+ * and <code>Thread#thread_local_set</code>.
+ *
*/
-
+
void
@@ -3195,6 +3274,76 @@ thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
return ST_CONTINUE;
}
-
+
+static int
+keys_i(VALUE key, VALUE value, VALUE ary)
+{
@@ -286,7 +286,7 @@ index eb0be9f..a8721c4 100644
/*
* call-seq:
* thgrp.list -> array
-@@ -4670,6 +4819,10 @@ Init_Thread(void)
+@@ -4686,6 +4835,10 @@ Init_Thread(void)
rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
rb_define_method(rb_cThread, "status", rb_thread_status, 0);
@@ -298,22 +298,22 @@ index eb0be9f..a8721c4 100644
rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
diff --git a/vm.c b/vm.c
-index 63141ba..553f180 100644
+index 4dd242f..f00b193 100644
--- a/vm.c
+++ b/vm.c
-@@ -1896,6 +1896,7 @@ ruby_thread_init(VALUE self)
+@@ -1900,6 +1900,7 @@ ruby_thread_init(VALUE self)
GetThreadPtr(self, th);
-
+
th_init(th, self);
+ rb_iv_set(self, "locals", rb_hash_new());
th->vm = vm;
-
+
th->top_wrapper = 0;
-@@ -2164,6 +2165,7 @@ Init_VM(void)
-
- /* create main thread */
- th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
+@@ -2168,6 +2169,7 @@ Init_VM(void)
+
+ /* create main thread */
+ th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
+ rb_iv_set(th_self, "locals", rb_hash_new());
- vm->main_thread = th;
- vm->running_thread = th;
- th->vm = vm;
+ vm->main_thread = th;
+ vm->running_thread = th;
+ th->vm = vm;
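The thread-variables patch backports Thread#thread_variable_get and Thread#thread_variable_set (see the NEWS and test hunks), giving true per-thread storage as opposed to the fiber-local Thread#[] / Thread#[]= accessors; the keys_i helper suggests Thread#thread_variables is included as well. A short sketch of the distinction, with thread_variables treated as an assumption:

```ruby
# Thread-locals vs fiber-locals on the patched interpreter.
Thread.current.thread_variable_set(:request_id, 42)   # per-thread
Thread.current[:request_id] = 42                       # fiber-local

Fiber.new do
  p Thread.current.thread_variable_get(:request_id)    # => 42, shared across fibers
  p Thread.current[:request_id]                         # => nil, new fiber, new locals
end.resume

p Thread.current.thread_variables                       # assumed: => [:request_id]
```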
160 patches/ruby/1.9.3/head/railsexpress/09-faster-loading.patch
@@ -5,7 +5,7 @@ index e427cb3..c33d106 100644
@@ -295,6 +295,22 @@ rb_ary_frozen_p(VALUE ary)
return Qfalse;
}
-
+
+/* This can be used to take a snapshot of an array (with
+ e.g. rb_ary_replace) and check later whether the array has been
+ modified from the snapshot. The snapshot is cheap, though if
@@ -26,13 +26,13 @@ index e427cb3..c33d106 100644
ary_alloc(VALUE klass)
{
diff --git a/file.c b/file.c
-index c1db6d7..a28eb6a 100644
+index 50fa5c7..e991866 100644
--- a/file.c
+++ b/file.c
-@@ -148,23 +148,32 @@ file_path_convert(VALUE name)
+@@ -149,23 +149,32 @@ file_path_convert(VALUE name)
return name;
}
-
+
-static VALUE
-rb_get_path_check(VALUE obj, int level)
+VALUE
@@ -41,18 +41,18 @@ index c1db6d7..a28eb6a 100644
VALUE tmp;
ID to_path;
- rb_encoding *enc;
-
+
if (insecure_obj_p(obj, level)) {
- rb_insecure_operation();
+ rb_insecure_operation();
}
-
+
+ if (RB_TYPE_P(obj, T_STRING)) {
+ return obj;
+ }
CONST_ID(to_path, "to_path");
tmp = rb_check_funcall(obj, to_path, 0, 0);
if (tmp == Qundef) {
- tmp = obj;
+ tmp = obj;
}
StringValue(tmp);
+ return tmp;
@@ -62,13 +62,13 @@ index c1db6d7..a28eb6a 100644
+rb_get_path_check_convert(VALUE obj, VALUE tmp, int level)
+{
+ rb_encoding *enc;
-
+
tmp = file_path_convert(tmp);
if (obj != tmp && insecure_obj_p(tmp, level)) {
-@@ -182,6 +191,13 @@ rb_get_path_check(VALUE obj, int level)
+@@ -183,6 +192,13 @@ rb_get_path_check(VALUE obj, int level)
return rb_str_new4(tmp);
}
-
+
+static VALUE
+rb_get_path_check(VALUE obj, int level)
+{
@@ -79,32 +79,32 @@ index c1db6d7..a28eb6a 100644
VALUE
rb_get_path_no_checksafe(VALUE obj)
{
-@@ -3249,7 +3265,6 @@ rb_file_expand_path(VALUE fname, VALUE dname)
+@@ -3250,7 +3266,6 @@ rb_file_expand_path(VALUE fname, VALUE dname)
VALUE
rb_file_expand_path_fast(VALUE fname, VALUE dname)
{
- check_expand_path_args(fname, dname);
return rb_file_expand_path_internal(fname, dname, 0, 0, EXPAND_PATH_BUFFER());
}
-
-@@ -5237,7 +5252,7 @@ rb_find_file_ext_safe(VALUE *filep, const char *const *ext, int safe_level)
- rb_raise(rb_eSecurityError, "loading from non-absolute path %s", f);
+
+@@ -5238,7 +5253,7 @@ rb_find_file_ext_safe(VALUE *filep, const char *const *ext, int safe_level)
+ rb_raise(rb_eSecurityError, "loading from non-absolute path %s", f);
}
-
+
- RB_GC_GUARD(load_path) = rb_get_load_path();
+ RB_GC_GUARD(load_path) = rb_get_expanded_load_path();
if (!load_path) return 0;
-
+
fname = rb_str_dup(*filep);
-@@ -5302,7 +5317,7 @@ rb_find_file_safe(VALUE path, int safe_level)
- rb_raise(rb_eSecurityError, "loading from non-absolute path %s", f);
+@@ -5303,7 +5318,7 @@ rb_find_file_safe(VALUE path, int safe_level)
+ rb_raise(rb_eSecurityError, "loading from non-absolute path %s", f);
}
-
+
- RB_GC_GUARD(load_path) = rb_get_load_path();
+ RB_GC_GUARD(load_path) = rb_get_expanded_load_path();
if (load_path) {
- long i;
-
+ long i;
+
diff --git a/hash.c b/hash.c
index fbd8237..8423558 100644
--- a/hash.c
@@ -112,14 +112,14 @@ index fbd8237..8423558 100644
@@ -1087,7 +1087,7 @@ clear_i(VALUE key, VALUE value, VALUE dummy)
*
*/
-
+
-static VALUE
+VALUE
rb_hash_clear(VALUE hash)
{
rb_hash_modify_check(hash);
diff --git a/include/ruby/intern.h b/include/ruby/intern.h
-index 9da266e..b82252a 100644
+index 7c3d970..ab1cc94 100644
--- a/include/ruby/intern.h
+++ b/include/ruby/intern.h
@@ -56,6 +56,7 @@ VALUE rb_ary_tmp_new(long);
@@ -130,7 +130,7 @@ index 9da266e..b82252a 100644
VALUE rb_ary_aref(int, VALUE*, VALUE);
VALUE rb_ary_subseq(VALUE, long, long);
void rb_ary_store(VALUE, long, VALUE);
-@@ -442,6 +443,7 @@ VALUE rb_hash_lookup(VALUE, VALUE);
+@@ -443,6 +444,7 @@ VALUE rb_hash_lookup(VALUE, VALUE);
VALUE rb_hash_lookup2(VALUE, VALUE, VALUE);
VALUE rb_hash_fetch(VALUE, VALUE);
VALUE rb_hash_aset(VALUE, VALUE, VALUE);
@@ -149,14 +149,14 @@ index 59c9284..f5af903 100644
+VALUE rb_get_path_check_to_string(VALUE, int);
+VALUE rb_get_path_check_convert(VALUE, VALUE, int);
void Init_File(void);
-
+
#ifdef _WIN32
@@ -119,6 +121,7 @@ VALUE rb_iseq_clone(VALUE iseqval, VALUE newcbase);
-
+
/* load.c */
VALUE rb_get_load_path(void);
+VALUE rb_get_expanded_load_path(void);
-
+
/* math.c */
VALUE rb_math_atan2(VALUE, VALUE);
diff --git a/load.c b/load.c
@@ -166,7 +166,7 @@ index 163ec4c..e766880 100644
@@ -18,7 +18,6 @@ VALUE ruby_dln_librefs;
#define IS_DLEXT(e) (strcmp((e), DLEXT) == 0)
#endif
-
+
-
static const char *const loadable_ext[] = {
".rb", DLEXT,
@@ -174,7 +174,7 @@ index 163ec4c..e766880 100644
@@ -34,21 +33,120 @@ rb_get_load_path(void)
return load_path;
}
-
+
-VALUE
-rb_get_expanded_load_path(void)
+enum expand_type {
@@ -199,7 +199,7 @@ index 163ec4c..e766880 100644
VALUE ary;
long i;
+ int level = rb_safe_level();
-
+
ary = rb_ary_new2(RARRAY_LEN(load_path));
for (i = 0; i < RARRAY_LEN(load_path); ++i) {
- VALUE path = rb_file_expand_path_fast(RARRAY_PTR(load_path)[i], Qnil);
@@ -297,12 +297,12 @@ index 163ec4c..e766880 100644
+ }
+ return vm->expanded_load_path;
}
-
+
static VALUE
@@ -63,12 +161,121 @@ get_loaded_features(void)
return GET_VM()->loaded_features;
}
-
+
+static void
+reset_loaded_features_snapshot(void)
+{
@@ -321,7 +321,7 @@ index 163ec4c..e766880 100644
{
return GET_VM()->loading_table;
}
-
+
+static void
+features_index_add_single(VALUE short_feature, VALUE offset)
+{
@@ -420,19 +420,19 @@ index 163ec4c..e766880 100644
+*/
static VALUE
loaded_feature_path(const char *name, long vlen, const char *feature, long len,
- int type, VALUE load_path)
+ int type, VALUE load_path)
@@ -77,7 +284,7 @@ loaded_feature_path(const char *name, long vlen, const char *feature, long len,
long plen;
const char *e;
-
+
- if(vlen < len) return 0;
+ if (vlen < len+1) return 0;
if (!strncmp(name+(vlen-len),feature,len)){
- plen = vlen - len - 1;
+ plen = vlen - len - 1;
} else {
@@ -88,23 +295,22 @@ loaded_feature_path(const char *name, long vlen, const char *feature, long len,
- return 0;
- plen = e - name - len - 1;
+ return 0;
+ plen = e - name - len - 1;
}
+ if (type == 's' && !IS_DLEXT(&name[plen+len+1])
+ || type == 'r' && !IS_RBEXT(&name[plen+len+1])
@@ -443,11 +443,11 @@ index 163ec4c..e766880 100644
+ (possibly empty) and prefix is some string of length plen. */
+
for (i = 0; i < RARRAY_LEN(load_path); ++i) {
- VALUE p = RARRAY_PTR(load_path)[i];
- const char *s = StringValuePtr(p);
- long n = RSTRING_LEN(p);
-
- if (n != plen ) continue;
+ VALUE p = RARRAY_PTR(load_path)[i];
+ const char *s = StringValuePtr(p);
+ long n = RSTRING_LEN(p);
+
+ if (n != plen ) continue;
- if (n && (strncmp(name, s, n) || name[n] != '/')) continue;
- switch (type) {
- case 's':
@@ -474,7 +474,7 @@ index 163ec4c..e766880 100644
long i, len, elen, n;
st_table *loading_tbl;
@@ -151,8 +357,39 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
- type = 0;
+ type = 0;
}
features = get_loaded_features();
- for (i = 0; i < RARRAY_LEN(features); ++i) {
@@ -512,33 +512,33 @@ index 163ec4c..e766880 100644
+ for (i = 0; this_feature_index != Qnil && i < RARRAY_LEN(this_feature_index); i++) {
+ long index = FIX2LONG(rb_ary_entry(this_feature_index, i));
+ v = RARRAY_PTR(features)[index];
- f = StringValuePtr(v);
- if ((n = RSTRING_LEN(v)) < len) continue;
- if (strncmp(f, feature, len) != 0) {
+ f = StringValuePtr(v);
+ if ((n = RSTRING_LEN(v)) < len) continue;
+ if (strncmp(f, feature, len) != 0) {
@@ -175,6 +412,7 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
- return 'r';
- }
+ return 'r';
+ }
}
+
loading_tbl = get_loading_table();
if (loading_tbl) {
- f = 0;
+ f = 0;
@@ -183,7 +421,7 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
- fs.name = feature;
- fs.len = len;
- fs.type = type;
+ fs.name = feature;
+ fs.len = len;
+ fs.type = type;
- fs.load_path = load_path ? load_path : rb_get_load_path();
+ fs.load_path = load_path ? load_path : rb_get_expanded_load_path();
- fs.result = 0;
- st_foreach(loading_tbl, loaded_feature_path_i, (st_data_t)&fs);
- if ((f = fs.result) != 0) {
+ fs.result = 0;
+ st_foreach(loading_tbl, loaded_feature_path_i, (st_data_t)&fs);
+ if ((f = fs.result) != 0) {
@@ -233,7 +471,7 @@ rb_feature_provided(const char *feature, const char **loading)
-
+
if (*feature == '.' &&
- (feature[1] == '/' || strncmp(feature+1, "./", 2) == 0)) {
+ (feature[1] == '/' || strncmp(feature+1, "./", 2) == 0)) {
- fullpath = rb_file_expand_path_fast(rb_str_new2(feature), Qnil);
+ fullpath = rb_file_expand_path_fast(rb_get_path(rb_str_new2(feature)), Qnil);
- feature = RSTRING_PTR(fullpath);
+ feature = RSTRING_PTR(fullpath);
}
if (ext && !strchr(ext, '/')) {
@@ -254,11 +492,18 @@ rb_feature_provided(const char *feature, const char **loading)
@@ -550,8 +550,8 @@ index 163ec4c..e766880 100644
+
+ features = get_loaded_features();
+ if (OBJ_FROZEN(features)) {
- rb_raise(rb_eRuntimeError,
- "$LOADED_FEATURES is frozen; cannot append feature");
+ rb_raise(rb_eRuntimeError,
+ "$LOADED_FEATURES is frozen; cannot append feature");
}
- rb_ary_push(get_loaded_features(), feature);
+ rb_str_freeze(feature);
@@ -560,7 +560,7 @@ index 163ec4c..e766880 100644
+ features_index_add(feature, INT2FIX(RARRAY_LEN(features)-1));
+ reset_loaded_features_snapshot();
}
-
+
void
@@ -774,10 +1019,15 @@ Init_load()
rb_alias_variable(rb_intern("$-I"), id_load_path);
@@ -569,13 +569,13 @@ index 163ec4c..e766880 100644
+ vm->expanded_load_path = rb_ary_new();
+ vm->load_path_snapshot = rb_ary_new();
+ vm->load_path_check_cache = 0;
-
+
rb_define_virtual_variable("$\"", get_loaded_features, 0);
rb_define_virtual_variable("$LOADED_FEATURES", get_loaded_features, 0);
vm->loaded_features = rb_ary_new();
+ vm->loaded_features_snapshot = rb_ary_new();
+ vm->loaded_features_index = rb_hash_new();
-
+
rb_define_global_function("load", rb_f_load, -1);
rb_define_global_function("require", rb_f_require, 1);
diff --git a/ruby.c b/ruby.c
@@ -583,13 +583,13 @@ index 3ddd96c..7ffc78e 100644
--- a/ruby.c
+++ b/ruby.c
@@ -1366,7 +1366,8 @@ process_options(int argc, char **argv, struct cmdline_options *opt)
- long i;
- VALUE load_path = GET_VM()->load_path;
- for (i = 0; i < RARRAY_LEN(load_path); ++i) {
+ long i;
+ VALUE load_path = GET_VM()->load_path;
+ for (i = 0; i < RARRAY_LEN(load_path); ++i) {
- rb_enc_associate(RARRAY_PTR(load_path)[i], lenc);
+ RARRAY_PTR(load_path)[i] =
+ rb_enc_associate(rb_str_dup(RARRAY_PTR(load_path)[i]), lenc);
- }
+ }
}
if (!(opt->disable & DISABLE_BIT(gems))) {
diff --git a/test/ruby/test_require.rb b/test/ruby/test_require.rb
@@ -712,24 +712,24 @@ index 58a9ee2..ec75096 100644
+ end
end
diff --git a/vm.c b/vm.c
-index 553f180..3436898 100644
+index f00b193..4a97fba 100644
--- a/vm.c
+++ b/vm.c
-@@ -1578,7 +1578,12 @@ rb_vm_mark(void *ptr)
- RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
- RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
- RUBY_MARK_UNLESS_NULL(vm->load_path);
+@@ -1582,7 +1582,12 @@ rb_vm_mark(void *ptr)
+ RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
+ RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
+ RUBY_MARK_UNLESS_NULL(vm->load_path);
+ RUBY_MARK_UNLESS_NULL(vm->load_path_snapshot);
+ RUBY_MARK_UNLESS_NULL(vm->load_path_check_cache);
+ RUBY_MARK_UNLESS_NULL(vm->expanded_load_path);
- RUBY_MARK_UNLESS_NULL(vm->loaded_features);
+ RUBY_MARK_UNLESS_NULL(vm->loaded_features);
+ RUBY_MARK_UNLESS_NULL(vm->loaded_features_snapshot);
+ RUBY_MARK_UNLESS_NULL(vm->loaded_features_index);
- RUBY_MARK_UNLESS_NULL(vm->top_self);
- RUBY_MARK_UNLESS_NULL(vm->coverages);
- rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);
+ RUBY_MARK_UNLESS_NULL(vm->top_self);
+ RUBY_MARK_UNLESS_NULL(vm->coverages);
+ rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);
diff --git a/vm_core.h b/vm_core.h
-index 60146f0..7b25806 100644
+index 1a10162..c6829a3 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -298,7 +298,12 @@ typedef struct rb_vm_struct {
@@ -743,5 +743,5 @@ index 60146f0..7b25806 100644
+ VALUE loaded_features_snapshot;
+ VALUE loaded_features_index;
struct st_table *loading_table;
-
+
/* signal */
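The faster-loading patch caches the expanded $LOAD_PATH (rb_get_expanded_load_path plus the load_path_snapshot and load_path_check_cache VM fields) and builds an index over $LOADED_FEATURES, so each require no longer rescans both arrays linearly. A hedged illustration of the workload it targets; the numbers are machine- and application-dependent, and this is not a benchmark of the patch itself:

```ruby
# Illustration of the hot path the patch optimizes: many requires against a
# long load path. The fake paths and counts below are arbitrary.
require 'benchmark'

$LOAD_PATH.concat((1..200).map { |i| "/tmp/fake/gems/lib#{i}" })

puts Benchmark.realtime {
  500.times do |i|
    begin
      require "missing_library_#{i}"   # forces a full load-path scan per call
    rescue LoadError
    end
  end
}
```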
2,308 patches/ruby/1.9.3/head/railsexpress/10-falcon-st-opt.patch
@@ -0,0 +1,2308 @@
+diff --git a/common.mk b/common.mk
+index ccc9647..1d8e442 100644
+--- a/common.mk
++++ b/common.mk
+@@ -638,7 +638,8 @@ file.$(OBJEXT): {$(VPATH)}file.c $(RUBY_H_INCLUDES) {$(VPATH)}io.h \
+ gc.$(OBJEXT): {$(VPATH)}gc.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
+ {$(VPATH)}regex.h $(ENCODING_H_INCLUDES) $(VM_CORE_H_INCLUDES) \
+ {$(VPATH)}gc.h {$(VPATH)}io.h {$(VPATH)}eval_intern.h {$(VPATH)}util.h \
+- {$(VPATH)}debug.h {$(VPATH)}internal.h {$(VPATH)}constant.h
++ {$(VPATH)}debug.h {$(VPATH)}internal.h {$(VPATH)}constant.h \
++ {$(VPATH)}pool_alloc.inc.h {$(VPATH)}pool_alloc.h
+ hash.$(OBJEXT): {$(VPATH)}hash.c $(RUBY_H_INCLUDES) {$(VPATH)}util.h \
+ $(ENCODING_H_INCLUDES)
+ inits.$(OBJEXT): {$(VPATH)}inits.c $(RUBY_H_INCLUDES) \
+@@ -702,7 +703,7 @@ signal.$(OBJEXT): {$(VPATH)}signal.c $(RUBY_H_INCLUDES) \
+ $(VM_CORE_H_INCLUDES) {$(VPATH)}debug.h
+ sprintf.$(OBJEXT): {$(VPATH)}sprintf.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
+ {$(VPATH)}regex.h {$(VPATH)}vsnprintf.c $(ENCODING_H_INCLUDES)
+-st.$(OBJEXT): {$(VPATH)}st.c $(RUBY_H_INCLUDES)
++st.$(OBJEXT): {$(VPATH)}st.c $(RUBY_H_INCLUDES) {$(VPATH)}pool_alloc.h
+ strftime.$(OBJEXT): {$(VPATH)}strftime.c $(RUBY_H_INCLUDES) \
+ {$(VPATH)}timev.h
+ string.$(OBJEXT): {$(VPATH)}string.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
+diff --git a/configure.in b/configure.in
+index 6099a92..ee6012e 100644
+--- a/configure.in
++++ b/configure.in
+@@ -1367,7 +1367,8 @@ AC_CHECK_FUNCS(fmod killpg wait4 waitpid fork spawnv syscall __syscall chroot ge
+ setsid telldir seekdir fchmod cosh sinh tanh log2 round\
+ setuid setgid daemon select_large_fdset setenv unsetenv\
+ mktime timegm gmtime_r clock_gettime gettimeofday poll ppoll\
+- pread sendfile shutdown sigaltstack dl_iterate_phdr)
++ pread sendfile shutdown sigaltstack dl_iterate_phdr\
++ dup3 pipe2 posix_memalign memalign)
+
+ AC_CACHE_CHECK(for unsetenv returns a value, rb_cv_unsetenv_return_value,
+ [AC_TRY_COMPILE([
+diff --git a/ext/-test-/st/numhash/numhash.c b/ext/-test-/st/numhash/numhash.c
+index e186cd4..53d9e1b 100644
+--- a/ext/-test-/st/numhash/numhash.c
++++ b/ext/-test-/st/numhash/numhash.c
+@@ -54,7 +54,7 @@ numhash_i(st_data_t key, st_data_t value, st_data_t arg, int error)
+ static VALUE
+ numhash_each(VALUE self)
+ {
+- return st_foreach((st_table *)DATA_PTR(self), numhash_i, self) ? Qtrue : Qfalse;
++ return st_foreach_check((st_table *)DATA_PTR(self), numhash_i, self, 0) ? Qtrue : Qfalse;
+ }
+
+ void
+diff --git a/gc.c b/gc.c
+index 52b2c56..7f78316 100644
+--- a/gc.c
++++ b/gc.c
+@@ -20,11 +20,13 @@
+ #include "vm_core.h"
+ #include "internal.h"
+ #include "gc.h"
++#include "pool_alloc.h"
+ #include "constant.h"
+ #include "ruby_atomic.h"
+ #include <stdio.h>
+ #include <setjmp.h>
+ #include <sys/types.h>
++#include <assert.h>
+
+ #ifdef HAVE_SYS_TIME_H
+ #include <sys/time.h>
+@@ -36,7 +38,12 @@
+
+ #if defined _WIN32 || defined __CYGWIN__
+ #include <windows.h>
++#elif defined(HAVE_POSIX_MEMALIGN)
++#elif defined(HAVE_MEMALIGN)
++#include <malloc.h>
+ #endif
++static void aligned_free(void *);
++static void *aligned_malloc(size_t alignment, size_t size);
+
+ #ifdef HAVE_VALGRIND_MEMCHECK_H
+ # include <valgrind/memcheck.h>
+@@ -355,6 +362,24 @@ typedef struct mark_stack {
+
+ #define CALC_EXACT_MALLOC_SIZE 0
+
++#ifdef POOL_ALLOC_API
++/* POOL ALLOC API */
++#define POOL_ALLOC_PART 1
++#include "pool_alloc.inc.h"
++#undef POOL_ALLOC_PART
++
++typedef struct pool_layout_t pool_layout_t;
++struct pool_layout_t {
++ pool_header
++ p6, /* st_table && st_table_entry */
++ p11; /* st_table.bins init size */
++} pool_layout = {
++ INIT_POOL(void*[6]),
++ INIT_POOL(void*[11])
++};
++static void pool_finalize_header(pool_header *header);
++#endif
++
+ typedef struct rb_objspace {
+ struct {
+ size_t limit;
+@@ -364,6 +389,9 @@ typedef struct rb_objspace {
+ size_t allocations;
+ #endif
+ } malloc_params;
++#ifdef POOL_ALLOC_API
++ pool_layout_t *pool_headers;
++#endif
+ struct {
+ size_t increment;
+ struct heaps_slot *ptr;
+@@ -429,7 +457,11 @@ typedef struct rb_objspace {
+ #define ruby_initial_gc_stress initial_params.gc_stress
+ int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
+ #else
++# ifdef POOL_ALLOC_API
++static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, &pool_layout, {HEAP_MIN_SLOTS}};
++# else
+ static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
++# endif
+ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
+ #endif
+ #define malloc_limit objspace->malloc_params.limit
+@@ -480,6 +512,10 @@ rb_objspace_alloc(void)
+ memset(objspace, 0, sizeof(*objspace));
+ malloc_limit = initial_malloc_limit;
+ ruby_gc_stress = ruby_initial_gc_stress;
++#ifdef POOL_ALLOC_API
++ objspace->pool_headers = (pool_layout_t*) malloc(sizeof(pool_layout));
++ memcpy(objspace->pool_headers, &pool_layout, sizeof(pool_layout));
++#endif
+
+ return objspace;
+ }
+@@ -623,6 +659,13 @@ rb_objspace_free(rb_objspace_t *objspace)
+ heaps = 0;
+ }
+ free_stack_chunks(&objspace->mark_stack);
++#ifdef POOL_ALLOC_API
++ if (objspace->pool_headers) {
++ pool_finalize_header(&objspace->pool_headers->p6);
++ pool_finalize_header(&objspace->pool_headers->p11);
++ free(objspace->pool_headers);
++ }
++#endif
+ free(objspace);
+ }
+ #endif
+@@ -1147,6 +1190,27 @@ rb_gc_stats_enabled()
+ return gc_statistics ? Qtrue : Qfalse;
+ }
+
++#ifdef POOL_ALLOC_API
++/* POOL ALLOC API */
++#define POOL_ALLOC_PART 2
++#include "pool_alloc.inc.h"
++#undef POOL_ALLOC_PART
++
++void
++ruby_xpool_free(void *ptr)
++{
++ pool_free_entry((void**)ptr);
++}
++
++#define CONCRET_POOL_MALLOC(pnts) \
++void * ruby_xpool_malloc_##pnts##p () { \
++ return pool_alloc_entry(&rb_objspace.pool_headers->p##pnts ); \
++}
++CONCRET_POOL_MALLOC(6)
++CONCRET_POOL_MALLOC(11)
++#undef CONCRET_POOL_MALLOC
++
++#endif
+
+ /*
+ * call-seq:
+@@ -1656,6 +1720,55 @@ allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
+ heaps_length = next_heaps_length;
+ }
+
++static void *
++aligned_malloc(size_t alignment, size_t size)
++{
++ void *res;
++
++#if defined __MINGW32__
++ res = __mingw_aligned_malloc(size, alignment);
++#elif defined _WIN32 && !defined __CYGWIN__
++ res = _aligned_malloc(size, alignment);
++#elif defined(HAVE_POSIX_MEMALIGN)
++ if (posix_memalign(&res, alignment, size) == 0) {
++ return res;
++ }
++ else {
++ return NULL;
++ }
++#elif defined(HAVE_MEMALIGN)
++ res = memalign(alignment, size);
++#else
++ char* aligned;
++ res = malloc(alignment + size + sizeof(void*));
++ aligned = (char*)res + alignment + sizeof(void*);
++ aligned -= ((VALUE)aligned & (alignment - 1));
++ ((void**)aligned)[-1] = res;
++ res = (void*)aligned;
++#endif
++
++#if defined(_DEBUG) || defined(GC_DEBUG)
++ /* alignment must be a power of 2 */
++ assert((alignment - 1) & alignment == 0);
++ assert(alignment % sizeof(void*) == 0);
++#endif
++ return res;
++}
++
++static void
++aligned_free(void *ptr)
++{
++#if defined __MINGW32__
++ __mingw_aligned_free(ptr);
++#elif defined _WIN32 && !defined __CYGWIN__
++ _aligned_free(ptr);
++#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
++ free(ptr);
++#else
++ free(((void**)ptr)[-1]);
++#endif
++}
++
+ static void
+ assign_heap_slot(rb_objspace_t *objspace)
+ {
+diff --git a/hash.c b/hash.c
+index 8423558..4cb2e2d 100644
+--- a/hash.c
++++ b/hash.c
+@@ -44,7 +44,7 @@ rb_any_cmp(VALUE a, VALUE b)
+ if (FIXNUM_P(a) && FIXNUM_P(b)) {
+ return a != b;
+ }
+- if (TYPE(a) == T_STRING && RBASIC(a)->klass == rb_cString &&
++ if (RB_TYPE_P(a, T_STRING) && RBASIC(a)->klass == rb_cString &&
+ TYPE(b) == T_STRING && RBASIC(b)->klass == rb_cString) {
+ return rb_str_hash_cmp(a, b);
+ }
+@@ -80,20 +80,14 @@ rb_any_hash(VALUE a)
+ VALUE hval;
+ st_index_t hnum;
+
+- switch (TYPE(a)) {
+- case T_FIXNUM:
+- case T_SYMBOL:
+- case T_NIL:
+- case T_FALSE:
+- case T_TRUE:
+- hnum = rb_hash_end(rb_hash_start((unsigned int)a));
+- break;
+-
+- case T_STRING:
++ if (SPECIAL_CONST_P(a)) {
++ if (a == Qundef) return 0;
++ hnum = rb_hash_end(rb_hash_start((st_index_t)a));
++ }
++ else if (BUILTIN_TYPE(a) == T_STRING) {
+ hnum = rb_str_hash(a);
+- break;
+-
+- default:
++ }
++ else {
+ hval = rb_hash(a);
+ hnum = FIX2LONG(hval);
+ }
+@@ -106,10 +100,8 @@ static const struct st_hash_type objhash = {
+ rb_any_hash,
+ };
+
+-static const struct st_hash_type identhash = {
+- st_numcmp,
+- st_numhash,
+-};
++extern const struct st_hash_type st_hashtype_num;
++#define identhash st_hashtype_num
+
+ typedef int st_foreach_func(st_data_t, st_data_t, st_data_t);
+
+@@ -124,7 +116,6 @@ foreach_safe_i(st_data_t key, st_data_t value, struct foreach_safe_arg *arg)
+ {
+ int status;
+
+- if (key == Qundef) return ST_CONTINUE;
+ status = (*arg->func)(key, value, arg->arg);
+ if (status == ST_CONTINUE) {
+ return ST_CHECK;
+@@ -140,7 +131,7 @@ st_foreach_safe(st_table *table, int (*func)(ANYARGS), st_data_t a)
+ arg.tbl = table;
+ arg.func = (st_foreach_func *)func;
+ arg.arg = a;
+- if (st_foreach(table, foreach_safe_i, (st_data_t)&arg)) {
++ if (st_foreach_check(table, foreach_safe_i, (st_data_t)&arg, 0)) {
+ rb_raise(rb_eRuntimeError, "hash modified during iteration");
+ }
+ }
+@@ -154,21 +145,21 @@ struct hash_foreach_arg {
+ };
+
+ static int
+-hash_foreach_iter(st_data_t key, st_data_t value, struct hash_foreach_arg *arg)
++hash_foreach_iter(st_data_t key, st_data_t value, st_data_t argp)
+ {
++ struct hash_foreach_arg *arg = (struct hash_foreach_arg *)argp;
+ int status;
+ st_table *tbl;
+
+ tbl = RHASH(arg->hash)->ntbl;
+- if ((VALUE)key == Qundef) return ST_CONTINUE;
+ status = (*arg->func)((VALUE)key, (VALUE)value, arg->arg);
+ if (RHASH(arg->hash)->ntbl != tbl) {
+ rb_raise(rb_eRuntimeError, "rehash occurred during iteration");
+ }
+ switch (status) {
+ case ST_DELETE:
+- st_delete_safe(tbl, &key, 0, Qundef);
+ FL_SET(arg->hash, HASH_DELETED);
++ return ST_DELETE;
+ case ST_CONTINUE:
+ break;
+ case ST_STOP:
+@@ -184,7 +175,7 @@ hash_foreach_ensure(VALUE hash)
+
+ if (RHASH(hash)->iter_lev == 0) {
+ if (FL_TEST(hash, HASH_DELETED)) {
+- st_cleanup_safe(RHASH(hash)->ntbl, Qundef);
++ st_cleanup_safe(RHASH(hash)->ntbl, (st_data_t)Qundef);
+ FL_UNSET(hash, HASH_DELETED);
+ }
+ }
+@@ -192,9 +183,10 @@ hash_foreach_ensure(VALUE hash)
+ }
+
+ static VALUE
+-hash_foreach_call(struct hash_foreach_arg *arg)
++hash_foreach_call(VALUE arg)
+ {
+- if (st_foreach(RHASH(arg->hash)->ntbl, hash_foreach_iter, (st_data_t)arg)) {
++ VALUE hash = ((struct hash_foreach_arg *)arg)->hash;
++ if (st_foreach_check(RHASH(hash)->ntbl, hash_foreach_iter, (st_data_t)arg, (st_data_t)Qundef)) {
+ rb_raise(rb_eRuntimeError, "hash modified during iteration");
+ }
+ return Qnil;
+@@ -447,7 +439,7 @@ rb_hash_rehash_i(VALUE key, VALUE value, VALUE arg)
+ {
+ st_table *tbl = (st_table *)arg;
+
+- if (key != Qundef) st_insert(tbl, key, value);
++ st_insert(tbl, (st_data_t)key, (st_data_t)value);
+ return ST_CONTINUE;
+ }
+
+@@ -490,6 +482,20 @@ rb_hash_rehash(VALUE hash)
+ return hash;
+ }
+
++static VALUE
++hash_default_value(VALUE hash, VALUE key)
++{
++ if (rb_method_basic_definition_p(CLASS_OF(hash), id_default)) {
++ VALUE ifnone = RHASH_IFNONE(hash);
++ if (!FL_TEST(hash, HASH_PROC_DEFAULT)) return ifnone;
++ if (key == Qundef) return Qnil;
++ return rb_funcall(ifnone, id_yield, 2, hash, key);
++ }
++ else {
++ return rb_funcall(hash, id_default, 1, key);
++ }
++}
++
+ /*
+ * call-seq:
+ * hsh[key] -> value
+@@ -510,13 +516,7 @@ rb_hash_aref(VALUE hash, VALUE key)
+ st_data_t val;
+
+ if (!RHASH(hash)->ntbl || !st_lookup(RHASH(hash)->ntbl, key, &val)) {
+- if (!FL_TEST(hash, HASH_PROC_DEFAULT) &&
+- rb_method_basic_definition_p(CLASS_OF(hash), id_default)) {
+- return RHASH_IFNONE(hash);
+- }
+- else {
+- return rb_funcall(hash, id_default, 1, key);
+- }
++ return hash_default_value(hash, key);
+ }
+ return (VALUE)val;
+ }
+@@ -659,7 +659,7 @@ rb_hash_default(int argc, VALUE *argv, VALUE hash)
+ static VALUE
+ rb_hash_set_default(VALUE hash, VALUE ifnone)
+ {
+- rb_hash_modify(hash);
++ rb_hash_modify_check(hash);
+ RHASH_IFNONE(hash) = ifnone;
+ FL_UNSET(hash, HASH_PROC_DEFAULT);
+ return ifnone;
+@@ -707,7 +707,7 @@ rb_hash_set_default_proc(VALUE hash, VALUE proc)
+ {
+ VALUE b;
+
+- rb_hash_modify(hash);
++ rb_hash_modify_check(hash);
+ b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
+ if (NIL_P(b) || !rb_obj_is_proc(b)) {
+ rb_raise(rb_eTypeError,
+@@ -776,7 +776,7 @@ rb_hash_delete_key(VALUE hash, VALUE key)
+ if (!RHASH(hash)->ntbl)
+ return Qundef;
+ if (RHASH(hash)->iter_lev > 0) {
+- if (st_delete_safe(RHASH(hash)->ntbl, &ktmp, &val, Qundef)) {
++ if (st_delete_safe(RHASH(hash)->ntbl, &ktmp, &val, (st_data_t)Qundef)) {
+ FL_SET(hash, HASH_DELETED);
+ return (VALUE)val;
+ }
+@@ -809,7 +809,7 @@ rb_hash_delete(VALUE hash, VALUE key)
+ {
+ VALUE val;
+
+- rb_hash_modify(hash);
++ rb_hash_modify_check(hash);
+ val = rb_hash_delete_key(hash, key);
+ if (val != Qundef) return val;
+ if (rb_block_given_p()) {
+@@ -828,7 +828,6 @@ shift_i(VALUE key, VALUE value, VALUE arg)
+ {
+ struct shift_var *var = (struct shift_var *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (var->key != Qundef) return ST_STOP;
+ var->key = key;
+ var->val = value;
+@@ -840,7 +839,6 @@ shift_i_safe(VALUE key, VALUE value, VALUE arg)
+ {
+ struct shift_var *var = (struct shift_var *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ var->key = key;
+ var->val = value;
+ return ST_STOP;
+@@ -864,29 +862,25 @@ rb_hash_shift(VALUE hash)
+ {
+ struct shift_var var;
+
+- rb_hash_modify(hash);
+- var.key = Qundef;
+- rb_hash_foreach(hash, RHASH(hash)->iter_lev > 0 ? shift_i_safe : shift_i,
+- (VALUE)&var);
+-
+- if (var.key != Qundef) {
+- if (RHASH(hash)->iter_lev > 0) {
+- rb_hash_delete_key(hash, var.key);
++ rb_hash_modify_check(hash);
++ if (RHASH(hash)->ntbl) {
++ var.key = Qundef;
++ rb_hash_foreach(hash, RHASH(hash)->iter_lev > 0 ? shift_i_safe : shift_i,
++ (VALUE)&var);
++
++ if (var.key != Qundef) {
++ if (RHASH(hash)->iter_lev > 0) {
++ rb_hash_delete_key(hash, var.key);
++ }
++ return rb_assoc_new(var.key, var.val);
+ }
+- return rb_assoc_new(var.key, var.val);
+- }
+- else if (FL_TEST(hash, HASH_PROC_DEFAULT)) {
+- return rb_funcall(RHASH_IFNONE(hash), id_yield, 2, hash, Qnil);
+- }
+- else {
+- return RHASH_IFNONE(hash);
+ }
++ return hash_default_value(hash, Qnil);
+ }
+
+ static int
+ delete_if_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ if (RTEST(rb_yield_values(2, key, value))) {
+ rb_hash_delete_key(hash, key);
+ }
+@@ -912,8 +906,9 @@ VALUE
+ rb_hash_delete_if(VALUE hash)
+ {
+ RETURN_ENUMERATOR(hash, 0, 0);
+- rb_hash_modify(hash);
+- rb_hash_foreach(hash, delete_if_i, hash);
++ rb_hash_modify_check(hash);
++ if (RHASH(hash)->ntbl)
++ rb_hash_foreach(hash, delete_if_i, hash);
+ return hash;
+ }
+
+@@ -984,7 +979,6 @@ rb_hash_values_at(int argc, VALUE *argv, VALUE hash)
+ static int
+ select_i(VALUE key, VALUE value, VALUE result)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ if (RTEST(rb_yield_values(2, key, value)))
+ rb_hash_aset(result, key, value);
+ return ST_CONTINUE;
+@@ -1018,7 +1012,6 @@ rb_hash_select(VALUE hash)
+ static int
+ keep_if_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ if (!RTEST(rb_yield_values(2, key, value))) {
+ return ST_DELETE;
+ }
+@@ -1040,7 +1033,7 @@ rb_hash_select_bang(VALUE hash)
+ st_index_t n;
+
+ RETURN_ENUMERATOR(hash, 0, 0);
+- rb_hash_modify(hash);
++ rb_hash_modify_check(hash);
+ if (!RHASH(hash)->ntbl)
+ return Qnil;
+ n = RHASH(hash)->ntbl->num_entries;
+@@ -1065,8 +1058,9 @@ VALUE
+ rb_hash_keep_if(VALUE hash)
+ {
+ RETURN_ENUMERATOR(hash, 0, 0);
+- rb_hash_modify(hash);
+- rb_hash_foreach(hash, keep_if_i, hash);
++ rb_hash_modify_check(hash);
++ if (RHASH(hash)->ntbl)
++ rb_hash_foreach(hash, keep_if_i, hash);
+ return hash;
+ }
+
+@@ -1144,9 +1138,7 @@ rb_hash_aset(VALUE hash, VALUE key, VALUE val)
+ static int
+ replace_i(VALUE key, VALUE val, VALUE hash)
+ {
+- if (key != Qundef) {
+- rb_hash_aset(hash, key, val);
+- }
++ rb_hash_aset(hash, key, val);
+
+ return ST_CONTINUE;
+ }
+@@ -1227,7 +1219,6 @@ rb_hash_empty_p(VALUE hash)
+ static int
+ each_value_i(VALUE key, VALUE value)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_yield(value);
+ return ST_CONTINUE;
+ }
+@@ -1262,7 +1253,6 @@ rb_hash_each_value(VALUE hash)
+ static int
+ each_key_i(VALUE key, VALUE value)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_yield(key);
+ return ST_CONTINUE;
+ }
+@@ -1296,7 +1286,6 @@ rb_hash_each_key(VALUE hash)
+ static int
+ each_pair_i(VALUE key, VALUE value)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_yield(rb_assoc_new(key, value));
+ return ST_CONTINUE;
+ }
+@@ -1334,7 +1323,6 @@ rb_hash_each_pair(VALUE hash)
+ static int
+ to_a_i(VALUE key, VALUE value, VALUE ary)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_ary_push(ary, rb_assoc_new(key, value));
+ return ST_CONTINUE;
+ }
+@@ -1367,7 +1355,6 @@ inspect_i(VALUE key, VALUE value, VALUE str)
+ {
+ VALUE str2;
+
+- if (key == Qundef) return ST_CONTINUE;
+ str2 = rb_inspect(key);
+ if (RSTRING_LEN(str) > 1) {
+ rb_str_cat2(str, ", ");
+@@ -1434,7 +1421,6 @@ rb_hash_to_hash(VALUE hash)
+ static int
+ keys_i(VALUE key, VALUE value, VALUE ary)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_ary_push(ary, key);
+ return ST_CONTINUE;
+ }
+@@ -1465,7 +1451,6 @@ rb_hash_keys(VALUE hash)
+ static int
+ values_i(VALUE key, VALUE value, VALUE ary)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_ary_push(ary, value);
+ return ST_CONTINUE;
+ }
+@@ -1524,7 +1509,6 @@ rb_hash_search_value(VALUE key, VALUE value, VALUE arg)
+ {
+ VALUE *data = (VALUE *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (rb_equal(value, data[1])) {
+ data[0] = Qtrue;
+ return ST_STOP;
+@@ -1568,7 +1552,6 @@ eql_i(VALUE key, VALUE val1, VALUE arg)
+ struct equal_data *data = (struct equal_data *)arg;
+ st_data_t val2;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (!st_lookup(data->tbl, key, &val2)) {
+ data->result = Qfalse;
+ return ST_STOP;
+@@ -1599,7 +1582,7 @@ hash_equal(VALUE hash1, VALUE hash2, int eql)
+ struct equal_data data;
+
+ if (hash1 == hash2) return Qtrue;
+- if (TYPE(hash2) != T_HASH) {
++ if (!RB_TYPE_P(hash2, T_HASH)) {
+ if (!rb_respond_to(hash2, rb_intern("to_hash"))) {
+ return Qfalse;
+ }
+@@ -1670,7 +1653,6 @@ hash_i(VALUE key, VALUE val, VALUE arg)
+ st_index_t *hval = (st_index_t *)arg;
+ st_index_t hdata[2];
+
+- if (key == Qundef) return ST_CONTINUE;
+ hdata[0] = rb_hash(key);
+ hdata[1] = rb_hash(val);
+ *hval ^= st_hash(hdata, sizeof(hdata), 0);
+@@ -1711,7 +1693,6 @@ rb_hash_hash(VALUE hash)
+ static int
+ rb_hash_invert_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_hash_aset(hash, value, key);
+ return ST_CONTINUE;
+ }
+@@ -1740,7 +1721,6 @@ rb_hash_invert(VALUE hash)
+ static int
+ rb_hash_update_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ hash_update(hash, key);
+ st_insert(RHASH(hash)->ntbl, key, value);
+ return ST_CONTINUE;
+@@ -1749,7 +1729,6 @@ rb_hash_update_i(VALUE key, VALUE value, VALUE hash)
+ static int
+ rb_hash_update_block_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ if (rb_hash_has_key(hash, key)) {
+ value = rb_yield_values(3, key, rb_hash_aref(hash, key), value);
+ }
+@@ -1806,7 +1785,6 @@ rb_hash_update_func_i(VALUE key, VALUE value, VALUE arg0)
+ struct update_arg *arg = (struct update_arg *)arg0;
+ VALUE hash = arg->hash;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (rb_hash_has_key(hash, key)) {
+ value = (*arg->func)(key, rb_hash_aref(hash, key), value);
+ }
+@@ -1863,7 +1841,6 @@ assoc_i(VALUE key, VALUE val, VALUE arg)
+ {
+ VALUE *args = (VALUE *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (RTEST(rb_equal(args[0], key))) {
+ args[1] = rb_assoc_new(key, val);
+ return ST_STOP;
+@@ -1901,7 +1878,6 @@ rassoc_i(VALUE key, VALUE val, VALUE arg)
+ {
+ VALUE *args = (VALUE *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (RTEST(rb_equal(args[0], val))) {
+ args[1] = rb_assoc_new(key, val);
+ return ST_STOP;
+@@ -2198,7 +2174,7 @@ rb_env_path_tainted(void)
+ }
+
+ #if defined(_WIN32) || (defined(HAVE_SETENV) && defined(HAVE_UNSETENV))
+-#elif defined __sun__
++#elif defined __sun
+ static int
+ in_origenv(const char *str)
+ {
+@@ -2286,7 +2262,7 @@ ruby_setenv(const char *name, const char *value)
+ rb_sys_fail("unsetenv");
+ #endif
+ }
+-#elif defined __sun__
++#elif defined __sun
+ size_t len;
+ char **env_ptr, *str;
+ if (strchr(name, '=')) {
+@@ -3084,11 +3060,9 @@ env_invert(void)
+ static int
+ env_replace_i(VALUE key, VALUE val, VALUE keys)
+ {
+- if (key != Qundef) {
+- env_aset(Qnil, key, val);
+- if (rb_ary_includes(keys, key)) {
+- rb_ary_delete(keys, key);
+- }
++ env_aset(Qnil, key, val);
++ if (rb_ary_includes(keys, key)) {
++ rb_ary_delete(keys, key);
+ }
+ return ST_CONTINUE;
+ }
+@@ -3120,12 +3094,10 @@ env_replace(VALUE env, VALUE hash)
+ static int
+ env_update_i(VALUE key, VALUE val)
+ {
+- if (key != Qundef) {
+- if (rb_block_given_p()) {
+- val = rb_yield_values(3, key, rb_f_getenv(Qnil, key), val);
+- }
+- env_aset(Qnil, key, val);
++ if (rb_block_given_p()) {
++ val = rb_yield_values(3, key, rb_f_getenv(Qnil, key), val);
+ }
++ env_aset(Qnil, key, val);
+ return ST_CONTINUE;
+ }
+
+@@ -3150,15 +3122,116 @@ env_update(VALUE env, VALUE hash)
+ }
+
+ /*
+- * A <code>Hash</code> is a collection of key-value pairs. It is
+- * similar to an <code>Array</code>, except that indexing is done via
+- * arbitrary keys of any object type, not an integer index. Hashes enumerate
+- * their values in the order that the corresponding keys were inserted.
++ * A Hash is a dictionary-like collection of unique keys and their values.
++ * Also called associative arrays, they are similar to Arrays, but where an
++ * Array uses integers as its index, a Hash allows you to use any object
++ * type.
++ *
++ * Hashes enumerate their values in the order that the corresponding keys
++ * were inserted.
++ *
++ * A Hash can be easily created by using its implicit form:
++ *
++ * grades = { "Jane Doe" => 10, "Jim Doe" => 6 }
++ *
++ * Hashes allow an alternate syntax form when your keys are always symbols.
++ * Instead of
++ *
++ * options = { :font_size => 10, :font_family => "Arial" }
++ *
++ * You could write it as:
++ *
++ * options = { font_size: 10, font_family: "Arial" }
++ *
++ * Each named key is a symbol you can access in the hash:
++ *
++ * options[:font_size] # => 10
++ *
++ * A Hash can also be created through its ::new method:
++ *
++ * grades = Hash.new
++ * grades["Dorothy Doe"] = 9
+ *
+ * Hashes have a <em>default value</em> that is returned when accessing
+- * keys that do not exist in the hash. By default, that value is
+- * <code>nil</code>.
++ * keys that do not exist in the hash. If no default is set +nil+ is used.
++ * You can set the default value by sending it as an argument to Hash.new:
++ *
++ * grades = Hash.new(0)
++ *
++ * Or by using the #default= method:
++ *
++ * grades = {"Timmy Doe" => 8}
++ * grades.default = 0
++ *
++ * Accessing a value in a Hash requires using its key:
++ *
++ * puts grades["Jane Doe"] # => 10
++ *
++ * === Common Uses
++ *
++ * Hashes are an easy way to represent data structures, such as
++ *
++ * books = {}
++ * books[:matz] = "The Ruby Language"
++ * books[:black] = "The Well-Grounded Rubyist"
++ *
++ * Hashes are also commonly used as a way to have named parameters in
++ * functions. Note that no brackets are used below. If a hash is the last
++ * argument on a method call, no braces are needed, thus creating a really
++ * clean interface:
++ *
++ * Person.create(name: "John Doe", age: 27)
++ *
++ * def self.create(params)
++ * @name = params[:name]
++ * @age = params[:age]
++ * end
++ *
++ * === Hash Keys
++ *
++ * Two objects refer to the same hash key when their <code>hash</code> value
++ * is identical and the two objects are <code>eql?</code> to each other.
++ *
++ * A user-defined class may be used as a hash key if the <code>hash</code>
++ * and <code>eql?</code> methods are overridden to provide meaningful
++ * behavior. By default, separate instances refer to separate hash keys.
++ *
++ * A typical implementation of <code>hash</code> is based on the
++ * object's data while <code>eql?</code> is usually aliased to the overridden
++ * <code>==</code> method:
++ *
++ * class Book
++ * attr_reader :author, :title
++ *
++ * def initialize(author, title)
++ * @author = author
++ * @title = title
++ * end
++ *
++ * def ==(other)
++ * self.class === other and
++ * other.author == @author and
++ * other.title == @title
++ * end
++ *
++ * alias eql? ==
++ *
++ * def hash
++ * @author.hash ^ @title.hash # XOR
++ * end
++ * end
++ *
++ * book1 = Book.new 'matz', 'Ruby in a Nutshell'
++ * book2 = Book.new 'matz', 'Ruby in a Nutshell'
++ *
++ * reviews = {}
++ *
++ * reviews[book1] = 'Great reference!'
++ * reviews[book2] = 'Nice and compact!'
++ *
++ * reviews.length #=> 1
+ *
++ * See also Object#hash and Object#eql?
+ */
+
+ void
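The hash.c hunks above repeatedly remove the `if (key == Qundef) return ST_CONTINUE;` guard from iterator callbacks. A minimal sketch of the idea, assuming the iteration driver is rebuilt on top of `st_foreach_check()` (declared in the st.h hunk below); the wrapper name here is hypothetical and not taken from the patch:

    /* Sketch only: Qundef is handed to st as the "never" sentinel, so the
     * table itself skips entries whose key was cleared during iteration
     * and the individual callbacks no longer need to re-check it.
     * Assumes the usual ruby.h / st.h internals are in scope. */
    static int
    hash_foreach_sketch(st_table *tbl, int (*func)(ANYARGS), st_data_t arg)
    {
        return st_foreach_check(tbl, func, arg, (st_data_t)Qundef);
    }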
+diff --git a/include/ruby/st.h b/include/ruby/st.h
+index 50f2a75..119dfde 100644
+--- a/include/ruby/st.h
++++ b/include/ruby/st.h
+@@ -36,7 +36,7 @@ typedef unsigned long st_data_t;
+ #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
+ typedef unsigned LONG_LONG st_data_t;
+ #else
+-# error ---->> st.c requires sizeof(void*) == sizeof(long) to be compiled. <<----
++# error ---->> st.c requires sizeof(void*) == sizeof(long) or sizeof(LONG_LONG) to be compiled. <<----
+ #endif
+ #define ST_DATA_T_DEFINED
+
+@@ -74,6 +74,11 @@ struct st_hash_type {
+
+ #define ST_INDEX_BITS (sizeof(st_index_t) * CHAR_BIT)
+
++typedef struct st_packed_entry {
++ st_index_t hash;
++ st_data_t key, val;
++} st_packed_entry;
++
+ struct st_table {
+ const struct st_hash_type *type;
+ st_index_t num_bins;
+@@ -91,8 +96,17 @@ struct st_table {
+ __extension__
+ #endif
+ st_index_t num_entries : ST_INDEX_BITS - 1;
+- struct st_table_entry **bins;
+- struct st_table_entry *head, *tail;
++ union {
++ struct {
++ struct st_table_entry **bins;
++ struct st_table_entry *head, *tail;
++ } big;
++ struct {
++ struct st_packed_entry *entries;
++ st_index_t real_entries;
++ } packed;
++ st_packed_entry upacked;
++ } as;
+ };
+
+ #define st_is_member(table,key) st_lookup((table),(key),(st_data_t *)0)
+@@ -114,6 +128,7 @@ int st_insert2(st_table *, st_data_t, st_data_t, st_data_t (*)(st_data_t));
+ int st_lookup(st_table *, st_data_t, st_data_t *);
+ int st_get_key(st_table *, st_data_t, st_data_t *);
+ int st_foreach(st_table *, int (*)(ANYARGS), st_data_t);
++int st_foreach_check(st_table *, int (*)(ANYARGS), st_data_t, st_data_t);
+ int st_reverse_foreach(st_table *, int (*)(ANYARGS), st_data_t);
+ void st_add_direct(st_table *, st_data_t, st_data_t);
+ void st_free_table(st_table *);
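The union added to `struct st_table` gives small tables a second representation: instead of allocating bins plus chained `st_table_entry` nodes (`as.big`), the entries live inline in a flat `st_packed_entry` array (`as.packed`), switching to the chained layout only once the table grows. An illustrative lookup over the packed form, assuming st's usual equality convention (compare returns 0 for equal keys); this helper is a sketch, not code from the patch:

    static inline int
    packed_lookup(const st_table *table, st_index_t hash_val,
                  st_data_t key, st_data_t *value)
    {
        st_index_t i;
        /* linear scan: compare the cached hash first so the (possibly
         * expensive) key comparison only runs on real candidates */
        for (i = 0; i < table->as.packed.real_entries; i++) {
            const st_packed_entry *e = &table->as.packed.entries[i];
            if (e->hash == hash_val &&
                (e->key == key || table->type->compare(e->key, key) == 0)) {
                if (value) *value = e->val;
                return 1;   /* found */
            }
        }
        return 0;           /* not found */
    }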
+diff --git a/pool_alloc.h b/pool_alloc.h
+new file mode 100644
+index 0000000..957708e
+--- /dev/null
++++ b/pool_alloc.h
+@@ -0,0 +1,11 @@
++#ifndef POOL_ALLOC_H
++#define POOL_ALLOC_H
++
++#define POOL_ALLOC_API
++#ifdef POOL_ALLOC_API
++void ruby_xpool_free(void *ptr);
++void *ruby_xpool_malloc_6p();
++void *ruby_xpool_malloc_11p();
++#endif
++
++#endif
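pool_alloc.h exposes just three entry points; going by the names, `ruby_xpool_malloc_6p()` and `ruby_xpool_malloc_11p()` return fixed-size blocks of six and eleven pointer-words from dedicated pools, and `ruby_xpool_free()` puts a block back. A hypothetical caller, only to illustrate the calling pattern (the struct is made up; the real users live inside the interpreter):

    #include "pool_alloc.h"

    /* hypothetical payload -- anything that fits in six pointer-words */
    struct six_words { void *slot[6]; };

    static struct six_words *
    six_words_new(void)
    {
        /* block comes from the 6-pointer pool, not a per-object malloc() */
        return (struct six_words *)ruby_xpool_malloc_6p();
    }

    static void
    six_words_free(struct six_words *w)
    {
        ruby_xpool_free(w);   /* returned to the pool's free list */
    }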
+diff --git a/pool_alloc.inc.h b/pool_alloc.inc.h
+new file mode 100644
+index 0000000..a7879ab
+--- /dev/null
++++ b/pool_alloc.inc.h
+@@ -0,0 +1,156 @@
++/*
++ * this is a generic pool allocator
++ * you should define the following macros:
++ * ITEM_NAME - unique identifier, which keeps the generated functions in their own namespace
++ * ITEM_TYPEDEF(name) - passed to typedef to localize the item type
++ * free_entry - desired name of the function that frees an entry
++ * alloc_entry - desired name of the function that allocates an entry
++ */
++
++#if POOL_ALLOC_PART == 1
++#ifdef HEAP_ALIGN_LOG
++#define DEFAULT_POOL_SIZE (1 << HEAP_ALIGN_LOG)
++#else
++#define DEFAULT_POOL_SIZE (sizeof(void*) * 2048)
++#endif
++typedef unsigned int pool_holder_counter;
++
++typedef struct pool_entry_list pool_entry_list;
++typedef struct pool_holder pool_holder;
++
++typedef struct pool_header {
++ pool_holder *first;
++ pool_holder *_black_magick;
++ pool_holder_counter size; // size of entry in sizeof(void*) items
++ pool_holder_counter total; // number of entries that fit in one pool holder
++} pool_header;
++
++struct pool_holder