Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

added the two trunk accepted falcon patches

  • Loading branch information...
commit de18cd2d80eb2d0296b653032ccd5959a6594f17 1 parent b0ba726
Stefan Kaes skaes authored
3  patches/ruby/1.9.3/p327/railsexpress/01-fix-make-clean.patch
View
@@ -4,9 +4,10 @@ index f4f84a6..f73f7ed 100644
+++ b/lib/mkmf.rb
@@ -2191,7 +2191,7 @@ def init_mkmf(config = CONFIG, rbconfig = RbConfig::CONFIG)
$LOCAL_LIBS = ""
-
+
$cleanfiles = config_string('CLEANFILES') {|s| Shellwords.shellwords(s)} || []
- $cleanfiles << "mkmf.log"
+ $cleanfiles << "mkmf.log .*.time"
$distcleanfiles = config_string('DISTCLEANFILES') {|s| Shellwords.shellwords(s)} || []
$distcleandirs = config_string('DISTCLEANDIRS') {|s| Shellwords.shellwords(s)} || []
+
173 patches/ruby/1.9.3/p327/railsexpress/02-railsbench-gc.patch
View
@@ -5,14 +5,14 @@ index b006a01..af5acc4 100644
@@ -2473,6 +2473,10 @@ if test "$EXEEXT" = .exe; then
AC_SUBST(EXECUTABLE_EXTS)
fi
-
+
+dnl enable gc debugging
+AC_ARG_ENABLE(gcdebug,
+ AS_HELP_STRING([--enable-gcdebug], [build garbage collector with debugging enabled]),
+ [AC_DEFINE(GC_DEBUG,1)])
dnl }
dnl build section {
-
+
diff --git a/gc.c b/gc.c
index e65d0ec..169dfab 100644
--- a/gc.c
@@ -20,7 +20,7 @@ index e65d0ec..169dfab 100644
@@ -96,6 +96,15 @@ ruby_gc_params_t initial_params = {
#endif
};
-
+
+#ifndef HAVE_LONG_LONG
+#define LONG_LONG long
+#endif
@@ -31,10 +31,10 @@ index e65d0ec..169dfab 100644
+static double heap_slots_growth_factor = 1.8;
+
#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
-
+
#define MARK_STACK_MAX 1024
@@ -291,7 +300,7 @@ typedef struct RVALUE {
- struct RComplex complex;
+ struct RComplex complex;
} as;
#ifdef GC_DEBUG
- const char *file;
@@ -43,9 +43,9 @@ index e65d0ec..169dfab 100644
#endif
} RVALUE;
@@ -345,11 +354,25 @@ typedef struct rb_objspace {
- size_t free_min;
- size_t final_num;
- size_t do_heap_free;
+ size_t free_min;
+ size_t final_num;
+ size_t do_heap_free;
+ unsigned long max_blocks_to_free;
+ unsigned long freed_blocks;
} heap;
@@ -60,14 +60,14 @@ index e65d0ec..169dfab 100644
+ unsigned long live_after_last_mark_phase;
+ } stats;
+ struct {
- int dont_gc;
- int dont_lazy_sweep;
- int during_gc;
+ int dont_gc;
+ int dont_lazy_sweep;
+ int during_gc;
+ int gc_statistics;
+ int verbose_gc_stats;
} flags;
struct {
- st_table *table;
+ st_table *table;
@@ -370,6 +393,14 @@ typedef struct rb_objspace {
struct gc_list *global_list;
size_t count;
@@ -81,7 +81,7 @@ index e65d0ec..169dfab 100644
+ unsigned long live_objects;
+ unsigned LONG_LONG allocated_objects;
} rb_objspace_t;
-
+
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
@@ -392,6 +423,16 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
#define heaps_freed objspace->heap.freed
@@ -112,9 +112,9 @@ index e65d0ec..169dfab 100644
+#define freed_objects objspace->stats.freed_objects
+#define gc_time_accumulator_before_gc objspace->stats.gc_time_accumulator_before_gc
+#define live_after_last_mark_phase objspace->stats.live_after_last_mark_phase
-
+
static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
-
+
@@ -423,24 +472,59 @@ static void initial_expand_heap(rb_objspace_t *objspace);
void
rb_gc_set_params(void)
@@ -125,9 +125,9 @@ index e65d0ec..169dfab 100644
+ rb_objspace_t *objspace = &rb_objspace;
+
+ gc_data_file = stderr;
-
+
if (rb_safe_level() > 0) return;
-
+
- malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
- if (malloc_limit_ptr != NULL) {
- int malloc_limit_i = atoi(malloc_limit_ptr);
@@ -161,15 +161,15 @@ index e65d0ec..169dfab 100644
+ if (verbose_gc_stats) {
+ fprintf(gc_data_file, "RUBY_GC_MALLOC_LIMIT=%s\n", envp);
+ }
- if (RTEST(ruby_verbose))
- fprintf(stderr, "malloc_limit=%d (%d)\n",
- malloc_limit_i, initial_malloc_limit);
- if (malloc_limit_i > 0) {
- initial_malloc_limit = malloc_limit_i;
+ if (RTEST(ruby_verbose))
+ fprintf(stderr, "malloc_limit=%d (%d)\n",
+ malloc_limit_i, initial_malloc_limit);
+ if (malloc_limit_i > 0) {
+ initial_malloc_limit = malloc_limit_i;
+ // malloc_limit = initial_malloc_limit;
- }
+ }
}
-
+
- heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
- if (heap_min_slots_ptr != NULL) {
- int heap_min_slots_i = atoi(heap_min_slots_ptr);
@@ -179,13 +179,13 @@ index e65d0ec..169dfab 100644
+ if (verbose_gc_stats) {
+ fprintf(gc_data_file, "RUBY_HEAP_MIN_SLOTS=%s\n", envp);
+ }
- if (RTEST(ruby_verbose))
- fprintf(stderr, "heap_min_slots=%d (%d)\n",
- heap_min_slots_i, initial_heap_min_slots);
+ if (RTEST(ruby_verbose))
+ fprintf(stderr, "heap_min_slots=%d (%d)\n",
+ heap_min_slots_i, initial_heap_min_slots);
@@ -450,15 +534,42 @@ rb_gc_set_params(void)
- }
+ }
}
-
+
- free_min_ptr = getenv("RUBY_FREE_MIN");
- if (free_min_ptr != NULL) {
- int free_min_i = atoi(free_min_ptr);
@@ -196,11 +196,11 @@ index e65d0ec..169dfab 100644
+ if (verbose_gc_stats) {
+ fprintf(gc_data_file, "RUBY_HEAP_FREE_MIN=%s\n", envp);
+ }
- if (RTEST(ruby_verbose))
- fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
- if (free_min_i > 0) {
- initial_free_min = free_min_i;
- }
+ if (RTEST(ruby_verbose))
+ fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
+ if (free_min_i > 0) {
+ initial_free_min = free_min_i;
+ }
}
+
+ envp = getenv("RUBY_HEAP_SLOTS_INCREMENT");
@@ -226,12 +226,12 @@ index e65d0ec..169dfab 100644
+
+ fflush(gc_data_file);
}
-
+
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
@@ -753,6 +864,11 @@ vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
mem = (size_t *)mem + 1;
#endif
-
+
+ if (gc_statistics) {
+ gc_allocated_size += size;
+ gc_num_allocations += 1;
@@ -239,11 +239,11 @@ index e65d0ec..169dfab 100644
+
return mem;
}
-
+
@@ -813,6 +929,13 @@ vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
mem = (size_t *)mem + 1;
#endif
-
+
+ /* TODO: we can't count correctly unless we store old size on heap
+ if (gc_statistics) {
+ gc_allocated_size += size;
@@ -253,11 +253,11 @@ index e65d0ec..169dfab 100644
+
return mem;
}
-
+
@@ -894,7 +1017,6 @@ ruby_xfree(void *x)
- vm_xfree(&rb_objspace, x);
+ vm_xfree(&rb_objspace, x);
}
-
+
-
/*
* call-seq:
@@ -265,7 +265,7 @@ index e65d0ec..169dfab 100644
@@ -940,6 +1062,455 @@ rb_gc_disable(void)
return old ? Qtrue : Qfalse;
}
-
+
+/*
+ * call-seq:
+ * GC.enable_stats => true or false
@@ -716,7 +716,7 @@ index e65d0ec..169dfab 100644
+}
+
VALUE rb_mGC;
-
+
void
@@ -1011,6 +1582,12 @@ allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
static void
@@ -736,22 +736,22 @@ index e65d0ec..169dfab 100644
if (himem < pend) himem = pend;
heaps_used++;
+ heap_size += objs;
-
+
while (p < pend) {
- p->as.free.flags = 0;
+ p->as.free.flags = 0;
@@ -1127,7 +1705,7 @@ initial_expand_heap(rb_objspace_t *objspace)
static void
set_heaps_increment(rb_objspace_t *objspace)
{
- size_t next_heaps_length = (size_t)(heaps_used * 1.8);
+ size_t next_heaps_length = (size_t)(heaps_used * heap_slots_growth_factor);
-
+
if (next_heaps_length == heaps_used) {
next_heaps_length++;
@@ -1160,6 +1738,22 @@ rb_during_gc(void)
-
+
#define RANY(o) ((RVALUE*)(o))
-
+
+#ifdef GC_DEBUG
+static VALUE
+_rb_sourcefile(void)
@@ -772,7 +772,7 @@ index e65d0ec..169dfab 100644
rb_newobj(void)
{
@@ -1191,9 +1785,11 @@ rb_newobj(void)
-
+
MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
- RANY(obj)->file = rb_sourcefile();
@@ -782,12 +782,12 @@ index e65d0ec..169dfab 100644
+ live_objects++;
+ allocated_objects++;
GC_PROF_INC_LIVE_NUM;
-
+
return obj;
@@ -1660,6 +2256,12 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
{
register RVALUE *obj = RANY(ptr);
-
+
+#ifdef GC_DEBUG
+ if (obj->file && obj->file != Qnil && is_pointer_to_heap(objspace, (void*)obj->file)) {
+ gc_mark(objspace, obj->file, lev);
@@ -795,12 +795,12 @@ index e65d0ec..169dfab 100644
+#endif
+
goto marking; /* skip */
-
+
again:
@@ -1670,6 +2272,12 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
obj->as.basic.flags |= FL_MARK;
objspace->heap.live_num++;
-
+
+#ifdef GC_DEBUG
+ if (obj->file && obj->file != Qnil && is_pointer_to_heap(objspace, (void*)obj->file)) {
+ gc_mark(objspace, obj->file, lev);
@@ -809,11 +809,11 @@ index e65d0ec..169dfab 100644
+
marking:
if (FL_TEST(obj, FL_EXIVAR)) {
- rb_mark_generic_ivar(ptr);
+ rb_mark_generic_ivar(ptr);
@@ -2012,6 +2620,25 @@ free_unused_heaps(rb_objspace_t *objspace)
}
}
-
+
+static inline unsigned long
+elapsed_musecs(struct timeval since)
+{
@@ -844,7 +844,7 @@ index e65d0ec..169dfab 100644
+
+ struct timeval tv1;
+ if (gc_statistics) gettimeofday(&tv1, NULL);
-
+
p = sweep_slot->slot; pend = p + sweep_slot->limit;
while (p < pend) {
if (!(p->as.basic.flags & FL_MARK)) {
@@ -854,7 +854,7 @@ index e65d0ec..169dfab 100644
+ }
if (p->as.basic.flags &&
((deferred = obj_free(objspace, (VALUE)p)) ||
- (FL_TEST(p, FL_FINALIZE)))) {
+ (FL_TEST(p, FL_FINALIZE)))) {
if (!deferred) {
+ if (do_gc_stats) zombies++;
p->as.free.flags = T_ZOMBIE;
@@ -894,7 +894,7 @@ index e65d0ec..169dfab 100644
+ final_num + free_num == sweep_slot->limit &&
objspace->heap.free_num > objspace->heap.do_heap_free) {
RVALUE *pp;
-
+
@@ -2060,6 +2709,8 @@ slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
sweep_slot->limit = final_num;
freelist = free; /* cancel this page from freelist */
@@ -913,7 +913,7 @@ index e65d0ec..169dfab 100644
+ gc_time_accumulator += elapsed_musecs(tv1);
+ }
}
-
+
static int
@@ -2092,6 +2747,21 @@ ready_to_gc(rb_objspace_t *objspace)
static void
@@ -945,7 +945,7 @@ index e65d0ec..169dfab 100644
+ struct timeval tv1;
+
GC_PROF_SET_MALLOC_INFO;
-
+
+ if (gc_statistics) gettimeofday(&tv1, NULL);
+
if (objspace->heap.free_num < objspace->heap.free_min) {
@@ -953,7 +953,7 @@ index e65d0ec..169dfab 100644
heaps_increment(objspace);
@@ -2125,6 +2800,29 @@ after_gc_sweep(rb_objspace_t *objspace)
malloc_increase = 0;
-
+
free_unused_heaps(objspace);
+
+ if (gc_statistics) {
@@ -979,12 +979,12 @@ index e65d0ec..169dfab 100644
+ }
+ }
}
-
+
static int
@@ -2158,9 +2856,11 @@ rest_sweep(rb_objspace_t *objspace)
-
+
static void gc_marks(rb_objspace_t *objspace);
-
+
+/* only called from rb_new_obj */
static int
gc_lazy_sweep(rb_objspace_t *objspace)
@@ -992,7 +992,7 @@ index e65d0ec..169dfab 100644
+ struct timeval gctv1;
int res;
INIT_GC_PROF_PARAMS;
-
+
@@ -2182,7 +2882,6 @@ gc_lazy_sweep(rb_objspace_t *objspace)
GC_PROF_TIMER_STOP(Qfalse);
return res;
@@ -1017,13 +1017,13 @@ index e65d0ec..169dfab 100644
+ }
+ */
+ }
-
+
gc_marks(objspace);
-
+
@@ -2198,6 +2909,10 @@ gc_lazy_sweep(rb_objspace_t *objspace)
- set_heaps_increment(objspace);
+ set_heaps_increment(objspace);
}
-
+
+ if (gc_statistics) {
+ gc_time_accumulator += elapsed_musecs(gctv1);
+ }
@@ -1033,16 +1033,16 @@ index e65d0ec..169dfab 100644
after_gc_sweep(objspace);
@@ -2209,6 +2924,7 @@ gc_lazy_sweep(rb_objspace_t *objspace)
GC_PROF_SWEEP_TIMER_STOP;
-
+
GC_PROF_TIMER_STOP(Qtrue);
+
return res;
}
-
+
@@ -2435,9 +3151,15 @@ gc_marks(rb_objspace_t *objspace)
rb_thread_t *th = GET_THREAD();
GC_PROF_MARK_TIMER_START;
-
+
+ /*
+ if (gc_statistics & verbose_gc_stats) {
+ fprintf(gc_data_file, "Marking objects\n");
@@ -1053,29 +1053,29 @@ index e65d0ec..169dfab 100644
objspace->count++;
-
+ live_objects = 0;
-
+
SET_STACK_END;
-
+
@@ -2477,11 +3199,15 @@ gc_marks(rb_objspace_t *objspace)
- }
+ }
}
GC_PROF_MARK_TIMER_STOP;
+
+ live_after_last_mark_phase = objspace->heap.live_num;
}
-
+
static int
garbage_collect(rb_objspace_t *objspace)
{
+ struct timeval gctv1;
+
INIT_GC_PROF_PARAMS;
-
+
if (GC_NOTIFY) printf("start garbage_collect()\n");
@@ -2497,15 +3223,31 @@ garbage_collect(rb_objspace_t *objspace)
-
+
rest_sweep(objspace);
-
+
+ if (gc_statistics) {
+ gc_time_accumulator_before_gc = gc_time_accumulator;
+ gc_collections++;
@@ -1089,7 +1089,7 @@ index e65d0ec..169dfab 100644
+
during_gc++;
gc_marks(objspace);
-
+
+ if (gc_statistics) {
+ gc_time_accumulator += elapsed_musecs(gctv1);
+ }
@@ -1097,17 +1097,17 @@ index e65d0ec..169dfab 100644
GC_PROF_SWEEP_TIMER_START;
gc_sweep(objspace);
GC_PROF_SWEEP_TIMER_STOP;
-
+
GC_PROF_TIMER_STOP(Qtrue);
if (GC_NOTIFY) printf("end garbage_collect()\n");
+
return TRUE;
}
-
+
@@ -2994,6 +3736,39 @@ rb_gc_call_finalizer_at_exit(void)
rb_objspace_call_finalizer(&rb_objspace);
}
-
+
+static const char* obj_type(VALUE type)
+{
+ switch (type) {
@@ -1147,7 +1147,7 @@ index e65d0ec..169dfab 100644
@@ -3307,6 +4082,49 @@ count_objects(int argc, VALUE *argv, VALUE os)
return hash;
}
-
+
+/* call-seq:
+ * ObjectSpace.live_objects => number
+ *
@@ -1197,7 +1197,7 @@ index e65d0ec..169dfab 100644
@@ -3599,6 +4417,28 @@ Init_GC(void)
rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
-
+
+ rb_define_singleton_method(rb_mGC, "enable_stats", rb_gc_enable_stats, 0);
+ rb_define_singleton_method(rb_mGC, "disable_stats", rb_gc_disable_stats, 0);
+ rb_define_singleton_method(rb_mGC, "stats_enabled?", rb_gc_stats_enabled, 0);
@@ -1226,9 +1226,10 @@ index e65d0ec..169dfab 100644
@@ -3612,6 +4452,9 @@ Init_GC(void)
rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
-
+
+ rb_define_module_function(rb_mObSpace, "live_objects", os_live_objects, 0);
+ rb_define_module_function(rb_mObSpace, "allocated_objects", os_allocated_objects, 0);
+
rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);
+
8 patches/ruby/1.9.3/p327/railsexpress/03-display-more-detailed-stack-trace.patch
View
@@ -4,12 +4,12 @@ index fd06adf..69c3b48 100644
+++ b/eval_error.c
@@ -164,8 +164,8 @@ error_print(void)
int skip = eclass == rb_eSysStackError;
-
+
#define TRACE_MAX (TRACE_HEAD+TRACE_TAIL+5)
-#define TRACE_HEAD 8
-#define TRACE_TAIL 5
+#define TRACE_HEAD 100
+#define TRACE_TAIL 100
-
- for (i = 1; i < len; i++) {
- if (TYPE(ptr[i]) == T_STRING) {
+
+ for (i = 1; i < len; i++) {
+ if (TYPE(ptr[i]) == T_STRING) {
12 patches/ruby/1.9.3/p327/railsexpress/04-fork-support-for-gc-logging.patch
View
@@ -4,7 +4,7 @@ index 169dfab..629a73a 100644
+++ b/gc.c
@@ -1322,6 +1322,34 @@ rb_gc_log_file(int argc, VALUE *argv, VALUE self)
}
-
+
/*
+ * Called from process.c before a fork. Flushes the gc log file to
+ * avoid writing the buffered output twice (once in the parent, and
@@ -56,13 +56,13 @@ index 99cfc69..8bee602 100644
+++ b/process.c
@@ -2804,9 +2804,11 @@ rb_f_fork(VALUE obj)
rb_pid_t pid;
-
+
rb_secure(2);
+ rb_gc_before_fork();
-
+
switch (pid = rb_fork(0, 0, 0, Qnil)) {
case 0:
+ rb_gc_after_fork();
- rb_thread_atfork();
- if (rb_block_given_p()) {
- int status;
+ rb_thread_atfork();
+ if (rb_block_given_p()) {
+ int status;
8 patches/ruby/1.9.3/p327/railsexpress/05-track-live-dataset-size.patch
View
@@ -5,7 +5,7 @@ index 629a73a..8ab287e 100644
@@ -270,7 +270,6 @@ getrusage_time(void)
#define GC_PROF_DEC_LIVE_NUM
#endif
-
+
-
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
@@ -13,7 +13,7 @@ index 629a73a..8ab287e 100644
@@ -1539,6 +1538,24 @@ rb_gc_time()
#endif
}
-
+
+/*
+ * call-seq:
+ * GC.heap_slots_live_after_last_gc => Integer
@@ -33,7 +33,7 @@ index 629a73a..8ab287e 100644
+
+
VALUE rb_mGC;
-
+
void
@@ -4452,6 +4469,7 @@ Init_GC(void)
rb_define_singleton_method(rb_mGC, "allocated_size", rb_gc_allocated_size, 0);
@@ -41,5 +41,5 @@ index 629a73a..8ab287e 100644
rb_define_singleton_method(rb_mGC, "heap_slots", rb_gc_heap_slots, 0);
+ rb_define_singleton_method(rb_mGC, "heap_slots_live_after_last_gc", rb_gc_heap_slots_live_after_last_gc, 0);
rb_define_const(rb_mGC, "HEAP_SLOT_SIZE", INT2FIX(sizeof(RVALUE)));
-
+
rb_define_singleton_method(rb_mGC, "log", rb_gc_log, 1);
20 patches/ruby/1.9.3/p327/railsexpress/07-export-a-few-more-symbols-for-ruby-prof.patch
View
@@ -5,7 +5,7 @@ index 8ab287e..a77ef2c 100644
@@ -1029,6 +1029,7 @@ ruby_xfree(void *x)
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_enable(void)
@@ -13,7 +13,7 @@ index 8ab287e..a77ef2c 100644
@@ -1051,6 +1052,7 @@ rb_gc_enable(void)
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_disable(void)
@@ -21,7 +21,7 @@ index 8ab287e..a77ef2c 100644
@@ -1073,6 +1075,7 @@ rb_gc_disable(void)
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_enable_stats()
@@ -29,7 +29,7 @@ index 8ab287e..a77ef2c 100644
@@ -1094,6 +1097,7 @@ rb_gc_enable_stats()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_disable_stats()
@@ -37,7 +37,7 @@ index 8ab287e..a77ef2c 100644
@@ -1113,6 +1117,7 @@ rb_gc_disable_stats()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_stats_enabled()
@@ -45,7 +45,7 @@ index 8ab287e..a77ef2c 100644
@@ -1132,6 +1137,7 @@ rb_gc_stats_enabled()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_clear_stats()
@@ -53,7 +53,7 @@ index 8ab287e..a77ef2c 100644
@@ -1199,6 +1205,7 @@ rb_gc_num_allocations()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_enable_trace()
@@ -61,7 +61,7 @@ index 8ab287e..a77ef2c 100644
@@ -1220,6 +1227,7 @@ rb_gc_enable_trace()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_disable_trace()
@@ -69,7 +69,7 @@ index 8ab287e..a77ef2c 100644
@@ -1239,6 +1247,7 @@ rb_gc_disable_trace()
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_trace_enabled()
@@ -77,7 +77,7 @@ index 8ab287e..a77ef2c 100644
@@ -1267,6 +1276,7 @@ const char* GC_LOGFILE_IVAR = "@gc_logfile_name";
*
*/
-
+
+RUBY_FUNC_EXPORTED
VALUE
rb_gc_log_file(int argc, VALUE *argv, VALUE self)
30 patches/ruby/1.9.3/p327/railsexpress/08-thread-variables.patch
View
@@ -5,7 +5,7 @@ index 30fec33..9f6c172 100644
@@ -103,6 +103,16 @@ with all sufficient information, see the ChangeLog file.
* String#prepend
* String#byteslice
-
+
+ * Thread
+ * added method:
+ * added Thread#thread_variable_get for getting thread local variables
@@ -26,7 +26,7 @@ index e8cb3b1..9ff491d 100644
@@ -27,6 +27,79 @@ class TestThread < Test::Unit::TestCase
end
end
-
+
+ def test_main_thread_variable_in_enumerator
+ assert_equal Thread.main, Thread.current
+
@@ -116,10 +116,10 @@ index eb0be9f..a8721c4 100644
+ * thread-local variables, please see <code>Thread#thread_variable_set</code>
+ * and <code>Thread#thread_variable_get</code>.
*/
-
+
static VALUE
@@ -2123,6 +2125,80 @@ rb_thread_aset(VALUE self, VALUE id, VALUE val)
-
+
/*
* call-seq:
+ * thr.thread_variable_get(key) -> obj or nil
@@ -200,19 +200,19 @@ index eb0be9f..a8721c4 100644
*
* Returns <code>true</code> if the given string (or symbol) exists as a
@@ -2993,6 +3069,9 @@ rb_gc_save_machine_context(rb_thread_t *th)
-
+
/*
*
+ * For thread-local variables, please see <code>Thread#thread_local_get</code>
+ * and <code>Thread#thread_local_set</code>.
+ *
*/
-
+
void
@@ -3195,6 +3274,76 @@ thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
return ST_CONTINUE;
}
-
+
+static int
+keys_i(VALUE key, VALUE value, VALUE ary)
+{
@@ -303,17 +303,17 @@ index 63141ba..553f180 100644
+++ b/vm.c
@@ -1896,6 +1896,7 @@ ruby_thread_init(VALUE self)
GetThreadPtr(self, th);
-
+
th_init(th, self);
+ rb_iv_set(self, "locals", rb_hash_new());
th->vm = vm;
-
+
th->top_wrapper = 0;
@@ -2164,6 +2165,7 @@ Init_VM(void)
-
- /* create main thread */
- th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
+
+ /* create main thread */
+ th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
+ rb_iv_set(th_self, "locals", rb_hash_new());
- vm->main_thread = th;
- vm->running_thread = th;
- th->vm = vm;
+ vm->main_thread = th;
+ vm->running_thread = th;
+ th->vm = vm;
747 patches/ruby/1.9.3/p327/railsexpress/09-faster-loading.patch
View
@@ -0,0 +1,747 @@
+diff --git a/array.c b/array.c
+index e427cb3..c33d106 100644
+--- a/array.c
++++ b/array.c
+@@ -295,6 +295,22 @@ rb_ary_frozen_p(VALUE ary)
+ return Qfalse;
+ }
+
++/* This can be used to take a snapshot of an array (with
++ e.g. rb_ary_replace) and check later whether the array has been
++ modified from the snapshot. The snapshot is cheap, though if
++ something does modify the array it will pay the cost of copying
++ it. */
++VALUE
++rb_ary_shared_with_p(VALUE ary1, VALUE ary2)
++{
++ if (!ARY_EMBED_P(ary1) && ARY_SHARED_P(ary1)
++ && !ARY_EMBED_P(ary2) && ARY_SHARED_P(ary2)
++ && RARRAY(ary1)->as.heap.aux.shared == RARRAY(ary2)->as.heap.aux.shared) {
++ return Qtrue;
++ }
++ return Qfalse;
++}
++
+ static VALUE
+ ary_alloc(VALUE klass)
+ {
+diff --git a/file.c b/file.c
+index c1db6d7..a28eb6a 100644
+--- a/file.c
++++ b/file.c
+@@ -148,23 +148,32 @@ file_path_convert(VALUE name)
+ return name;
+ }
+
+-static VALUE
+-rb_get_path_check(VALUE obj, int level)
++VALUE
++rb_get_path_check_to_string(VALUE obj, int level)
+ {
+ VALUE tmp;
+ ID to_path;
+- rb_encoding *enc;
+
+ if (insecure_obj_p(obj, level)) {
+ rb_insecure_operation();
+ }
+
++ if (RB_TYPE_P(obj, T_STRING)) {
++ return obj;
++ }
+ CONST_ID(to_path, "to_path");
+ tmp = rb_check_funcall(obj, to_path, 0, 0);
+ if (tmp == Qundef) {
+ tmp = obj;
+ }
+ StringValue(tmp);
++ return tmp;
++}
++
++VALUE
++rb_get_path_check_convert(VALUE obj, VALUE tmp, int level)
++{
++ rb_encoding *enc;
+
+ tmp = file_path_convert(tmp);
+ if (obj != tmp && insecure_obj_p(tmp, level)) {
+@@ -182,6 +191,13 @@ rb_get_path_check(VALUE obj, int level)
+ return rb_str_new4(tmp);
+ }
+
++static VALUE
++rb_get_path_check(VALUE obj, int level)
++{
++ VALUE tmp = rb_get_path_check_to_string(obj, level);
++ return rb_get_path_check_convert(obj, tmp, level);
++}
++
+ VALUE
+ rb_get_path_no_checksafe(VALUE obj)
+ {
+@@ -3249,7 +3265,6 @@ rb_file_expand_path(VALUE fname, VALUE dname)
+ VALUE
+ rb_file_expand_path_fast(VALUE fname, VALUE dname)
+ {
+- check_expand_path_args(fname, dname);
+ return rb_file_expand_path_internal(fname, dname, 0, 0, EXPAND_PATH_BUFFER());
+ }
+
+@@ -5237,7 +5252,7 @@ rb_find_file_ext_safe(VALUE *filep, const char *const *ext, int safe_level)
+ rb_raise(rb_eSecurityError, "loading from non-absolute path %s", f);
+ }
+
+- RB_GC_GUARD(load_path) = rb_get_load_path();
++ RB_GC_GUARD(load_path) = rb_get_expanded_load_path();
+ if (!load_path) return 0;
+
+ fname = rb_str_dup(*filep);
+@@ -5302,7 +5317,7 @@ rb_find_file_safe(VALUE path, int safe_level)
+ rb_raise(rb_eSecurityError, "loading from non-absolute path %s", f);
+ }
+
+- RB_GC_GUARD(load_path) = rb_get_load_path();
++ RB_GC_GUARD(load_path) = rb_get_expanded_load_path();
+ if (load_path) {
+ long i;
+
+diff --git a/hash.c b/hash.c
+index fbd8237..8423558 100644
+--- a/hash.c
++++ b/hash.c
+@@ -1087,7 +1087,7 @@ clear_i(VALUE key, VALUE value, VALUE dummy)
+ *
+ */
+
+-static VALUE
++VALUE
+ rb_hash_clear(VALUE hash)
+ {
+ rb_hash_modify_check(hash);
+diff --git a/include/ruby/intern.h b/include/ruby/intern.h
+index 9da266e..b82252a 100644
+--- a/include/ruby/intern.h
++++ b/include/ruby/intern.h
+@@ -56,6 +56,7 @@ VALUE rb_ary_tmp_new(long);
+ void rb_ary_free(VALUE);
+ void rb_ary_modify(VALUE);
+ VALUE rb_ary_freeze(VALUE);
++VALUE rb_ary_shared_with_p(VALUE, VALUE);
+ VALUE rb_ary_aref(int, VALUE*, VALUE);
+ VALUE rb_ary_subseq(VALUE, long, long);
+ void rb_ary_store(VALUE, long, VALUE);
+@@ -442,6 +443,7 @@ VALUE rb_hash_lookup(VALUE, VALUE);
+ VALUE rb_hash_lookup2(VALUE, VALUE, VALUE);
+ VALUE rb_hash_fetch(VALUE, VALUE);
+ VALUE rb_hash_aset(VALUE, VALUE, VALUE);
++VALUE rb_hash_clear(VALUE);
+ VALUE rb_hash_delete_if(VALUE);
+ VALUE rb_hash_delete(VALUE,VALUE);
+ typedef VALUE rb_hash_update_func(VALUE newkey, VALUE oldkey, VALUE value);
+diff --git a/internal.h b/internal.h
+index 59c9284..f5af903 100644
+--- a/internal.h
++++ b/internal.h
+@@ -94,6 +94,8 @@ VALUE rb_home_dir(const char *user, VALUE result);
+ VALUE rb_realpath_internal(VALUE basedir, VALUE path, int strict);
+ VALUE rb_file_expand_path_fast(VALUE, VALUE);
+ VALUE rb_file_expand_path_internal(VALUE, VALUE, int, int, VALUE);
++VALUE rb_get_path_check_to_string(VALUE, int);
++VALUE rb_get_path_check_convert(VALUE, VALUE, int);
+ void Init_File(void);
+
+ #ifdef _WIN32
+@@ -119,6 +121,7 @@ VALUE rb_iseq_clone(VALUE iseqval, VALUE newcbase);
+
+ /* load.c */
+ VALUE rb_get_load_path(void);
++VALUE rb_get_expanded_load_path(void);
+
+ /* math.c */
+ VALUE rb_math_atan2(VALUE, VALUE);
+diff --git a/load.c b/load.c
+index 163ec4c..e766880 100644
+--- a/load.c
++++ b/load.c
+@@ -18,7 +18,6 @@ VALUE ruby_dln_librefs;
+ #define IS_DLEXT(e) (strcmp((e), DLEXT) == 0)
+ #endif
+
+-
+ static const char *const loadable_ext[] = {
+ ".rb", DLEXT,
+ #ifdef DLEXT2
+@@ -34,21 +33,120 @@ rb_get_load_path(void)
+ return load_path;
+ }
+
+-VALUE
+-rb_get_expanded_load_path(void)
++enum expand_type {
++ EXPAND_ALL,
++ EXPAND_RELATIVE,
++ EXPAND_HOME,
++ EXPAND_NON_CACHE
++};
++
++/* Construct expanded load path and store it to cache.
++ We rebuild load path partially if the cache is invalid.
++ We don't cache non string object and expand it every times. We ensure that
++ string objects in $LOAD_PATH are frozen.
++ */
++static void
++rb_construct_expanded_load_path(int type, int *has_relative, int *has_non_cache)
+ {
+- VALUE load_path = rb_get_load_path();
++ rb_vm_t *vm = GET_VM();
++ VALUE load_path = vm->load_path;
++ VALUE expanded_load_path = vm->expanded_load_path;
+ VALUE ary;
+ long i;
++ int level = rb_safe_level();
+
+ ary = rb_ary_new2(RARRAY_LEN(load_path));
+ for (i = 0; i < RARRAY_LEN(load_path); ++i) {
+- VALUE path = rb_file_expand_path_fast(RARRAY_PTR(load_path)[i], Qnil);
+- rb_str_freeze(path);
+- rb_ary_push(ary, path);
++ VALUE path, as_str, expanded_path;
++ int is_string, non_cache;
++ char *as_cstr;
++ as_str = path = RARRAY_PTR(load_path)[i];
++ is_string = RB_TYPE_P(path, T_STRING) ? 1 : 0;
++ non_cache = !is_string ? 1 : 0;
++ as_str = rb_get_path_check_to_string(path, level);
++ as_cstr = RSTRING_PTR(as_str);
++
++ if (!non_cache) {
++ if ((type == EXPAND_RELATIVE &&
++ rb_is_absolute_path(as_cstr)) ||
++ (type == EXPAND_HOME &&
++ (!as_cstr[0] || as_cstr[0] != '~')) ||
++ (type == EXPAND_NON_CACHE)) {
++ /* Use cached expanded path. */
++ rb_ary_push(ary, RARRAY_PTR(expanded_load_path)[i]);
++ continue;
++ }
++ }
++ if (!*has_relative && !rb_is_absolute_path(as_cstr))
++ *has_relative = 1;
++ if (!*has_non_cache && non_cache)
++ *has_non_cache = 1;
++ /* Freeze only string object. We expand other objects every times. */
++ if (is_string)
++ rb_str_freeze(path);
++ as_str = rb_get_path_check_convert(path, as_str, level);
++ expanded_path = rb_file_expand_path_fast(as_str, Qnil);
++ rb_str_freeze(expanded_path);
++ rb_ary_push(ary, expanded_path);
+ }
+ rb_obj_freeze(ary);
+- return ary;
++ vm->expanded_load_path = ary;
++ rb_ary_replace(vm->load_path_snapshot, vm->load_path);
++}
++
++static VALUE
++load_path_getcwd(void)
++{
++ char *cwd = my_getcwd();
++ VALUE cwd_str = rb_filesystem_str_new_cstr(cwd);
++ xfree(cwd);
++ return cwd_str;
++}
++
++VALUE
++rb_get_expanded_load_path(void)
++{
++ rb_vm_t *vm = GET_VM();
++ const VALUE non_cache = Qtrue;
++
++ if (!rb_ary_shared_with_p(vm->load_path_snapshot, vm->load_path)) {
++ /* The load path was modified. Rebuild the expanded load path. */
++ int has_relative = 0, has_non_cache = 0;
++ rb_construct_expanded_load_path(EXPAND_ALL, &has_relative, &has_non_cache);
++ if (has_relative) {
++ vm->load_path_check_cache = load_path_getcwd();
++ }
++ else if (has_non_cache) {
++ /* Non string object. */
++ vm->load_path_check_cache = non_cache;
++ }
++ else {
++ vm->load_path_check_cache = 0;
++ }
++ }
++ else if (vm->load_path_check_cache == non_cache) {
++ int has_relative = 1, has_non_cache = 1;
++ /* Expand only non-cacheable objects. */
++ rb_construct_expanded_load_path(EXPAND_NON_CACHE,
++ &has_relative, &has_non_cache);
++ }
++ else if (vm->load_path_check_cache) {
++ int has_relative = 1, has_non_cache = 1;
++ VALUE cwd = load_path_getcwd();
++ if (!rb_str_equal(vm->load_path_check_cache, cwd)) {
++ /* Current working directory or filesystem encoding was changed.
++ Expand relative load path and non-cacheable objects again. */
++ vm->load_path_check_cache = cwd;
++ rb_construct_expanded_load_path(EXPAND_RELATIVE,
++ &has_relative, &has_non_cache);
++ }
++ else {
++ /* Expand only tilde (User HOME) and non-cacheable objects. */
++ rb_construct_expanded_load_path(EXPAND_HOME,
++ &has_relative, &has_non_cache);
++ }
++ }
++ return vm->expanded_load_path;
+ }
+
+ static VALUE
+@@ -63,12 +161,121 @@ get_loaded_features(void)
+ return GET_VM()->loaded_features;
+ }
+
++static void
++reset_loaded_features_snapshot(void)
++{
++ rb_vm_t *vm = GET_VM();
++ rb_ary_replace(vm->loaded_features_snapshot, vm->loaded_features);
++}
++
++static VALUE
++get_loaded_features_index_raw(void)
++{
++ return GET_VM()->loaded_features_index;
++}
++
+ static st_table *
+ get_loading_table(void)
+ {
+ return GET_VM()->loading_table;
+ }
+
++static void
++features_index_add_single(VALUE short_feature, VALUE offset)
++{
++ VALUE features_index, this_feature_index;
++ features_index = get_loaded_features_index_raw();
++ if ((this_feature_index = rb_hash_lookup(features_index, short_feature)) == Qnil) {
++ this_feature_index = rb_ary_new();
++ rb_hash_aset(features_index, short_feature, this_feature_index);
++ }
++ rb_ary_push(this_feature_index, offset);
++}
++
++/* Add to the loaded-features index all the required entries for
++ `feature`, located at `offset` in $LOADED_FEATURES. We add an
++ index entry at each string `short_feature` for which
++ feature == "#{prefix}#{short_feature}#{e}"
++ where `e` is empty or matches %r{^\.[^./]*$}, and `prefix` is empty
++ or ends in '/'. This maintains the invariant that `rb_feature_p()`
++ relies on for its fast lookup.
++*/
++static void
++features_index_add(VALUE feature, VALUE offset)
++{
++ VALUE short_feature;
++ const char *feature_str, *feature_end, *ext, *p;
++
++ feature_str = StringValuePtr(feature);
++ feature_end = feature_str + RSTRING_LEN(feature);
++
++ for (ext = feature_end; ext > feature_str; ext--)
++ if (*ext == '.' || *ext == '/')
++ break;
++ if (*ext != '.')
++ ext = NULL;
++ /* Now `ext` points to the only string matching %r{^\.[^./]*$} that is
++ at the end of `feature`, or is NULL if there is no such string. */
++
++ p = ext ? ext : feature_end;
++ while (1) {
++ p--;
++ while (p >= feature_str && *p != '/')
++ p--;
++ if (p < feature_str)
++ break;
++ /* Now *p == '/'. We reach this point for every '/' in `feature`. */
++ short_feature = rb_str_substr(feature, p + 1 - feature_str, feature_end - p - 1);
++ features_index_add_single(short_feature, offset);
++ if (ext) {
++ short_feature = rb_str_substr(feature, p + 1 - feature_str, ext - p - 1);
++ features_index_add_single(short_feature, offset);
++ }
++ }
++ features_index_add_single(feature, offset);
++ if (ext) {
++ short_feature = rb_str_substr(feature, 0, ext - feature_str);
++ features_index_add_single(short_feature, offset);
++ }
++}
++
++static VALUE
++get_loaded_features_index(void)
++{
++ VALUE features;
++ int i;
++ rb_vm_t *vm = GET_VM();
++
++ if (!rb_ary_shared_with_p(vm->loaded_features_snapshot, vm->loaded_features)) {
++ /* The sharing was broken; something (other than us in rb_provide_feature())
++ modified loaded_features. Rebuild the index. */
++ rb_hash_clear(vm->loaded_features_index);
++ features = vm->loaded_features;
++ for (i = 0; i < RARRAY_LEN(features); i++) {
++ VALUE entry, as_str;
++ as_str = entry = rb_ary_entry(features, i);
++ StringValue(as_str);
++ if (as_str != entry)
++ rb_ary_store(features, i, as_str);
++ rb_str_freeze(as_str);
++ features_index_add(as_str, INT2FIX(i));
++ }
++ reset_loaded_features_snapshot();
++ }
++ return vm->loaded_features_index;
++}
++
++/* This searches `load_path` for a value such that
++ name == "#{load_path[i]}/#{feature}"
++ if `feature` is a suffix of `name`, or otherwise
++ name == "#{load_path[i]}/#{feature}#{ext}"
++ for an acceptable string `ext`. It returns
++ `load_path[i].to_str` if found, else 0.
++
++ If type is 's', then `ext` is acceptable only if IS_DLEXT(ext);
++ if 'r', then only if IS_RBEXT(ext); otherwise `ext` may be absent
++ or have any value matching `%r{^\.[^./]*$}`.
++*/
+ static VALUE
+ loaded_feature_path(const char *name, long vlen, const char *feature, long len,
+ int type, VALUE load_path)
+@@ -77,7 +284,7 @@ loaded_feature_path(const char *name, long vlen, const char *feature, long len,
+ long plen;
+ const char *e;
+
+- if(vlen < len) return 0;
++ if (vlen < len+1) return 0;
+ if (!strncmp(name+(vlen-len),feature,len)){
+ plen = vlen - len - 1;
+ } else {
+@@ -88,23 +295,22 @@ loaded_feature_path(const char *name, long vlen, const char *feature, long len,
+ return 0;
+ plen = e - name - len - 1;
+ }
++ if (type == 's' && !IS_DLEXT(&name[plen+len+1])
++ || type == 'r' && !IS_RBEXT(&name[plen+len+1])
++ || name[plen] != '/') {
++ return 0;
++ }
++ /* Now name == "#{prefix}/#{feature}#{ext}" where ext is acceptable
++ (possibly empty) and prefix is some string of length plen. */
++
+ for (i = 0; i < RARRAY_LEN(load_path); ++i) {
+ VALUE p = RARRAY_PTR(load_path)[i];
+ const char *s = StringValuePtr(p);
+ long n = RSTRING_LEN(p);
+
+ if (n != plen ) continue;
+- if (n && (strncmp(name, s, n) || name[n] != '/')) continue;
+- switch (type) {
+- case 's':
+- if (IS_DLEXT(&name[n+len+1])) return p;
+- break;
+- case 'r':
+- if (IS_RBEXT(&name[n+len+1])) return p;
+- break;
+- default:
+- return p;
+- }
++ if (n && strncmp(name, s, n)) continue;
++ return p;
+ }
+ return 0;
+ }
+@@ -132,7 +338,7 @@ loaded_feature_path_i(st_data_t v, st_data_t b, st_data_t f)
+ static int
+ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const char **fn)
+ {
+- VALUE v, features, p, load_path = 0;
++ VALUE features, features_index, feature_val, this_feature_index, v, p, load_path = 0;
+ const char *f, *e;
+ long i, len, elen, n;
+ st_table *loading_tbl;
+@@ -151,8 +357,39 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
+ type = 0;
+ }
+ features = get_loaded_features();
+- for (i = 0; i < RARRAY_LEN(features); ++i) {
+- v = RARRAY_PTR(features)[i];
++ features_index = get_loaded_features_index();
++
++ feature_val = rb_str_new(feature, len);
++ this_feature_index = rb_hash_lookup(features_index, feature_val);
++ /* We search `features` for an entry such that either
++ "#{features[i]}" == "#{load_path[j]}/#{feature}#{e}"
++ for some j, or
++ "#{features[i]}" == "#{feature}#{e}"
++ Here `e` is an "allowed" extension -- either empty or one
++ of the extensions accepted by IS_RBEXT, IS_SOEXT, or
++ IS_DLEXT. Further, if `ext && rb` then `IS_RBEXT(e)`,
++ and if `ext && !rb` then `IS_SOEXT(e) || IS_DLEXT(e)`.
++
++ If `expanded`, then only the latter form (without load_path[j])
++ is accepted. Otherwise either form is accepted, *unless* `ext`
++ is false and an otherwise-matching entry of the first form is
++ preceded by an entry of the form
++ "#{features[i2]}" == "#{load_path[j2]}/#{feature}#{e2}"
++ where `e2` matches %r{^\.[^./]*$} but is not an allowed extension.
++ After a "distractor" entry of this form, only entries of the
++ form "#{feature}#{e}" are accepted.
++
++ In `rb_provide_feature()` and `get_loaded_features_index()` we
++ maintain an invariant that the array `this_feature_index` will
++ point to every entry in `features` which has the form
++ "#{prefix}#{feature}#{e}"
++ where `e` is empty or matches %r{^\.[^./]*$}, and `prefix` is empty
++ or ends in '/'. This includes both match forms above, as well
++ as any distractors, so we may ignore all other entries in `features`.
++ */
++ for (i = 0; this_feature_index != Qnil && i < RARRAY_LEN(this_feature_index); i++) {
++ long index = FIX2LONG(rb_ary_entry(this_feature_index, i));
++ v = RARRAY_PTR(features)[index];
+ f = StringValuePtr(v);
+ if ((n = RSTRING_LEN(v)) < len) continue;
+ if (strncmp(f, feature, len) != 0) {
+@@ -175,6 +412,7 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
+ return 'r';
+ }
+ }
++
+ loading_tbl = get_loading_table();
+ if (loading_tbl) {
+ f = 0;
+@@ -183,7 +421,7 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
+ fs.name = feature;
+ fs.len = len;
+ fs.type = type;
+- fs.load_path = load_path ? load_path : rb_get_load_path();
++ fs.load_path = load_path ? load_path : rb_get_expanded_load_path();
+ fs.result = 0;
+ st_foreach(loading_tbl, loaded_feature_path_i, (st_data_t)&fs);
+ if ((f = fs.result) != 0) {
+@@ -233,7 +471,7 @@ rb_feature_provided(const char *feature, const char **loading)
+
+ if (*feature == '.' &&
+ (feature[1] == '/' || strncmp(feature+1, "./", 2) == 0)) {
+- fullpath = rb_file_expand_path_fast(rb_str_new2(feature), Qnil);
++ fullpath = rb_file_expand_path_fast(rb_get_path(rb_str_new2(feature)), Qnil);
+ feature = RSTRING_PTR(fullpath);
+ }
+ if (ext && !strchr(ext, '/')) {
+@@ -254,11 +492,18 @@ rb_feature_provided(const char *feature, const char **loading)
+ static void
+ rb_provide_feature(VALUE feature)
+ {
+- if (OBJ_FROZEN(get_loaded_features())) {
++ VALUE features;
++
++ features = get_loaded_features();
++ if (OBJ_FROZEN(features)) {
+ rb_raise(rb_eRuntimeError,
+ "$LOADED_FEATURES is frozen; cannot append feature");
+ }
+- rb_ary_push(get_loaded_features(), feature);
++ rb_str_freeze(feature);
++
++ rb_ary_push(features, feature);
++ features_index_add(feature, INT2FIX(RARRAY_LEN(features)-1));
++ reset_loaded_features_snapshot();
+ }
+
+ void
+@@ -774,10 +1019,15 @@ Init_load()
+ rb_alias_variable(rb_intern("$-I"), id_load_path);
+ rb_alias_variable(rb_intern("$LOAD_PATH"), id_load_path);
+ vm->load_path = rb_ary_new();
++ vm->expanded_load_path = rb_ary_new();
++ vm->load_path_snapshot = rb_ary_new();
++ vm->load_path_check_cache = 0;
+
+ rb_define_virtual_variable("$\"", get_loaded_features, 0);
+ rb_define_virtual_variable("$LOADED_FEATURES", get_loaded_features, 0);
+ vm->loaded_features = rb_ary_new();
++ vm->loaded_features_snapshot = rb_ary_new();
++ vm->loaded_features_index = rb_hash_new();
+
+ rb_define_global_function("load", rb_f_load, -1);
+ rb_define_global_function("require", rb_f_require, 1);
+diff --git a/ruby.c b/ruby.c
+index 3ddd96c..7ffc78e 100644
+--- a/ruby.c
++++ b/ruby.c
+@@ -1366,7 +1366,8 @@ process_options(int argc, char **argv, struct cmdline_options *opt)
+ long i;
+ VALUE load_path = GET_VM()->load_path;
+ for (i = 0; i < RARRAY_LEN(load_path); ++i) {
+- rb_enc_associate(RARRAY_PTR(load_path)[i], lenc);
++ RARRAY_PTR(load_path)[i] =
++ rb_enc_associate(rb_str_dup(RARRAY_PTR(load_path)[i]), lenc);
+ }
+ }
+ if (!(opt->disable & DISABLE_BIT(gems))) {
+diff --git a/test/ruby/test_require.rb b/test/ruby/test_require.rb
+index 58a9ee2..ec75096 100644
+--- a/test/ruby/test_require.rb
++++ b/test/ruby/test_require.rb
+@@ -356,4 +356,114 @@ class TestRequire < Test::Unit::TestCase
+ $:.replace(loadpath)
+ $".replace(features)
+ end
++
++ def test_require_changed_current_dir
++ bug7158 = '[ruby-core:47970]'
++ Dir.mktmpdir {|tmp|
++ Dir.chdir(tmp) {
++ Dir.mkdir("a")
++ Dir.mkdir("b")
++ open(File.join("a", "foo.rb"), "w") {}
++ open(File.join("b", "bar.rb"), "w") {|f|
++ f.puts "p :ok"
++ }
++ assert_in_out_err([], <<-INPUT, %w(:ok), [], bug7158)
++ $: << "."
++ Dir.chdir("a")
++ require "foo"
++ Dir.chdir("../b")
++ p :ng unless require "bar"
++ Dir.chdir("..")
++ p :ng if require "b/bar"
++ INPUT
++ }
++ }
++ end
++
++ def test_require_not_modified_load_path
++ bug7158 = '[ruby-core:47970]'
++ Dir.mktmpdir {|tmp|
++ Dir.chdir(tmp) {
++ open("foo.rb", "w") {}
++ assert_in_out_err([], <<-INPUT, %w(:ok), [], bug7158)
++ a = Object.new
++ def a.to_str
++ "#{tmp}"
++ end
++ $: << a
++ require "foo"
++ last_path = $:.pop
++ p :ok if last_path == a && last_path.class == Object
++ INPUT
++ }
++ }
++ end
++
++ def test_require_changed_home
++ bug7158 = '[ruby-core:47970]'
++ Dir.mktmpdir {|tmp|
++ Dir.chdir(tmp) {
++ open("foo.rb", "w") {}
++ Dir.mkdir("a")
++ open(File.join("a", "bar.rb"), "w") {}
++ assert_in_out_err([], <<-INPUT, %w(:ok), [], bug7158)
++ $: << '~'
++ ENV['HOME'] = "#{tmp}"
++ require "foo"
++ ENV['HOME'] = "#{tmp}/a"
++ p :ok if require "bar"
++ INPUT
++ }
++ }
++ end
++
++ def test_require_to_path_redefined_in_load_path
++ bug7158 = '[ruby-core:47970]'
++ Dir.mktmpdir {|tmp|
++ Dir.chdir(tmp) {
++ open("foo.rb", "w") {}
++ assert_in_out_err(["RUBYOPT"=>nil], <<-INPUT, %w(:ok), [], bug7158)
++ a = Object.new
++ def a.to_path
++ "bar"
++ end
++ $: << a
++ begin
++ require "foo"
++ p :ng
++ rescue LoadError
++ end
++ def a.to_path
++ "#{tmp}"
++ end
++ p :ok if require "foo"
++ INPUT
++ }
++ }
++ end
++
++ def test_require_to_str_redefined_in_load_path
++ bug7158 = '[ruby-core:47970]'
++ Dir.mktmpdir {|tmp|
++ Dir.chdir(tmp) {
++ open("foo.rb", "w") {}
++ assert_in_out_err(["RUBYOPT"=>nil], <<-INPUT, %w(:ok), [], bug7158)
++ a = Object.new
++ def a.to_str
++ "foo"
++ end
++ $: << a
++ begin
++ require "foo"
++ p :ng
++ rescue LoadError
++ end
++ def a.to_str
++ "#{tmp}"
++ end
++ p :ok if require "foo"
++ INPUT
++ }
++ }
++ end
+ end
+diff --git a/vm.c b/vm.c
+index 553f180..3436898 100644
+--- a/vm.c
++++ b/vm.c
+@@ -1578,7 +1578,12 @@ rb_vm_mark(void *ptr)
+ RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
+ RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
+ RUBY_MARK_UNLESS_NULL(vm->load_path);
++ RUBY_MARK_UNLESS_NULL(vm->load_path_snapshot);
++ RUBY_MARK_UNLESS_NULL(vm->load_path_check_cache);
++ RUBY_MARK_UNLESS_NULL(vm->expanded_load_path);
+ RUBY_MARK_UNLESS_NULL(vm->loaded_features);
++ RUBY_MARK_UNLESS_NULL(vm->loaded_features_snapshot);
++ RUBY_MARK_UNLESS_NULL(vm->loaded_features_index);
+ RUBY_MARK_UNLESS_NULL(vm->top_self);
+ RUBY_MARK_UNLESS_NULL(vm->coverages);
+ rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);
+diff --git a/vm_core.h b/vm_core.h
+index 60146f0..7b25806 100644
+--- a/vm_core.h
++++ b/vm_core.h
+@@ -298,7 +298,12 @@ typedef struct rb_vm_struct {
+ /* load */
+ VALUE top_self;
+ VALUE load_path;
++ VALUE load_path_snapshot;
++ VALUE load_path_check_cache;
++ VALUE expanded_load_path;
+ VALUE loaded_features;
++ VALUE loaded_features_snapshot;
++ VALUE loaded_features_index;
+ struct st_table *loading_table;
+
+ /* signal */
2,337 patches/ruby/1.9.3/p327/railsexpress/10-falcon-st-opt.patch
View
@@ -0,0 +1,2337 @@
+diff --git a/common.mk b/common.mk
+index c9ef641..56e52b6 100644
+--- a/common.mk
++++ b/common.mk
+@@ -638,7 +638,8 @@ file.$(OBJEXT): {$(VPATH)}file.c $(RUBY_H_INCLUDES) {$(VPATH)}io.h \
+ gc.$(OBJEXT): {$(VPATH)}gc.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
+ {$(VPATH)}regex.h $(ENCODING_H_INCLUDES) $(VM_CORE_H_INCLUDES) \
+ {$(VPATH)}gc.h {$(VPATH)}io.h {$(VPATH)}eval_intern.h {$(VPATH)}util.h \
+- {$(VPATH)}debug.h {$(VPATH)}internal.h {$(VPATH)}constant.h
++ {$(VPATH)}debug.h {$(VPATH)}internal.h {$(VPATH)}constant.h \
++ {$(VPATH)}pool_alloc.inc.h {$(VPATH)}pool_alloc.h
+ hash.$(OBJEXT): {$(VPATH)}hash.c $(RUBY_H_INCLUDES) {$(VPATH)}util.h \
+ $(ENCODING_H_INCLUDES)
+ inits.$(OBJEXT): {$(VPATH)}inits.c $(RUBY_H_INCLUDES) \
+@@ -702,7 +703,7 @@ signal.$(OBJEXT): {$(VPATH)}signal.c $(RUBY_H_INCLUDES) \
+ $(VM_CORE_H_INCLUDES) {$(VPATH)}debug.h
+ sprintf.$(OBJEXT): {$(VPATH)}sprintf.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
+ {$(VPATH)}regex.h {$(VPATH)}vsnprintf.c $(ENCODING_H_INCLUDES)
+-st.$(OBJEXT): {$(VPATH)}st.c $(RUBY_H_INCLUDES)
++st.$(OBJEXT): {$(VPATH)}st.c $(RUBY_H_INCLUDES) {$(VPATH)}pool_alloc.h
+ strftime.$(OBJEXT): {$(VPATH)}strftime.c $(RUBY_H_INCLUDES) \
+ {$(VPATH)}timev.h
+ string.$(OBJEXT): {$(VPATH)}string.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
+diff --git a/configure.in b/configure.in
+index af5acc4..354b9f2 100644
+--- a/configure.in
++++ b/configure.in
+@@ -1324,6 +1324,29 @@ main() {
+ CFLAGS="$save_CFLAGS"])
+ AC_DEFINE_UNQUOTED(GC_MARK_STACKFRAME_WORD, $rb_cv_gc_mark_stackframe_word)
+
++AS_CASE(["$target_os"],
++[openbsd*], [
++ AC_CACHE_CHECK(for heap align log on openbsd, rb_cv_page_size_log,
++ [rb_cv_page_size_log=no
++ for page_log in 12 13; do
++ AC_TRY_RUN([
++#include <math.h>
++#include <unistd.h>
++
++int
++main() {
++ if ((int)log2((double)sysconf(_SC_PAGESIZE)) != $page_log) return 1;
++ return 0;
++}
++ ],
++ rb_cv_page_size_log="$page_log"; break)
++ done])
++ if test $rb_cv_page_size_log != no; then
++ AC_DEFINE_UNQUOTED(HEAP_ALIGN_LOG, $rb_cv_page_size_log)
++ else
++ AC_DEFINE_UNQUOTED(HEAP_ALIGN_LOG, 12)
++ fi
++])
+
+ dnl Checks for library functions.
+ AC_TYPE_GETGROUPS
+@@ -1424,7 +1447,8 @@ AC_CHECK_FUNCS(fmod killpg wait4 waitpid fork spawnv syscall __syscall chroot ge
+ setsid telldir seekdir fchmod cosh sinh tanh log2 round\
+ setuid setgid daemon select_large_fdset setenv unsetenv\
+ mktime timegm gmtime_r clock_gettime gettimeofday poll ppoll\
+- pread sendfile shutdown sigaltstack dl_iterate_phdr)
++ pread sendfile shutdown sigaltstack dl_iterate_phdr\
++ dup3 pipe2 posix_memalign memalign)
+
+ AC_CACHE_CHECK(for unsetenv returns a value, rb_cv_unsetenv_return_value,
+ [AC_TRY_COMPILE([
+diff --git a/ext/-test-/st/numhash/numhash.c b/ext/-test-/st/numhash/numhash.c
+index e186cd4..53d9e1b 100644
+--- a/ext/-test-/st/numhash/numhash.c
++++ b/ext/-test-/st/numhash/numhash.c
+@@ -54,7 +54,7 @@ numhash_i(st_data_t key, st_data_t value, st_data_t arg, int error)
+ static VALUE
+ numhash_each(VALUE self)
+ {
+- return st_foreach((st_table *)DATA_PTR(self), numhash_i, self) ? Qtrue : Qfalse;
++ return st_foreach_check((st_table *)DATA_PTR(self), numhash_i, self, 0) ? Qtrue : Qfalse;
+ }
+
+ void
+diff --git a/gc.c b/gc.c
+index a77ef2c..41daff1 100644
+--- a/gc.c
++++ b/gc.c
+@@ -20,10 +20,12 @@
+ #include "vm_core.h"
+ #include "internal.h"
+ #include "gc.h"
++#include "pool_alloc.h"
+ #include "constant.h"
+ #include <stdio.h>
+ #include <setjmp.h>
+ #include <sys/types.h>
++#include <assert.h>
+
+ #ifdef HAVE_SYS_TIME_H
+ #include <sys/time.h>
+@@ -35,7 +37,12 @@
+
+ #if defined _WIN32 || defined __CYGWIN__
+ #include <windows.h>
++#elif defined(HAVE_POSIX_MEMALIGN)
++#elif defined(HAVE_MEMALIGN)
++#include <malloc.h>
+ #endif
++static void aligned_free(void *);
++static void *aligned_malloc(size_t alignment, size_t size);
+
+ #ifdef HAVE_VALGRIND_MEMCHECK_H
+ # include <valgrind/memcheck.h>
+@@ -329,6 +336,24 @@ struct gc_list {
+
+ #define CALC_EXACT_MALLOC_SIZE 0
+
++#ifdef POOL_ALLOC_API
++/* POOL ALLOC API */
++#define POOL_ALLOC_PART 1
++#include "pool_alloc.inc.h"
++#undef POOL_ALLOC_PART
++
++typedef struct pool_layout_t pool_layout_t;
++struct pool_layout_t {
++ pool_header
++ p6, /* st_table && st_table_entry */
++ p11; /* st_table.bins init size */
++} pool_layout = {
++ INIT_POOL(void*[6]),
++ INIT_POOL(void*[11])
++};
++static void pool_finalize_header(pool_header *header);
++#endif
++
+ typedef struct rb_objspace {
+ struct {
+ size_t limit;
+@@ -338,6 +363,9 @@ typedef struct rb_objspace {
+ size_t allocations;
+ #endif
+ } malloc_params;
++#ifdef POOL_ALLOC_API
++ pool_layout_t *pool_headers;
++#endif
+ struct {
+ size_t increment;
+ struct heaps_slot *ptr;
+@@ -407,7 +435,11 @@ typedef struct rb_objspace {
+ #define ruby_initial_gc_stress initial_params.gc_stress
+ int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
+ #else
++# ifdef POOL_ALLOC_API
++static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, &pool_layout, {HEAP_MIN_SLOTS}};
++# else
+ static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
++# endif
+ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
+ #endif
+ #define malloc_limit objspace->malloc_params.limit
+@@ -461,6 +493,10 @@ rb_objspace_alloc(void)
+ memset(objspace, 0, sizeof(*objspace));
+ malloc_limit = initial_malloc_limit;
+ ruby_gc_stress = ruby_initial_gc_stress;
++#ifdef POOL_ALLOC_API
++ objspace->pool_headers = (pool_layout_t*) malloc(sizeof(pool_layout));
++ memcpy(objspace->pool_headers, &pool_layout, sizeof(pool_layout));
++#endif
+
+ return objspace;
+ }
+@@ -601,6 +637,13 @@ rb_objspace_free(rb_objspace_t *objspace)
+ heaps_used = 0;
+ heaps = 0;
+ }
++#ifdef POOL_ALLOC_API
++ if (objspace->pool_headers) {
++ pool_finalize_header(&objspace->pool_headers->p6);
++ pool_finalize_header(&objspace->pool_headers->p11);
++ free(objspace->pool_headers);
++ }
++#endif
+ free(objspace);
+ }
+ #endif
+@@ -1125,6 +1168,27 @@ rb_gc_stats_enabled()
+ return gc_statistics ? Qtrue : Qfalse;
+ }
+
++#ifdef POOL_ALLOC_API
++/* POOL ALLOC API */
++#define POOL_ALLOC_PART 2
++#include "pool_alloc.inc.h"
++#undef POOL_ALLOC_PART
++
++void
++ruby_xpool_free(void *ptr)
++{
++ pool_free_entry((void**)ptr);
++}
++
++#define CONCRET_POOL_MALLOC(pnts) \
++void * ruby_xpool_malloc_##pnts##p () { \
++ return pool_alloc_entry(&rb_objspace.pool_headers->p##pnts ); \
++}
++CONCRET_POOL_MALLOC(6)
++CONCRET_POOL_MALLOC(11)
++#undef CONCRET_POOL_MALLOC
++
++#endif
+
+ /*
+ * call-seq:
+@@ -1634,6 +1698,55 @@ allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
+ heaps_length = next_heaps_length;
+ }
+
++static void *
++aligned_malloc(size_t alignment, size_t size)
++{
++ void *res;
++
++#if defined __MINGW32__
++ res = __mingw_aligned_malloc(size, alignment);
++#elif defined _WIN32 && !defined __CYGWIN__
++ res = _aligned_malloc(size, alignment);
++#elif defined(HAVE_POSIX_MEMALIGN)
++ if (posix_memalign(&res, alignment, size) == 0) {
++ return res;
++ }
++ else {
++ return NULL;
++ }
++#elif defined(HAVE_MEMALIGN)
++ res = memalign(alignment, size);
++#else
++ char* aligned;
++ res = malloc(alignment + size + sizeof(void*));
++ aligned = (char*)res + alignment + sizeof(void*);
++ aligned -= ((VALUE)aligned & (alignment - 1));
++ ((void**)aligned)[-1] = res;
++ res = (void*)aligned;
++#endif
++
++#if defined(_DEBUG) || defined(GC_DEBUG)
++ /* alignment must be a power of 2 */
++ assert((alignment - 1) & alignment == 0);
++ assert(alignment % sizeof(void*) == 0);
++#endif
++ return res;
++}
++
++static void
++aligned_free(void *ptr)
++{
++#if defined __MINGW32__
++ __mingw_aligned_free(ptr);
++#elif defined _WIN32 && !defined __CYGWIN__
++ _aligned_free(ptr);
++#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
++ free(ptr);
++#else
++ free(((void**)ptr)[-1]);
++#endif
++}
++
+ static void
+ assign_heap_slot(rb_objspace_t *objspace)
+ {
+diff --git a/hash.c b/hash.c
+index 8423558..4cb2e2d 100644
+--- a/hash.c
++++ b/hash.c
+@@ -44,7 +44,7 @@ rb_any_cmp(VALUE a, VALUE b)
+ if (FIXNUM_P(a) && FIXNUM_P(b)) {
+ return a != b;
+ }
+- if (TYPE(a) == T_STRING && RBASIC(a)->klass == rb_cString &&
++ if (RB_TYPE_P(a, T_STRING) && RBASIC(a)->klass == rb_cString &&
+ TYPE(b) == T_STRING && RBASIC(b)->klass == rb_cString) {
+ return rb_str_hash_cmp(a, b);
+ }
+@@ -80,20 +80,14 @@ rb_any_hash(VALUE a)
+ VALUE hval;
+ st_index_t hnum;
+
+- switch (TYPE(a)) {
+- case T_FIXNUM:
+- case T_SYMBOL:
+- case T_NIL:
+- case T_FALSE:
+- case T_TRUE:
+- hnum = rb_hash_end(rb_hash_start((unsigned int)a));
+- break;
+-
+- case T_STRING:
++ if (SPECIAL_CONST_P(a)) {
++ if (a == Qundef) return 0;
++ hnum = rb_hash_end(rb_hash_start((st_index_t)a));
++ }
++ else if (BUILTIN_TYPE(a) == T_STRING) {
+ hnum = rb_str_hash(a);
+- break;
+-
+- default:
++ }
++ else {
+ hval = rb_hash(a);
+ hnum = FIX2LONG(hval);
+ }
+@@ -106,10 +100,8 @@ static const struct st_hash_type objhash = {
+ rb_any_hash,
+ };
+
+-static const struct st_hash_type identhash = {
+- st_numcmp,
+- st_numhash,
+-};
++extern const struct st_hash_type st_hashtype_num;
++#define identhash st_hashtype_num
+
+ typedef int st_foreach_func(st_data_t, st_data_t, st_data_t);
+
+@@ -124,7 +116,6 @@ foreach_safe_i(st_data_t key, st_data_t value, struct foreach_safe_arg *arg)
+ {
+ int status;
+
+- if (key == Qundef) return ST_CONTINUE;
+ status = (*arg->func)(key, value, arg->arg);
+ if (status == ST_CONTINUE) {
+ return ST_CHECK;
+@@ -140,7 +131,7 @@ st_foreach_safe(st_table *table, int (*func)(ANYARGS), st_data_t a)
+ arg.tbl = table;
+ arg.func = (st_foreach_func *)func;
+ arg.arg = a;
+- if (st_foreach(table, foreach_safe_i, (st_data_t)&arg)) {
++ if (st_foreach_check(table, foreach_safe_i, (st_data_t)&arg, 0)) {
+ rb_raise(rb_eRuntimeError, "hash modified during iteration");
+ }
+ }
+@@ -154,21 +145,21 @@ struct hash_foreach_arg {
+ };
+
+ static int
+-hash_foreach_iter(st_data_t key, st_data_t value, struct hash_foreach_arg *arg)
++hash_foreach_iter(st_data_t key, st_data_t value, st_data_t argp)
+ {
++ struct hash_foreach_arg *arg = (struct hash_foreach_arg *)argp;
+ int status;
+ st_table *tbl;
+
+ tbl = RHASH(arg->hash)->ntbl;
+- if ((VALUE)key == Qundef) return ST_CONTINUE;
+ status = (*arg->func)((VALUE)key, (VALUE)value, arg->arg);
+ if (RHASH(arg->hash)->ntbl != tbl) {
+ rb_raise(rb_eRuntimeError, "rehash occurred during iteration");
+ }
+ switch (status) {
+ case ST_DELETE:
+- st_delete_safe(tbl, &key, 0, Qundef);
+ FL_SET(arg->hash, HASH_DELETED);
++ return ST_DELETE;
+ case ST_CONTINUE:
+ break;
+ case ST_STOP:
+@@ -184,7 +175,7 @@ hash_foreach_ensure(VALUE hash)
+
+ if (RHASH(hash)->iter_lev == 0) {
+ if (FL_TEST(hash, HASH_DELETED)) {
+- st_cleanup_safe(RHASH(hash)->ntbl, Qundef);
++ st_cleanup_safe(RHASH(hash)->ntbl, (st_data_t)Qundef);
+ FL_UNSET(hash, HASH_DELETED);
+ }
+ }
+@@ -192,9 +183,10 @@ hash_foreach_ensure(VALUE hash)
+ }
+
+ static VALUE
+-hash_foreach_call(struct hash_foreach_arg *arg)
++hash_foreach_call(VALUE arg)
+ {
+- if (st_foreach(RHASH(arg->hash)->ntbl, hash_foreach_iter, (st_data_t)arg)) {
++ VALUE hash = ((struct hash_foreach_arg *)arg)->hash;
++ if (st_foreach_check(RHASH(hash)->ntbl, hash_foreach_iter, (st_data_t)arg, (st_data_t)Qundef)) {
+ rb_raise(rb_eRuntimeError, "hash modified during iteration");
+ }
+ return Qnil;
+@@ -447,7 +439,7 @@ rb_hash_rehash_i(VALUE key, VALUE value, VALUE arg)
+ {
+ st_table *tbl = (st_table *)arg;
+
+- if (key != Qundef) st_insert(tbl, key, value);
++ st_insert(tbl, (st_data_t)key, (st_data_t)value);
+ return ST_CONTINUE;
+ }
+
+@@ -490,6 +482,20 @@ rb_hash_rehash(VALUE hash)
+ return hash;
+ }
+
++static VALUE
++hash_default_value(VALUE hash, VALUE key)
++{
++ if (rb_method_basic_definition_p(CLASS_OF(hash), id_default)) {
++ VALUE ifnone = RHASH_IFNONE(hash);
++ if (!FL_TEST(hash, HASH_PROC_DEFAULT)) return ifnone;
++ if (key == Qundef) return Qnil;
++ return rb_funcall(ifnone, id_yield, 2, hash, key);
++ }
++ else {
++ return rb_funcall(hash, id_default, 1, key);
++ }
++}
++
+ /*
+ * call-seq:
+ * hsh[key] -> value
+@@ -510,13 +516,7 @@ rb_hash_aref(VALUE hash, VALUE key)
+ st_data_t val;
+
+ if (!RHASH(hash)->ntbl || !st_lookup(RHASH(hash)->ntbl, key, &val)) {
+- if (!FL_TEST(hash, HASH_PROC_DEFAULT) &&
+- rb_method_basic_definition_p(CLASS_OF(hash), id_default)) {
+- return RHASH_IFNONE(hash);
+- }
+- else {
+- return rb_funcall(hash, id_default, 1, key);
+- }
++ return hash_default_value(hash, key);
+ }
+ return (VALUE)val;
+ }
+@@ -659,7 +659,7 @@ rb_hash_default(int argc, VALUE *argv, VALUE hash)
+ static VALUE
+ rb_hash_set_default(VALUE hash, VALUE ifnone)
+ {
+- rb_hash_modify(hash);
++ rb_hash_modify_check(hash);
+ RHASH_IFNONE(hash) = ifnone;
+ FL_UNSET(hash, HASH_PROC_DEFAULT);
+ return ifnone;
+@@ -707,7 +707,7 @@ rb_hash_set_default_proc(VALUE hash, VALUE proc)
+ {
+ VALUE b;
+
+- rb_hash_modify(hash);
++ rb_hash_modify_check(hash);
+ b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
+ if (NIL_P(b) || !rb_obj_is_proc(b)) {
+ rb_raise(rb_eTypeError,
+@@ -776,7 +776,7 @@ rb_hash_delete_key(VALUE hash, VALUE key)
+ if (!RHASH(hash)->ntbl)
+ return Qundef;
+ if (RHASH(hash)->iter_lev > 0) {
+- if (st_delete_safe(RHASH(hash)->ntbl, &ktmp, &val, Qundef)) {
++ if (st_delete_safe(RHASH(hash)->ntbl, &ktmp, &val, (st_data_t)Qundef)) {
+ FL_SET(hash, HASH_DELETED);
+ return (VALUE)val;
+ }
+@@ -809,7 +809,7 @@ rb_hash_delete(VALUE hash, VALUE key)
+ {
+ VALUE val;
+
+- rb_hash_modify(hash);
++ rb_hash_modify_check(hash);
+ val = rb_hash_delete_key(hash, key);
+ if (val != Qundef) return val;
+ if (rb_block_given_p()) {
+@@ -828,7 +828,6 @@ shift_i(VALUE key, VALUE value, VALUE arg)
+ {
+ struct shift_var *var = (struct shift_var *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (var->key != Qundef) return ST_STOP;
+ var->key = key;
+ var->val = value;
+@@ -840,7 +839,6 @@ shift_i_safe(VALUE key, VALUE value, VALUE arg)
+ {
+ struct shift_var *var = (struct shift_var *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ var->key = key;
+ var->val = value;
+ return ST_STOP;
+@@ -864,29 +862,25 @@ rb_hash_shift(VALUE hash)
+ {
+ struct shift_var var;
+
+- rb_hash_modify(hash);
+- var.key = Qundef;
+- rb_hash_foreach(hash, RHASH(hash)->iter_lev > 0 ? shift_i_safe : shift_i,
+- (VALUE)&var);
+-
+- if (var.key != Qundef) {
+- if (RHASH(hash)->iter_lev > 0) {
+- rb_hash_delete_key(hash, var.key);
++ rb_hash_modify_check(hash);
++ if (RHASH(hash)->ntbl) {
++ var.key = Qundef;
++ rb_hash_foreach(hash, RHASH(hash)->iter_lev > 0 ? shift_i_safe : shift_i,
++ (VALUE)&var);
++
++ if (var.key != Qundef) {
++ if (RHASH(hash)->iter_lev > 0) {
++ rb_hash_delete_key(hash, var.key);
++ }
++ return rb_assoc_new(var.key, var.val);
+ }
+- return rb_assoc_new(var.key, var.val);
+- }
+- else if (FL_TEST(hash, HASH_PROC_DEFAULT)) {
+- return rb_funcall(RHASH_IFNONE(hash), id_yield, 2, hash, Qnil);
+- }
+- else {
+- return RHASH_IFNONE(hash);
+ }
++ return hash_default_value(hash, Qnil);
+ }
+
+ static int
+ delete_if_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ if (RTEST(rb_yield_values(2, key, value))) {
+ rb_hash_delete_key(hash, key);
+ }
+@@ -912,8 +906,9 @@ VALUE
+ rb_hash_delete_if(VALUE hash)
+ {
+ RETURN_ENUMERATOR(hash, 0, 0);
+- rb_hash_modify(hash);
+- rb_hash_foreach(hash, delete_if_i, hash);
++ rb_hash_modify_check(hash);
++ if (RHASH(hash)->ntbl)
++ rb_hash_foreach(hash, delete_if_i, hash);
+ return hash;
+ }
+
+@@ -984,7 +979,6 @@ rb_hash_values_at(int argc, VALUE *argv, VALUE hash)
+ static int
+ select_i(VALUE key, VALUE value, VALUE result)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ if (RTEST(rb_yield_values(2, key, value)))
+ rb_hash_aset(result, key, value);
+ return ST_CONTINUE;
+@@ -1018,7 +1012,6 @@ rb_hash_select(VALUE hash)
+ static int
+ keep_if_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ if (!RTEST(rb_yield_values(2, key, value))) {
+ return ST_DELETE;
+ }
+@@ -1040,7 +1033,7 @@ rb_hash_select_bang(VALUE hash)
+ st_index_t n;
+
+ RETURN_ENUMERATOR(hash, 0, 0);
+- rb_hash_modify(hash);
++ rb_hash_modify_check(hash);
+ if (!RHASH(hash)->ntbl)
+ return Qnil;
+ n = RHASH(hash)->ntbl->num_entries;
+@@ -1065,8 +1058,9 @@ VALUE
+ rb_hash_keep_if(VALUE hash)
+ {
+ RETURN_ENUMERATOR(hash, 0, 0);
+- rb_hash_modify(hash);
+- rb_hash_foreach(hash, keep_if_i, hash);
++ rb_hash_modify_check(hash);
++ if (RHASH(hash)->ntbl)
++ rb_hash_foreach(hash, keep_if_i, hash);
+ return hash;
+ }
+
+@@ -1144,9 +1138,7 @@ rb_hash_aset(VALUE hash, VALUE key, VALUE val)
+ static int
+ replace_i(VALUE key, VALUE val, VALUE hash)
+ {
+- if (key != Qundef) {
+- rb_hash_aset(hash, key, val);
+- }
++ rb_hash_aset(hash, key, val);
+
+ return ST_CONTINUE;
+ }
+@@ -1227,7 +1219,6 @@ rb_hash_empty_p(VALUE hash)
+ static int
+ each_value_i(VALUE key, VALUE value)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_yield(value);
+ return ST_CONTINUE;
+ }
+@@ -1262,7 +1253,6 @@ rb_hash_each_value(VALUE hash)
+ static int
+ each_key_i(VALUE key, VALUE value)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_yield(key);
+ return ST_CONTINUE;
+ }
+@@ -1296,7 +1286,6 @@ rb_hash_each_key(VALUE hash)
+ static int
+ each_pair_i(VALUE key, VALUE value)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_yield(rb_assoc_new(key, value));
+ return ST_CONTINUE;
+ }
+@@ -1334,7 +1323,6 @@ rb_hash_each_pair(VALUE hash)
+ static int
+ to_a_i(VALUE key, VALUE value, VALUE ary)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_ary_push(ary, rb_assoc_new(key, value));
+ return ST_CONTINUE;
+ }
+@@ -1367,7 +1355,6 @@ inspect_i(VALUE key, VALUE value, VALUE str)
+ {
+ VALUE str2;
+
+- if (key == Qundef) return ST_CONTINUE;
+ str2 = rb_inspect(key);
+ if (RSTRING_LEN(str) > 1) {
+ rb_str_cat2(str, ", ");
+@@ -1434,7 +1421,6 @@ rb_hash_to_hash(VALUE hash)
+ static int
+ keys_i(VALUE key, VALUE value, VALUE ary)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_ary_push(ary, key);
+ return ST_CONTINUE;
+ }
+@@ -1465,7 +1451,6 @@ rb_hash_keys(VALUE hash)
+ static int
+ values_i(VALUE key, VALUE value, VALUE ary)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_ary_push(ary, value);
+ return ST_CONTINUE;
+ }
+@@ -1524,7 +1509,6 @@ rb_hash_search_value(VALUE key, VALUE value, VALUE arg)
+ {
+ VALUE *data = (VALUE *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (rb_equal(value, data[1])) {
+ data[0] = Qtrue;
+ return ST_STOP;
+@@ -1568,7 +1552,6 @@ eql_i(VALUE key, VALUE val1, VALUE arg)
+ struct equal_data *data = (struct equal_data *)arg;
+ st_data_t val2;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (!st_lookup(data->tbl, key, &val2)) {
+ data->result = Qfalse;
+ return ST_STOP;
+@@ -1599,7 +1582,7 @@ hash_equal(VALUE hash1, VALUE hash2, int eql)
+ struct equal_data data;
+
+ if (hash1 == hash2) return Qtrue;
+- if (TYPE(hash2) != T_HASH) {
++ if (!RB_TYPE_P(hash2, T_HASH)) {
+ if (!rb_respond_to(hash2, rb_intern("to_hash"))) {
+ return Qfalse;
+ }
+@@ -1670,7 +1653,6 @@ hash_i(VALUE key, VALUE val, VALUE arg)
+ st_index_t *hval = (st_index_t *)arg;
+ st_index_t hdata[2];
+
+- if (key == Qundef) return ST_CONTINUE;
+ hdata[0] = rb_hash(key);
+ hdata[1] = rb_hash(val);
+ *hval ^= st_hash(hdata, sizeof(hdata), 0);
+@@ -1711,7 +1693,6 @@ rb_hash_hash(VALUE hash)
+ static int
+ rb_hash_invert_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ rb_hash_aset(hash, value, key);
+ return ST_CONTINUE;
+ }
+@@ -1740,7 +1721,6 @@ rb_hash_invert(VALUE hash)
+ static int
+ rb_hash_update_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ hash_update(hash, key);
+ st_insert(RHASH(hash)->ntbl, key, value);
+ return ST_CONTINUE;
+@@ -1749,7 +1729,6 @@ rb_hash_update_i(VALUE key, VALUE value, VALUE hash)
+ static int
+ rb_hash_update_block_i(VALUE key, VALUE value, VALUE hash)
+ {
+- if (key == Qundef) return ST_CONTINUE;
+ if (rb_hash_has_key(hash, key)) {
+ value = rb_yield_values(3, key, rb_hash_aref(hash, key), value);
+ }
+@@ -1806,7 +1785,6 @@ rb_hash_update_func_i(VALUE key, VALUE value, VALUE arg0)
+ struct update_arg *arg = (struct update_arg *)arg0;
+ VALUE hash = arg->hash;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (rb_hash_has_key(hash, key)) {
+ value = (*arg->func)(key, rb_hash_aref(hash, key), value);
+ }
+@@ -1863,7 +1841,6 @@ assoc_i(VALUE key, VALUE val, VALUE arg)
+ {
+ VALUE *args = (VALUE *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (RTEST(rb_equal(args[0], key))) {
+ args[1] = rb_assoc_new(key, val);
+ return ST_STOP;
+@@ -1901,7 +1878,6 @@ rassoc_i(VALUE key, VALUE val, VALUE arg)
+ {
+ VALUE *args = (VALUE *)arg;
+
+- if (key == Qundef) return ST_CONTINUE;
+ if (RTEST(rb_equal(args[0], val))) {
+ args[1] = rb_assoc_new(key, val);
+ return ST_STOP;
+@@ -2198,7 +2174,7 @@ rb_env_path_tainted(void)
+ }
+
+ #if defined(_WIN32) || (defined(HAVE_SETENV) && defined(HAVE_UNSETENV))
+-#elif defined __sun__
++#elif defined __sun
+ static int
+ in_origenv(const char *str)
+ {
+@@ -2286,7 +2262,7 @@ ruby_setenv(const char *name, const char *value)
+ rb_sys_fail("unsetenv");
+ #endif
+ }
+-#elif defined __sun__
++#elif defined __sun
+ size_t len;
+ char **env_ptr, *str;
+ if (strchr(name, '=')) {
+@@ -3084,11 +3060,9 @@ env_invert(void)
+ static int
+ env_replace_i(VALUE key, VALUE val, VALUE keys)
+ {
+- if (key != Qundef) {
+- env_aset(Qnil, key, val);
+- if (rb_ary_includes(keys, key)) {
+- rb_ary_delete(keys, key);
+- }
++ env_aset(Qnil, key, val);
++ if (rb_ary_includes(keys, key)) {
++ rb_ary_delete(keys, key);
+ }
+ return ST_CONTINUE;
+ }
+@@ -3120,12 +3094,10 @@ env_replace(VALUE env, VALUE hash)
+ static int
+ env_update_i(VALUE key, VALUE val)
+ {
+- if (key != Qundef) {
+- if (rb_block_given_p()) {
+- val = rb_yield_values(3, key, rb_f_getenv(Qnil, key), val);
+- }
+- env_aset(Qnil, key, val);
++ if (rb_block_given_p()) {
++ val = rb_yield_values(3, key, rb_f_getenv(Qnil, key), val);
+ }
++ env_aset(Qnil, key, val);
+ return ST_CONTINUE;
+ }
+
+@@ -3150,15 +3122,116 @@ env_update(VALUE env, VALUE hash)
+ }
+
+ /*
+- * A <code>Hash</code> is a collection of key-value pairs. It is
+- * similar to an <code>Array</code>, except that indexing is done via
+- * arbitrary keys of any object type, not an integer index. Hashes enumerate
+- * their values in the order that the corresponding keys were inserted.
++ * A Hash is a dictionary-like collection of unique keys and their values.
++ * Also called associative arrays, they are similar to Arrays, but where an
++ * Array uses integers as its index, a Hash allows you to use any object
++ * type.
++ *
++ * Hashes enumerate their values in the order that the corresponding keys
++ * were inserted.
++ *
++ * A Hash can be easily created by using its implicit form:
++ *
++ * grades = { "Jane Doe" => 10, "Jim Doe" => 6 }
++ *
++ * Hashes allow an alternate syntax form when your keys are always symbols.
++ * Instead of
++ *
++ * options = { :font_size => 10, :font_family => "Arial" }
++ *
++ * You could write it as:
++ *
++ * options = { font_size: 10, font_family: "Arial" }
++ *
++ * Each named key is a symbol you can access in hash:
++ *
++ * options[:font_size] # => 10
++ *
++ * A Hash can also be created through its ::new method:
++ *
++ * grades = Hash.new
++ * grades["Dorothy Doe"] = 9
+ *
+ * Hashes have a <em>default value</em> that is returned when accessing
+- * keys that do not exist in the hash. By default, that value is
+- * <code>nil</code>.
++ * keys that do not exist in the hash. If no default is set +nil+ is used.
++ * You can set the default value by sending it as an argument to Hash.new:
++ *
++ * grades = Hash.new(0)
++ *
++ * Or by using the #default= method:
++ *
++ * grades = {"Timmy Doe" => 8}
++ * grades.default = 0
++ *
++ * Accessing a value in a Hash requires using its key:
++ *
++ * puts grades["Jane Doe"] # => 10
++ *
++ * === Common Uses
++ *
++ * Hashes are an easy way to represent data structures, such as
++ *
++ * books = {}
++ * books[:matz] = "The Ruby Language"
++ * books[:black] = "The Well-Grounded Rubyist"
++ *
++ * Hashes are also commonly used as a way to have named parameters in
++ * functions. Note that no brackets are used below. If a hash is the last
++ * argument on a method call, no braces are needed, thus creating a really
++ * clean interface:
++ *
++ * Person.create(name: "John Doe", age: 27)
++ *
++ * def self.create(params)
++ * @name = params[:name]
++ * @age = params[:age]
++ * end
++ *
++ * === Hash Keys
++ *
++ * Two objects refer to the same hash key when their <code>hash</code> value
++ * is identical and the two objects are <code>eql?</code> to each other.
++ *
++ * A user-defined class may be used as a hash key if the <code>hash</code>
++ * and <code>eql?</code> methods are overridden to provide meaningful
++ * behavior. By default, separate instances refer to separate hash keys.
++ *
++ * A typical implementation of <code>hash</code> is based on the
++ * object's data while <code>eql?</code> is usually aliased to the overridden
++ * <code>==</code> method:
++ *
++ * class Book
++ * attr_reader :author, :title
++ *
++ * def initialize(author, title)
++ * @author = author
++ * @title = title
++ * end
++ *
++ * def ==(other)
++ * self.class === other and
++ * other.author == @author and
++ * other.title == @title
++ * end
++ *
++ * alias eql? ==
++ *
++ * def hash
++ * @author.hash ^ @title.hash # XOR
++ * end
++ * end
++ *
++ * book1 = Book.new 'matz', 'Ruby in a Nutshell'
++ * book2 = Book.new 'matz', 'Ruby in a Nutshell'
++ *
++ * reviews = {}
++ *
++ * reviews[book1] = 'Great reference!'
++ * reviews[book2] = 'Nice and compact!'
++ *
++ * reviews.length #=> 1
+ *
++ * See also Object#hash and Object#eql?
+ */
+
+ void
+diff --git a/include/ruby/st.h b/include/ruby/st.h
+index 50f2a75..119dfde 100644
+--- a/include/ruby/st.h
++++ b/include/ruby/st.h
+@@ -36,7 +36,7 @@ typedef unsigned long st_data_t;
+ #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
+ typedef unsigned LONG_LONG st_data_t;
+ #else
+-# error ---->> st.c requires sizeof(void*) == sizeof(long) to be compiled. <<----
++# error ---->> st.c requires sizeof(void*) == sizeof(long) or sizeof(LONG_LONG) to be compiled. <<----
+ #endif
+ #define ST_DATA_T_DEFINED
+
+@@ -74,6 +74,11 @@ struct st_hash_type {
+
+ #define ST_INDEX_BITS (sizeof(st_index_t) * CHAR_BIT)
+
++typedef struct st_packed_entry {
++ st_index_t hash;
++ st_data_t key, val;
++} st_packed_entry;
++
+ struct st_table {
+ const struct st_hash_type *type;
+ st_index_t num_bins;
+@@ -91,8 +96,17 @@ struct st_table {
+ __extension__
+ #endif
+ st_index_t num_entries : ST_INDEX_BITS - 1;
+- struct st_table_entry **bins;
+- struct st_table_entry *head, *tail;
++ union {
++ struct {
++ struct st_table_entry **bins;
++ struct st_table_entry *head, *tail;
++ } big;
++ struct {
++ struct st_packed_entry *entries;
++ st_index_t real_entries;
++ } packed;
++ st_packed_entry upacked;
++ } as;
+ };
+
+ #define st_is_member(table,key) st_lookup((table),(key),(st_data_t *)0)
+@@ -114,6 +128,7 @@ int st_insert2(st_table *, st_data_t, st_data_t, st_data_t (*)(st_data_t));
+ int st_lookup(st_table *, st_data_t, st_data_t *);
+ int st_get_key(st_table *, st_data_t, st_data_t *);
+ int st_foreach(st_table *, int (*)(ANYARGS), st_data_t);
++int st_foreach_check(st_table *, int (*)(ANYARGS), st_data_t, st_data_t);
+ int st_reverse_foreach(st_table *, int (*)(ANYARGS), st_data_t);
+ void st_add_direct(st_table *, st_data_t, st_data_t);
+ void st_free_table(st_table *);
+diff --git a/pool_alloc.h b/pool_alloc.h
+new file mode 100644
+index 0000000..957708e
+--- /dev/null
++++ b/pool_alloc.h
+@@ -0,0 +1,11 @@
++#ifndef POOL_ALLOC_H
++#define POOL_ALLOC_H
++
++#define POOL_ALLOC_API
++#ifdef POOL_ALLOC_API
++void ruby_xpool_free(void *ptr);
++void *ruby_xpool_malloc_6p();
++void *ruby_xpool_malloc_11p();
++#endif
++
++#endif
+diff --git a/pool_alloc.inc.h b/pool_alloc.inc.h
+new file mode 100644
+index 0000000..a7879ab
+--- /dev/null
++++ b/pool_alloc.inc.h
+@@ -0,0 +1,156 @@
++/*
++ * this is a generic pool allocator
++ * you should define the following macros:
++ * ITEM_NAME - unique identifier, which allows to hold functions in a namespace
++ * ITEM_TYPEDEF(name) - passed to typedef to localize item type
++ * free_entry - desired name of function for free entry
++ * alloc_entry - desired name of function for allocating an entry
++ */
++
++#if POOL_ALLOC_PART == 1
++#ifdef HEAP_ALIGN_LOG
++#define DEFAULT_POOL_SIZE (1 << HEAP_ALIGN_LOG)
++#else
++#define DEFAULT_POOL_SIZE (sizeof(void*) * 2048)
++#endif
++typedef unsigned int pool_holder_counter;
++
++typedef struct pool_entry_list pool_entry_list;
++typedef struct pool_holder pool_holder;
++
++typedef struct pool_header {
++ pool_holder *first;
++ pool_holder *_black_magick;
++ pool_holder_counter size; // size of entry in sizeof(void*) items
++    pool_holder_counter total; // number of entries per holder (POOL_HOLDER_COUNT)
++} pool_header;
++
++struct pool_holder {
++ pool_holder_counter free, total;
++ pool_header *header;
++ void *freep;
++ pool_holder *fore, *back;
++ void *data[1];
++};
++#define POOL_DATA_SIZE(pool_size) (((pool_size) - sizeof(void*) * 6 - offsetof(pool_holder, data)) / sizeof(void*))
++#define POOL_ENTRY_SIZE(item_type) ((sizeof(item_type) - 1) / sizeof(void*) + 1)
++#define POOL_HOLDER_COUNT(pool_size, item_type) (POOL_DATA_SIZE(pool_size)/POOL_ENTRY_SIZE(item_type))
++#define INIT_POOL(item_type) {NULL, NULL, POOL_ENTRY_SIZE(item_type), POOL_HOLDER_COUNT(DEFAULT_POOL_SIZE, item_type)}
++
++#elif POOL_ALLOC_PART == 2
++static pool_holder *
++pool_holder_alloc(pool_header *header)
++{
++ pool_holder *holder;
++ pool_holder_counter i, size, count;
++ register void **ptr;
++
++ size_t sz = offsetof(pool_holder, data) +
++ header->size * header->total * sizeof(void*);
++#define objspace (&rb_objspace)
++ vm_malloc_prepare(objspace, DEFAULT_POOL_SIZE);
++ if (header->first != NULL) return header->first;
++ TRY_WITH_GC(holder = (pool_holder*) aligned_malloc(DEFAULT_POOL_SIZE, sz));
++ malloc_increase += DEFAULT_POOL_SIZE;
++#if CALC_EXACT_MALLOC_SIZE
++ objspace->malloc_params.allocated_size += DEFAULT_POOL_SIZE;
++ objspace->malloc_params.allocations++;
++#endif
++#undef objspace
++
++ size = header->size;
++ count = header->total;
++ holder->free = count;
++ holder->total = count;
++ holder->header = header;
++ holder->fore = NULL;
++ holder->back = NULL;
++ holder->freep = &holder->data;
++ ptr = holder->data;
++ for(i = count - 1; i; i-- ) {
++ ptr = *ptr = ptr + size;
++ }
++ *ptr = NULL;
++ header->first = holder;
++ return holder;
++}
++
++static inline void
++pool_holder_unchaing(pool_header *header, pool_holder *holder)
++{
++ register pool_holder *fore = holder->fore, *back = holder->back;
++ holder->fore = NULL;
++ holder->back = NULL;
++ if (fore != NULL) fore->back = back;
++ else header->_black_magick = back;
++ if (back != NULL) back->fore = fore;
++ else header->first = fore;
++}
++
++static inline pool_holder *
++entry_holder(void **entry)
++{
++ return (pool_holder*)(((uintptr_t)entry) & ~(DEFAULT_POOL_SIZE - 1));
++}
++
++static inline void
++pool_free_entry(void **entry)
++{
++ pool_holder *holder = entry_holder(entry);
++ pool_header *header = holder->header;
++
++ if (holder->free++ == 0) {
++ register pool_holder *first = header->first;
++ if (first == NULL) {
++ header->first = holder;
++ } else {
++ holder->back = first;
++ holder->fore = first->fore;
++ first->fore = holder;
++ if (holder->fore)
++ holder->fore->back = holder;
++ else
++ header->_black_magick = holder;
++ }
++ } else if (holder->free == holder->total && header->first != holder ) {
++ pool_holder_unchaing(header, holder);
++ aligned_free(holder);
++#if CALC_EXACT_MALLOC_SIZE
++ rb_objspace.malloc_params.allocated_size -= DEFAULT_POOL_SIZE;
++ rb_objspace.malloc_params.allocations--;
++#endif
++ return;
++ }
++
++ *entry = holder->freep;
++ holder->freep = entry;
++}
++
++static inline void*
++pool_alloc_entry(pool_header *header)
++{
++ pool_holder *holder = header->first;
++ void **result;
++ if (holder == NULL) {
++ holder = pool_holder_alloc(header);
++ }
++
++ result = holder->freep;
++ holder->freep = *result;
++
++ if (--holder->free == 0) {
++ pool_holder_unchaing(header, holder);
++ }
++
++ return result;
++}
++
++static void
++pool_finalize_header(pool_header *header)
++{
++ if (header->first) {
++ aligned_free(header->first);
++ header->first = NULL;
++ }
++}
++#endif
+diff --git a/st.c b/st.c
+index fda5784..20ec427 100644
+--- a/st.c
++++ b/st.c
+@@ -7,6 +7,7 @@
+ #include "st.h"
+ #else
+ #include "ruby/ruby.h"
++#include "pool_alloc.h"
+ #endif
+
+ #include <stdio.h>
+@@ -25,8 +26,17 @@ struct st_table_entry {
+ st_table_entry *fore, *back;
+ };
+
+-#define ST_DEFAULT_MAX_DENSITY 5
++#define STATIC_ASSERT(name, expr) typedef int static_assert_##name##_check[(expr) ? 1 : -1];
++
++#define ST_DEFAULT_MAX_DENSITY 2
+ #define ST_DEFAULT_INIT_TABLE_SIZE 11
++#define ST_DEFAULT_SECOND_TABLE_SIZE 19
++#define ST_DEFAULT_PACKED_TABLE_SIZE 18
++#define PACKED_UNIT (int)(sizeof(st_packed_entry) / sizeof(st_table_entry*))
++#define MAX_PACKED_HASH (int)(ST_DEFAULT_PACKED_TABLE_SIZE * sizeof(st_table_entry*) / sizeof(st_packed_entry))
++
++STATIC_ASSERT(st_packed_entry, sizeof(st_packed_entry) == sizeof(st_table_entry*[PACKED_UNIT]))
++STATIC_ASSERT(st_packed_bins, sizeof(st_packed_entry[MAX_PACKED_HASH]) <= sizeof(st_table_entry*[ST_DEFAULT_PACKED_TABLE_SIZE]))
+
+ /*
+ * DEFAULT_MAX_DENSITY is the default for the largest we allow the
+@@ -38,7 +48,8 @@ struct st_table_entry {
+ *
+ */
+
+-static const struct st_hash_type type_numhash = {
++#define type_numhash st_hashtype_num
++const struct st_hash_type st_hashtype_num = {
+ st_numcmp,
+ st_numhash,
+ };