Merge branches 'st_opt_sparse_array/ruby_1_9_3', 'ary-queue/ruby_1_9_3', 'speedup-require/ruby_1_9_3' and 'backport-gc/ruby_1_9_3' into falcon-gc
commit 474dc5c2c3fcc52369734fb7b652f3f6898aaafc (4 parents: a88ebf0 + 2389d86 + 5cfc32a + 8e234e5)
@funny-falcon authored
180 array.c
@@ -255,15 +255,24 @@ rb_ary_modify(VALUE ary)
rb_ary_modify_check(ary);
if (ARY_SHARED_P(ary)) {
long len = RARRAY_LEN(ary);
+ VALUE shared = ARY_SHARED(ary);
if (len <= RARRAY_EMBED_LEN_MAX) {
VALUE *ptr = ARY_HEAP_PTR(ary);
- VALUE shared = ARY_SHARED(ary);
FL_UNSET_SHARED(ary);
FL_SET_EMBED(ary);
MEMCPY(ARY_EMBED_PTR(ary), ptr, VALUE, len);
rb_ary_decrement_share(shared);
ARY_SET_EMBED_LEN(ary, len);
}
+ else if (ARY_SHARED_NUM(shared) == 1 && len > RARRAY_LEN(shared)>>1) {
+ long shift = RARRAY_PTR(ary) - RARRAY_PTR(shared);
+ ARY_SET_PTR(ary, RARRAY_PTR(shared));
+ ARY_SET_CAPA(ary, RARRAY_LEN(shared));
+ MEMMOVE(RARRAY_PTR(ary), RARRAY_PTR(ary)+shift, VALUE, len);
+ FL_UNSET_SHARED(ary);
+ FL_SET_EMBED(shared);
+ rb_ary_decrement_share(shared);
+ }
else {
VALUE *ptr = ALLOC_N(VALUE, len);
MEMCPY(ptr, RARRAY_PTR(ary), VALUE, len);
@@ -274,6 +283,38 @@ rb_ary_modify(VALUE ary)
}
}
+static void
+ary_ensure_room_for_push(VALUE ary, long add_len)
+{
+ long new_len = RARRAY_LEN(ary) + add_len;
+ long capa;
+
+ if (ARY_SHARED_P(ary)) {
+ if (new_len > RARRAY_EMBED_LEN_MAX) {
+ VALUE shared = ARY_SHARED(ary);
+ if (ARY_SHARED_NUM(shared) == 1) {
+ if (RARRAY_PTR(ary) - RARRAY_PTR(shared) + new_len <= RARRAY_LEN(shared)) {
+ rb_ary_modify_check(ary);
+ }
+ else {
+ /* if the array is shared, it likely participates in a push/shift pattern */
+ rb_ary_modify(ary);
+ capa = ARY_CAPA(ary);
+ if (new_len > capa - (capa >> 6)) {
+ ary_double_capa(ary, new_len);
+ }
+ }
+ return;
+ }
+ }
+ }
+ rb_ary_modify(ary);
+ capa = ARY_CAPA(ary);
+ if (new_len > capa) {
+ ary_double_capa(ary, new_len);
+ }
+}
+
VALUE
rb_ary_freeze(VALUE ary)
{
@@ -295,6 +336,33 @@ rb_ary_frozen_p(VALUE ary)
return Qfalse;
}
+/* This can be used to take a snapshot of an array (with
+ e.g. rb_ary_replace) and check later whether the array has been
+ modified from the snapshot. The snapshot is cheap, though if
+ something does modify the array it will pay the cost of copying
+ it. */
+VALUE
+rb_ary_dup_of_p(VALUE ary1, VALUE ary2)
+{
+ VALUE *p1, *p2;
+ long len = RARRAY_LEN(ary1);
+
+ if (len != RARRAY_LEN(ary2)) return Qfalse;
+
+ p1 = RARRAY_PTR(ary1);
+ p2 = RARRAY_PTR(ary2);
+
+ if (ARY_EMBED_P(ary1) && ARY_EMBED_P(ary2)) {
+ for (; len; len--, p1++, p2++) {
+ if (*p1 != *p2) return Qfalse;
+ }
+ return Qtrue;
+ }
+
+ if (p1 == p2) return Qtrue;
+ return Qfalse;
+}
+
static VALUE
ary_alloc(VALUE klass)
{
@@ -430,8 +498,9 @@ ary_make_shared(VALUE ary)
OBJSETUP(shared, 0, T_ARRAY);
FL_UNSET_EMBED(shared);
- ARY_SET_LEN((VALUE)shared, RARRAY_LEN(ary));
+ ARY_SET_LEN((VALUE)shared, ARY_CAPA(ary));
ARY_SET_PTR((VALUE)shared, RARRAY_PTR(ary));
+ rb_mem_clear(RARRAY_PTR(shared) + RARRAY_LEN(ary), ARY_CAPA(ary) - RARRAY_LEN(ary));
FL_SET_SHARED_ROOT(shared);
ARY_SET_SHARED_NUM((VALUE)shared, 1);
FL_SET_SHARED(ary);
@@ -721,8 +790,6 @@ ary_take_first_or_last(int argc, VALUE *argv, VALUE ary, enum ary_take_pos_flags
return ary_make_partial(ary, rb_cArray, offset, n);
}
-static VALUE rb_ary_push_1(VALUE ary, VALUE item);
-
/*
* call-seq:
* ary << obj -> ary
@@ -739,8 +806,12 @@ static VALUE rb_ary_push_1(VALUE ary, VALUE item);
VALUE
rb_ary_push(VALUE ary, VALUE item)
{
- rb_ary_modify(ary);
- return rb_ary_push_1(ary, item);
+ long idx = RARRAY_LEN(ary);
+
+ ary_ensure_room_for_push(ary, 1);
+ RARRAY_PTR(ary)[idx] = item;
+ ARY_SET_LEN(ary, idx + 1);
+ return ary;
}
static VALUE
@@ -756,6 +827,18 @@ rb_ary_push_1(VALUE ary, VALUE item)
return ary;
}
+static VALUE
+rb_ary_cat(VALUE ary, const VALUE *ptr, long len)
+{
+ long oldlen = RARRAY_LEN(ary);
+
+ ary_ensure_room_for_push(ary, len);
+ MEMCPY(RARRAY_PTR(ary) + oldlen, ptr, VALUE, len);
+ ARY_SET_LEN(ary, oldlen + len);
+ return ary;
+}
+
/*
* call-seq:
* ary.push(obj, ... ) -> ary
@@ -772,11 +855,7 @@ rb_ary_push_1(VALUE ary, VALUE item)
static VALUE
rb_ary_push_m(int argc, VALUE *argv, VALUE ary)
{
- rb_ary_modify(ary);
- while (argc--) {
- rb_ary_push_1(ary, *argv++);
- }
- return ary;
+ return rb_ary_cat(ary, argv, argc);
}
VALUE
@@ -904,6 +983,55 @@ rb_ary_shift_m(int argc, VALUE *argv, VALUE ary)
return result;
}
+static void
+ary_ensure_room_for_unshift(VALUE ary, int argc)
+{
+ long len = RARRAY_LEN(ary);
+ long new_len = len + argc;
+ long capa;
+ VALUE *head, *sharedp;
+
+ if (ARY_SHARED_P(ary)) {
+ VALUE shared = ARY_SHARED(ary);
+ capa = RARRAY_LEN(shared);
+ if (ARY_SHARED_NUM(shared) == 1 && capa > new_len) {
+ head = RARRAY_PTR(ary);
+ sharedp = RARRAY_PTR(shared);
+ goto makeroom_if_need;
+ }
+ }
+
+ rb_ary_modify(ary);
+ capa = ARY_CAPA(ary);
+ if (capa - (capa >> 6) <= new_len) {
+ ary_double_capa(ary, new_len);
+ }
+
+ /* use shared array for big "queues" */
+ if (new_len > ARY_DEFAULT_SIZE * 4) {
+ /* make room for unshifted items */
+ capa = ARY_CAPA(ary);
+ ary_make_shared(ary);
+
+ head = sharedp = RARRAY_PTR(ary);
+ goto makeroom;
+makeroom_if_need:
+ if (head - sharedp < argc) {
+ long room;
+makeroom:
+ room = capa - new_len;
+ room -= room >> 4;
+ MEMMOVE(sharedp + argc + room, head, VALUE, len);
+ head = sharedp + argc + room;
+ }
+ ARY_SET_PTR(ary, head - argc);
+ }
+ else {
+ /* sliding items */
+ MEMMOVE(RARRAY_PTR(ary) + argc, RARRAY_PTR(ary), VALUE, len);
+ }
+}
+
/*
* call-seq:
* ary.unshift(obj, ...) -> ary
@@ -919,19 +1047,16 @@ rb_ary_shift_m(int argc, VALUE *argv, VALUE ary)
static VALUE
rb_ary_unshift_m(int argc, VALUE *argv, VALUE ary)
{
- long len;
+ long len = RARRAY_LEN(ary);
- rb_ary_modify(ary);
- if (argc == 0) return ary;
- if (ARY_CAPA(ary) <= (len = RARRAY_LEN(ary)) + argc) {
- ary_double_capa(ary, len + argc);
+ if (argc == 0) {
+ rb_ary_modify_check(ary);
+ return ary;
}
- /* sliding items */
- MEMMOVE(RARRAY_PTR(ary) + argc, RARRAY_PTR(ary), VALUE, len);
+ ary_ensure_room_for_unshift(ary, argc);
MEMCPY(RARRAY_PTR(ary), argv, VALUE, argc);
- ARY_INCREASE_LEN(ary, argc);
-
+ ARY_SET_LEN(ary, len + argc);
return ary;
}
@@ -1293,15 +1418,12 @@ rb_ary_splice(VALUE ary, long beg, long len, VALUE rpl)
rpl = rb_ary_to_ary(rpl);
rlen = RARRAY_LEN(rpl);
}
- rb_ary_modify(ary);
if (beg >= RARRAY_LEN(ary)) {
if (beg > ARY_MAX_SIZE - rlen) {
rb_raise(rb_eIndexError, "index %ld too big", beg);
}
+ ary_ensure_room_for_push(ary, rlen-len); /* len is 0 or negative */
len = beg + rlen;
- if (len >= ARY_CAPA(ary)) {
- ary_double_capa(ary, len);
- }
rb_mem_clear(RARRAY_PTR(ary) + RARRAY_LEN(ary), beg - RARRAY_LEN(ary));
if (rlen > 0) {
MEMCPY(RARRAY_PTR(ary) + beg, RARRAY_PTR(rpl), VALUE, rlen);
@@ -1311,6 +1433,7 @@ rb_ary_splice(VALUE ary, long beg, long len, VALUE rpl)
else {
long alen;
+ rb_ary_modify(ary);
alen = RARRAY_LEN(ary) + rlen - len;
if (alen >= ARY_CAPA(ary)) {
ary_double_capa(ary, alen);
@@ -2100,12 +2223,13 @@ rb_ary_sort_bang(VALUE ary)
if (RARRAY_LEN(ary) > 1) {
VALUE tmp = ary_make_substitution(ary); /* only ary refers tmp */
struct ary_sort_data data;
+ long len = RARRAY_LEN(ary);
RBASIC(tmp)->klass = 0;
data.ary = tmp;
data.opt_methods = 0;
data.opt_inited = 0;
- ruby_qsort(RARRAY_PTR(tmp), RARRAY_LEN(tmp), sizeof(VALUE),
+ ruby_qsort(RARRAY_PTR(tmp), len, sizeof(VALUE),
rb_block_given_p()?sort_1:sort_2, &data);
if (ARY_EMBED_P(tmp)) {
@@ -2122,7 +2246,7 @@ rb_ary_sort_bang(VALUE ary)
if (ARY_HEAP_PTR(ary) == ARY_HEAP_PTR(tmp)) {
assert(!ARY_EMBED_P(ary));
FL_UNSET_SHARED(ary);
- ARY_SET_CAPA(ary, ARY_CAPA(tmp));
+ ARY_SET_CAPA(ary, RARRAY_LEN(tmp));
}
else {
assert(!ARY_SHARED_P(tmp));
@@ -2137,8 +2261,8 @@ rb_ary_sort_bang(VALUE ary)
xfree(ARY_HEAP_PTR(ary));
}
ARY_SET_PTR(ary, RARRAY_PTR(tmp));
- ARY_SET_HEAP_LEN(ary, RARRAY_LEN(tmp));
- ARY_SET_CAPA(ary, ARY_CAPA(tmp));
+ ARY_SET_HEAP_LEN(ary, len);
+ ARY_SET_CAPA(ary, RARRAY_LEN(tmp));
}
/* tmp has lost ownership of the ptr */
FL_UNSET(tmp, FL_FREEZE);
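The array.c changes above target the push/shift "queue" pattern: once a shift leaves an array sharing a buffer with a shared root object, ary_ensure_room_for_push and ary_ensure_room_for_unshift reuse the root's spare capacity (the ARY_SHARED_NUM(shared) == 1 paths) instead of copying every element on each modification. A minimal Ruby sketch of the workload this helps, illustrative only and with machine-dependent timings:

    require 'benchmark'

    # Long-lived queue: shift makes the array share its heap buffer with a
    # root object; later pushes can then grow into the root's spare capacity
    # instead of triggering a full copy on every append.
    queue = []
    puts Benchmark.measure {
      1_000_000.times do |i|
        queue.push(i)
        queue.shift if queue.size > 128
      end
    }

rb_ary_dup_of_p, also added here, backs the cheap snapshot checks used later in load.c: after rb_ary_replace(snapshot, ary) both arrays share one buffer, and the predicate stays true until one of them is modified (embedded arrays fall back to an element-wise compare).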
43 file.c
@@ -153,40 +153,60 @@ file_path_convert(VALUE name)
return name;
}
-static VALUE
-rb_get_path_check(VALUE obj, int level)
+static rb_encoding *
+check_path_encoding(VALUE str)
+{
+ rb_encoding *enc = rb_enc_get(str);
+ if (!rb_enc_asciicompat(enc)) {
+ rb_raise(rb_eEncCompatError, "path name must be ASCII-compatible (%s): %s",
+ rb_enc_name(enc), RSTRING_PTR(rb_str_inspect(str)));
+ }
+ return enc;
+}
+
+VALUE
+rb_get_path_check_to_string(VALUE obj, int level)
{
VALUE tmp;
ID to_path;
- rb_encoding *enc;
if (insecure_obj_p(obj, level)) {
rb_insecure_operation();
}
+ if (RB_TYPE_P(obj, T_STRING)) {
+ return obj;
+ }
CONST_ID(to_path, "to_path");
tmp = rb_check_funcall(obj, to_path, 0, 0);
if (tmp == Qundef) {
tmp = obj;
}
StringValue(tmp);
+ return tmp;
+}
+VALUE
+rb_get_path_check_convert(VALUE obj, VALUE tmp, int level)
+{
tmp = file_path_convert(tmp);
if (obj != tmp && insecure_obj_p(tmp, level)) {
rb_insecure_operation();
}
- enc = rb_enc_get(tmp);
- if (!rb_enc_asciicompat(enc)) {
- tmp = rb_str_inspect(tmp);
- rb_raise(rb_eEncCompatError, "path name must be ASCII-compatible (%s): %s",
- rb_enc_name(enc), RSTRING_PTR(tmp));
- }
+ check_path_encoding(tmp);
StringValueCStr(tmp);
return rb_str_new4(tmp);
}
+static VALUE
+rb_get_path_check(VALUE obj, int level)
+{
+ VALUE tmp = rb_get_path_check_to_string(obj, level);
+ return rb_get_path_check_convert(obj, tmp, level);
+}
+
VALUE
rb_get_path_no_checksafe(VALUE obj)
{
@@ -3254,7 +3274,6 @@ rb_file_expand_path(VALUE fname, VALUE dname)
VALUE
rb_file_expand_path_fast(VALUE fname, VALUE dname)
{
- check_expand_path_args(fname, dname);
return rb_file_expand_path_internal(fname, dname, 0, 0, EXPAND_PATH_BUFFER());
}
@@ -5245,7 +5264,7 @@ rb_find_file_ext_safe(VALUE *filep, const char *const *ext, int safe_level)
rb_raise(rb_eSecurityError, "loading from non-absolute path %s", f);
}
- RB_GC_GUARD(load_path) = rb_get_load_path();
+ RB_GC_GUARD(load_path) = rb_get_expanded_load_path();
if (!load_path) return 0;
fname = rb_str_dup(*filep);
@@ -5310,7 +5329,7 @@ rb_find_file_safe(VALUE path, int safe_level)
rb_raise(rb_eSecurityError, "loading from non-absolute path %s", f);
}
- RB_GC_GUARD(load_path) = rb_get_load_path();
+ RB_GC_GUARD(load_path) = rb_get_expanded_load_path();
if (load_path) {
long i;
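Splitting rb_get_path_check into rb_get_path_check_to_string and rb_get_path_check_convert lets load.c obtain the String form of a $LOAD_PATH entry (to decide what it may cache) before paying for the encoding check and conversion. A small sketch of the Ruby-level duck typing this preserves (the PathLike class and the path are made up for illustration):

    # An object responding to #to_path is accepted wherever a path is
    # expected; the returned value must still be a String.
    class PathLike
      def initialize(path)
        @path = path
      end

      def to_path
        @path
      end
    end

    File.exist?(PathLike.new("/etc/hosts"))  # => true on most Unix systems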
547 gc.c
@@ -92,10 +92,12 @@ typedef struct {
unsigned int initial_malloc_limit;
unsigned int initial_heap_min_slots;
unsigned int initial_free_min;
+#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
int gc_stress;
+#endif
} ruby_gc_params_t;
-ruby_gc_params_t initial_params = {
+static ruby_gc_params_t initial_params = {
GC_MALLOC_LIMIT,
HEAP_MIN_SLOTS,
FREE_MIN,
@@ -120,7 +122,10 @@ ruby_gc_params_t initial_params = {
int ruby_gc_debug_indent = 0;
/* for GC profile */
+#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
+#endif
+
typedef struct gc_profile_record {
double gc_time;
double gc_mark_time;
@@ -318,17 +323,20 @@ typedef struct RVALUE {
#endif
struct heaps_slot {
- void *membase;
- RVALUE *slot;
- size_t limit;
+ struct heaps_header *membase;
+ RVALUE *freelist;
struct heaps_slot *next;
struct heaps_slot *prev;
+ struct heaps_slot *free_next;
+ uintptr_t bits[1];
};
-struct sorted_heaps_slot {
+struct heaps_header {
+ struct heaps_slot *base;
+ uintptr_t *bits;
RVALUE *start;
RVALUE *end;
- struct heaps_slot *slot;
+ size_t limit;
};
struct gc_list {
@@ -352,7 +360,9 @@ typedef struct mark_stack {
size_t unused_cache_size;
} mark_stack_t;
+#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE 0
+#endif
#ifdef POOL_ALLOC_API
/* POOL ALLOC API */
@@ -388,12 +398,12 @@ typedef struct rb_objspace {
size_t increment;
struct heaps_slot *ptr;
struct heaps_slot *sweep_slots;
- struct sorted_heaps_slot *sorted;
+ struct heaps_slot *free_slots;
+ struct heaps_header **sorted;
size_t length;
size_t used;
- RVALUE *freelist;
+ struct heaps_slot *reserve_slots;
RVALUE *range[2];
- RVALUE *freed;
size_t live_num;
size_t free_num;
size_t free_min;
@@ -404,6 +414,7 @@ typedef struct rb_objspace {
int dont_gc;
int dont_lazy_sweep;
int during_gc;
+ rb_atomic_t finalizing;
} flags;
struct {
st_table *table;
@@ -439,13 +450,12 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
#define heaps objspace->heap.ptr
#define heaps_length objspace->heap.length
#define heaps_used objspace->heap.used
-#define freelist objspace->heap.freelist
#define lomem objspace->heap.range[0]
#define himem objspace->heap.range[1]
#define heaps_inc objspace->heap.increment
-#define heaps_freed objspace->heap.freed
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
+#define finalizing objspace->flags.finalizing
#define finalizer_table objspace->final.table
#define deferred_final_list objspace->final.deferred
#define global_List objspace->global_list
@@ -454,7 +464,16 @@ int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
#define initial_heap_min_slots initial_params.initial_heap_min_slots
#define initial_free_min initial_params.initial_free_min
+#define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
+
+#define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
+
+#define HEAP_HEADER(p) ((struct heaps_header *)(p))
+
static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
+static VALUE define_final0(VALUE obj, VALUE block);
+VALUE rb_define_final(VALUE obj, VALUE block);
+VALUE rb_undefine_final(VALUE obj);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
rb_objspace_t *
@@ -535,14 +554,21 @@ rb_objspace_free(rb_objspace_t *objspace)
struct gc_list *list, *next;
for (list = global_List; list; list = next) {
next = list->next;
- free(list);
+ xfree(list);
}
}
+ if (objspace->heap.reserve_slots) {
+ struct heaps_slot *list, *next;
+ for (list = objspace->heap.reserve_slots; list; list = next) {
+ next = list->free_next;
+ free(list);
+ }
+ }
if (objspace->heap.sorted) {
size_t i;
for (i = 0; i < heaps_used; ++i) {
- free(objspace->heap.sorted[i].slot->membase);
- free(objspace->heap.sorted[i].slot);
+ free(objspace->heap.sorted[i]->base);
+ aligned_free(objspace->heap.sorted[i]);
}
free(objspace->heap.sorted);
heaps_used = 0;
@@ -560,23 +586,29 @@ rb_objspace_free(rb_objspace_t *objspace)
}
#endif
-/* tiny heap size */
-/* 32KB */
-/*#define HEAP_SIZE 0x8000 */
-/* 128KB */
-/*#define HEAP_SIZE 0x20000 */
-/* 64KB */
-/*#define HEAP_SIZE 0x10000 */
-/* 16KB */
-#define HEAP_SIZE 0x4000
-/* 8KB */
-/*#define HEAP_SIZE 0x2000 */
-/* 4KB */
-/*#define HEAP_SIZE 0x1000 */
-/* 2KB */
-/*#define HEAP_SIZE 0x800 */
-
-#define HEAP_OBJ_LIMIT (unsigned int)(HEAP_SIZE / sizeof(struct RVALUE))
+#ifndef HEAP_ALIGN_LOG
+/* default tiny heap size: 16KB */
+#define HEAP_ALIGN_LOG 14
+#endif
+#define HEAP_ALIGN (1UL << HEAP_ALIGN_LOG)
+#define HEAP_ALIGN_MASK (~(~0UL << HEAP_ALIGN_LOG))
+#define REQUIRED_SIZE_BY_MALLOC (sizeof(size_t) * 5)
+#define HEAP_SIZE (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC)
+#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
+
+#define HEAP_OBJ_LIMIT (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE))
+#define HEAP_BITMAP_LIMIT CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t)*8)
+#define HEAP_SLOT_SIZE (sizeof(struct heaps_slot) + (HEAP_BITMAP_LIMIT-1) * sizeof(uintptr_t))
+
+#define GET_HEAP_HEADER(x) (HEAP_HEADER(((uintptr_t)x) & ~(HEAP_ALIGN_MASK)))
+#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
+#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
+#define NUM_IN_SLOT(p) (((uintptr_t)p & HEAP_ALIGN_MASK)/sizeof(RVALUE))
+#define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * 8))
+#define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * 8)-1))
+#define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
+#define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p)))
+#define CLEAR_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] &= ~((uintptr_t)1 << BITMAP_OFFSET(p)))
extern sa_table rb_class_tbl;
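These macros are the heart of the bitmap-marking scheme: heap pages are now aligned to HEAP_ALIGN (16KB by default), so the page header, and with it the mark bitmap, is recovered from any object pointer by masking off the low bits, and an object's mark bit is addressed by its index within the page. A rough Ruby rendering of the address arithmetic, illustrative only (the 40-byte RVALUE and 64-bit words are assumptions about a typical 64-bit build):

    HEAP_ALIGN_LOG  = 14                        # 16KB-aligned heap pages
    HEAP_ALIGN_MASK = (1 << HEAP_ALIGN_LOG) - 1
    RVALUE_SIZE     = 40                        # assumed sizeof(RVALUE)
    WORD_BITS       = 64                        # assumed sizeof(uintptr_t)*8

    def bitmap_position(ptr)
      header = ptr & ~HEAP_ALIGN_MASK                # GET_HEAP_HEADER
      num    = (ptr & HEAP_ALIGN_MASK) / RVALUE_SIZE # NUM_IN_SLOT
      [header, num / WORD_BITS, num % WORD_BITS]     # page, word, bit
    end

    _, idx, bit = bitmap_position(0x7f12_3456_8000 + 3 * RVALUE_SIZE)
    # marking: bits[idx] |= 1 << bit   (MARK_IN_BITMAP)
    # testing: bits[idx][bit] == 1     (MARKED_IN_BITMAP)

Keeping the mark bits out of the objects themselves means marking no longer dirties every surviving RVALUE, which is what makes the collector copy-on-write friendly after fork.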
@@ -888,8 +920,10 @@ vm_xfree(rb_objspace_t *objspace, void *ptr)
size_t size;
ptr = ((size_t *)ptr) - 1;
size = ((size_t*)ptr)[0];
- objspace->malloc_params.allocated_size -= size;
- objspace->malloc_params.allocations--;
+ if (size) {
+ objspace->malloc_params.allocated_size -= size;
+ objspace->malloc_params.allocations--;
+ }
#endif
free(ptr);
@@ -1070,28 +1104,39 @@ rb_gc_unregister_address(VALUE *addr)
}
}
-
static void
allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
- struct sorted_heaps_slot *p;
- size_t size;
+ struct heaps_header **p;
+ struct heaps_slot *slot;
+ size_t size, add, i;
- size = next_heaps_length*sizeof(struct sorted_heaps_slot);
+ size = next_heaps_length*sizeof(struct heaps_header *);
+ add = next_heaps_length - heaps_used;
if (heaps_used > 0) {
- p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size);
+ p = (struct heaps_header **)realloc(objspace->heap.sorted, size);
if (p) objspace->heap.sorted = p;
}
else {
- p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size);
+ p = objspace->heap.sorted = (struct heaps_header **)malloc(size);
}
if (p == 0) {
during_gc = 0;
rb_memerror();
}
- heaps_length = next_heaps_length;
+
+ for (i = 0; i < add; i++) {
+ slot = (struct heaps_slot *)malloc(HEAP_SLOT_SIZE);
+ if (slot == 0) {
+ during_gc = 0;
+ rb_memerror();
+ return;
+ }
+ slot->free_next = objspace->heap.reserve_slots;
+ objspace->heap.reserve_slots = slot;
+ }
}
static void *
@@ -1144,45 +1189,55 @@ aligned_free(void *ptr)
}
static void
+link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
+{
+ slot->free_next = objspace->heap.free_slots;
+ objspace->heap.free_slots = slot;
+}
+
+static void
+unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
+{
+ objspace->heap.free_slots = slot->free_next;
+ slot->free_next = NULL;
+}
+
+static void
assign_heap_slot(rb_objspace_t *objspace)
{
- RVALUE *p, *pend, *membase;
+ RVALUE *p, *pend;
+ struct heaps_header *membase;
struct heaps_slot *slot;
size_t hi, lo, mid;
size_t objs;
objs = HEAP_OBJ_LIMIT;
- p = (RVALUE*)malloc(HEAP_SIZE);
- if (p == 0) {
- during_gc = 0;
- rb_memerror();
- }
- slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
- if (slot == 0) {
- xfree(p);
+ membase = (struct heaps_header*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
+ if (membase == 0) {
during_gc = 0;
rb_memerror();
}
+ assert(objspace->heap.reserve_slots != NULL);
+ slot = objspace->heap.reserve_slots;
+ objspace->heap.reserve_slots = slot->free_next;
MEMZERO((void*)slot, struct heaps_slot, 1);
slot->next = heaps;
if (heaps) heaps->prev = slot;
heaps = slot;
- membase = p;
+ p = (RVALUE*)((VALUE)membase + sizeof(struct heaps_header));
if ((VALUE)p % sizeof(RVALUE) != 0) {
- p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
- if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
- objs--;
- }
+ p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
+ objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE);
}
lo = 0;
hi = heaps_used;
while (lo < hi) {
- register RVALUE *mid_membase;
+ register struct heaps_header *mid_membase;
mid = (lo + hi) / 2;
- mid_membase = objspace->heap.sorted[mid].slot->membase;
+ mid_membase = objspace->heap.sorted[mid];
if (mid_membase < membase) {
lo = mid + 1;
}
@@ -1194,14 +1249,16 @@ assign_heap_slot(rb_objspace_t *objspace)
}
}
if (hi < heaps_used) {
- MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi);
- }
- objspace->heap.sorted[hi].slot = slot;
- objspace->heap.sorted[hi].start = p;
- objspace->heap.sorted[hi].end = (p + objs);
+ MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct heaps_header*, heaps_used - hi);
+ }
+ objspace->heap.sorted[hi] = membase;
+ membase->start = p;
+ membase->end = (p + objs);
+ membase->base = heaps;
+ membase->bits = heaps->bits;
+ membase->limit = objs;
heaps->membase = membase;
- heaps->slot = p;
- heaps->limit = objs;
+ memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
objspace->heap.free_num += objs;
pend = p + objs;
if (lomem == 0 || lomem > p) lomem = p;
@@ -1210,19 +1267,24 @@ assign_heap_slot(rb_objspace_t *objspace)
while (p < pend) {
p->as.free.flags = 0;
- p->as.free.next = freelist;
- freelist = p;
+ p->as.free.next = heaps->freelist;
+ heaps->freelist = p;
p++;
}
+ link_free_heap_slot(objspace, heaps);
}
static void
add_heap_slots(rb_objspace_t *objspace, size_t add)
{
size_t i;
+ size_t next_heaps_length;
- if ((heaps_used + add) > heaps_length) {
- allocate_sorted_heaps(objspace, heaps_used + add);
+ next_heaps_length = heaps_used + add;
+
+ if (next_heaps_length > heaps_length) {
+ allocate_sorted_heaps(objspace, next_heaps_length);
+ heaps_length = next_heaps_length;
}
for (i = 0; i < add; i++) {
@@ -1273,6 +1335,7 @@ set_heaps_increment(rb_objspace_t *objspace)
if (next_heaps_length > heaps_length) {
allocate_sorted_heaps(objspace, next_heaps_length);
+ heaps_length = next_heaps_length;
}
}
@@ -1295,6 +1358,7 @@ rb_during_gc(void)
}
#define RANY(o) ((RVALUE*)(o))
+#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
VALUE
rb_newobj(void)
@@ -1315,15 +1379,18 @@ rb_newobj(void)
}
}
- if (UNLIKELY(!freelist)) {
+ if (UNLIKELY(!has_free_object)) {
if (!gc_lazy_sweep(objspace)) {
during_gc = 0;
rb_memerror();
}
}
- obj = (VALUE)freelist;
- freelist = freelist->as.free.next;
+ obj = (VALUE)objspace->heap.free_slots->freelist;
+ objspace->heap.free_slots->freelist = RANY(obj)->as.free.next;
+ if (objspace->heap.free_slots->freelist == NULL) {
+ unlink_free_heap_slot(objspace, objspace->heap.free_slots);
+ }
MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
@@ -1624,26 +1691,27 @@ static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
register RVALUE *p = RANY(ptr);
- register struct sorted_heaps_slot *heap;
+ register struct heaps_header *heap;
register size_t hi, lo, mid;
if (p < lomem || p > himem) return FALSE;
if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
+ heap = GET_HEAP_HEADER(p);
/* check if p looks like a pointer using bsearch*/
lo = 0;
hi = heaps_used;
while (lo < hi) {
mid = (lo + hi) / 2;
- heap = &objspace->heap.sorted[mid];
- if (heap->start <= p) {
- if (p < heap->end)
- return TRUE;
- lo = mid + 1;
- }
- else {
- hi = mid;
- }
+ if (heap > objspace->heap.sorted[mid]) {
+ lo = mid + 1;
+ }
+ else if (heap < objspace->heap.sorted[mid]) {
+ hi = mid;
+ }
+ else {
+ return (p >= heap->start && p < heap->end) ? TRUE : FALSE;
+ }
}
return FALSE;
}
@@ -1842,6 +1910,16 @@ rb_gc_mark_maybe(VALUE obj)
}
}
+static int
+gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
+{
+ register uintptr_t *bits = GET_HEAP_BITMAP(ptr);
+ if (MARKED_IN_BITMAP(bits, ptr)) return 0;
+ MARK_IN_BITMAP(bits, ptr);
+ objspace->heap.live_num++;
+ return 1;
+}
+
static void
gc_mark(rb_objspace_t *objspace, VALUE ptr)
{
@@ -1850,9 +1928,7 @@ gc_mark(rb_objspace_t *objspace, VALUE ptr)
obj = RANY(ptr);
if (rb_special_const_p(ptr)) return; /* special const not marked */
if (obj->as.basic.flags == 0) return; /* free cell */
- if (obj->as.basic.flags & FL_MARK) return; /* already marked */
- obj->as.basic.flags |= FL_MARK;
- objspace->heap.live_num++;
+ if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
push_mark_stack(&objspace->mark_stack, ptr);
}
@@ -1867,6 +1943,7 @@ static void
gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
{
register RVALUE *obj = RANY(ptr);
+ register uintptr_t *bits;
goto marking; /* skip */
@@ -1874,8 +1951,9 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
obj = RANY(ptr);
if (rb_special_const_p(ptr)) return; /* special const not marked */
if (obj->as.basic.flags == 0) return; /* free cell */
- if (obj->as.basic.flags & FL_MARK) return; /* already marked */
- obj->as.basic.flags |= FL_MARK;
+ bits = GET_HEAP_BITMAP(ptr);
+ if (MARKED_IN_BITMAP(bits, ptr)) return; /* already marked */
+ MARK_IN_BITMAP(bits, ptr);
objspace->heap.live_num++;
marking:
@@ -2137,15 +2215,22 @@ gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
static int obj_free(rb_objspace_t *, VALUE);
-static inline void
-add_freelist(rb_objspace_t *objspace, RVALUE *p)
+static inline struct heaps_slot *
+add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p)
{
+ struct heaps_slot *slot;
+
VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
p->as.free.flags = 0;
- p->as.free.next = freelist;
- freelist = p;
+ slot = GET_HEAP_SLOT(p);
+ p->as.free.next = slot->freelist;
+ slot->freelist = p;
+
+ return slot;
}
+static void free_unused_heap(rb_objspace_t *objspace, struct heaps_header *heap);
+
static void
finalize_list(rb_objspace_t *objspace, RVALUE *p)
{
@@ -2153,17 +2238,16 @@ finalize_list(rb_objspace_t *objspace, RVALUE *p)
RVALUE *tmp = p->as.free.next;
run_final(objspace, (VALUE)p);
if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
- if (objspace->heap.sweep_slots) {
- p->as.free.flags = 0;
- }
- else {
+ add_slot_local_freelist(objspace, p);
+ if (!is_lazy_sweeping(objspace)) {
GC_PROF_DEC_LIVE_NUM;
- add_freelist(objspace, p);
}
}
else {
- struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
- slot->limit--;
+ struct heaps_header *heap = GET_HEAP_HEADER(p);
+ if (--heap->limit == 0) {
+ free_unused_heap(objspace, heap);
+ }
}
p = tmp;
}
@@ -2184,97 +2268,106 @@ unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
slot->next = NULL;
}
-
static void
-free_unused_heaps(rb_objspace_t *objspace)
+free_unused_heap(rb_objspace_t *objspace, struct heaps_header *heap)
{
- size_t i, j;
- RVALUE *last = 0;
-
- for (i = j = 1; j < heaps_used; i++) {
- if (objspace->heap.sorted[i].slot->limit == 0) {
- if (!last) {
- last = objspace->heap.sorted[i].slot->membase;
- }
- else {
- free(objspace->heap.sorted[i].slot->membase);
- }
- free(objspace->heap.sorted[i].slot);
- heaps_used--;
- }
- else {
- if (i != j) {
- objspace->heap.sorted[j] = objspace->heap.sorted[i];
- }
- j++;
- }
- }
- if (last) {
- if (last < heaps_freed) {
- free(heaps_freed);
- heaps_freed = last;
- }
- else {
- free(last);
- }
+ register size_t hi, lo, mid;
+ lo = 0;
+ hi = heaps_used;
+ while (lo < hi) {
+ mid = (lo + hi) / 2;
+ if (heap > objspace->heap.sorted[mid]) {
+ lo = mid + 1;
+ }
+ else if (heap < objspace->heap.sorted[mid]) {
+ hi = mid;
+ }
+ else {
+ /* remove unused heap */
+ struct heaps_slot* h = objspace->heap.sorted[mid]->base;
+ h->free_next = objspace->heap.reserve_slots;
+ objspace->heap.reserve_slots = h;
+ aligned_free(objspace->heap.sorted[mid]);
+ heaps_used--;
+ MEMMOVE(objspace->heap.sorted + mid, objspace->heap.sorted + mid + 1,
+ struct heaps_header *, heaps_used - mid);
+ return;
+ }
}
}
static void
+gc_clear_slot_bits(struct heaps_slot *slot)
+{
+ memset(slot->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
+}
+
+static void
slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
{
size_t free_num = 0, final_num = 0;
RVALUE *p, *pend;
- RVALUE *free = freelist, *final = deferred_final_list;
+ RVALUE *final = deferred_final_list;
int deferred;
+ uintptr_t *bits;
- p = sweep_slot->slot; pend = p + sweep_slot->limit;
+ p = sweep_slot->membase->start; pend = sweep_slot->membase->end;
+ bits = sweep_slot->bits;
while (p < pend) {
- if (!(p->as.basic.flags & FL_MARK)) {
- if (p->as.basic.flags &&
- ((deferred = obj_free(objspace, (VALUE)p)) ||
- (FL_TEST(p, FL_FINALIZE)))) {
- if (!deferred) {
- p->as.free.flags = T_ZOMBIE;
- RDATA(p)->dfree = 0;
+ if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) {
+ if (p->as.basic.flags) {
+ if ((deferred = obj_free(objspace, (VALUE)p)) ||
+ (FL_TEST(p, FL_FINALIZE))) {
+ if (!deferred) {
+ p->as.free.flags = T_ZOMBIE;
+ RDATA(p)->dfree = 0;
+ }
+ p->as.free.next = deferred_final_list;
+ deferred_final_list = p;
+ assert(BUILTIN_TYPE(p) == T_ZOMBIE);
+ final_num++;
+ }
+ else {
+ VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
+ p->as.free.flags = 0;
+ p->as.free.next = sweep_slot->freelist;
+ sweep_slot->freelist = p;
+ free_num++;
}
- p->as.free.flags |= FL_MARK;
- p->as.free.next = deferred_final_list;
- deferred_final_list = p;
- final_num++;
}
else {
- add_freelist(objspace, p);
free_num++;
}
}
- else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
- /* objects to be finalized */
- /* do nothing remain marked */
- }
- else {
- RBASIC(p)->flags &= ~FL_MARK;
- }
p++;
}
- if (final_num + free_num == sweep_slot->limit &&
+ gc_clear_slot_bits(sweep_slot);
+ if (final_num + free_num == sweep_slot->membase->limit &&
objspace->heap.free_num > objspace->heap.do_heap_free) {
RVALUE *pp;
for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
- RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
+ RDATA(pp)->dmark = 0;
pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
}
- sweep_slot->limit = final_num;
- freelist = free; /* cancel this page from freelist */
+ sweep_slot->membase->limit = final_num;
unlink_heap_slot(objspace, sweep_slot);
+ if (final_num == 0) {
+ free_unused_heap(objspace, sweep_slot->membase);
+ }
}
else {
+ if (free_num > 0) {
+ link_free_heap_slot(objspace, sweep_slot);
+ }
+ else {
+ sweep_slot->free_next = NULL;
+ }
objspace->heap.free_num += free_num;
}
objspace->heap.final_num += final_num;
- if (deferred_final_list) {
+ if (deferred_final_list && !finalizing) {
rb_thread_t *th = GET_THREAD();
if (th) {
RUBY_VM_SET_FINALIZER_INTERRUPT(th);
@@ -2286,7 +2379,7 @@ static int
ready_to_gc(rb_objspace_t *objspace)
{
if (dont_gc || during_gc) {
- if (!freelist) {
+ if (!has_free_object) {
if (!heaps_increment(objspace)) {
set_heaps_increment(objspace);
heaps_increment(objspace);
@@ -2300,15 +2393,17 @@ ready_to_gc(rb_objspace_t *objspace)
static void
before_gc_sweep(rb_objspace_t *objspace)
{
- freelist = 0;
objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
if (objspace->heap.free_min < initial_free_min) {
- objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
objspace->heap.free_min = initial_free_min;
+ if (objspace->heap.do_heap_free < initial_free_min) {
+ objspace->heap.do_heap_free = initial_free_min;
+ }
}
objspace->heap.sweep_slots = heaps;
objspace->heap.free_num = 0;
+ objspace->heap.free_slots = NULL;
/* sweep unlinked method entries */
if (GET_VM()->unlinked_method_entry_list) {
@@ -2331,8 +2426,6 @@ after_gc_sweep(rb_objspace_t *objspace)
if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
}
malloc_increase = 0;
-
- free_unused_heaps(objspace);
}
static int
@@ -2345,7 +2438,7 @@ lazy_sweep(rb_objspace_t *objspace)
next = objspace->heap.sweep_slots->next;
slot_sweep(objspace, objspace->heap.sweep_slots);
objspace->heap.sweep_slots = next;
- if (freelist) {
+ if (has_free_object) {
during_gc = 0;
return TRUE;
}
@@ -2357,10 +2450,10 @@ static void
rest_sweep(rb_objspace_t *objspace)
{
if (objspace->heap.sweep_slots) {
- while (objspace->heap.sweep_slots) {
- lazy_sweep(objspace);
- }
- after_gc_sweep(objspace);
+ while (objspace->heap.sweep_slots) {
+ lazy_sweep(objspace);
+ }
+ after_gc_sweep(objspace);
}
}
@@ -2407,9 +2500,9 @@ gc_lazy_sweep(rb_objspace_t *objspace)
}
GC_PROF_SWEEP_TIMER_START;
- if(!(res = lazy_sweep(objspace))) {
+ if (!(res = lazy_sweep(objspace))) {
after_gc_sweep(objspace);
- if(freelist) {
+ if (has_free_object) {
res = TRUE;
during_gc = 0;
}
@@ -2442,12 +2535,17 @@ void
rb_gc_force_recycle(VALUE p)
{
rb_objspace_t *objspace = &rb_objspace;
- GC_PROF_DEC_LIVE_NUM;
- if (RBASIC(p)->flags & FL_MARK) {
- RANY(p)->as.free.flags = 0;
+ struct heaps_slot *slot;
+
+ if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) {
+ add_slot_local_freelist(objspace, (RVALUE *)p);
}
else {
- add_freelist(objspace, (RVALUE *)p);
+ GC_PROF_DEC_LIVE_NUM;
+ slot = add_slot_local_freelist(objspace, (RVALUE *)p);
+ if (slot->free_next == NULL) {
+ link_free_heap_slot(objspace, slot);
+ }
}
}
@@ -2804,7 +2902,7 @@ static VALUE
objspace_each_objects(VALUE arg)
{
size_t i;
- RVALUE *membase = 0;
+ struct heaps_header *membase = 0;
RVALUE *pstart, *pend;
rb_objspace_t *objspace = &rb_objspace;
struct each_obj_args *args = (struct each_obj_args *)arg;
@@ -2812,16 +2910,16 @@ objspace_each_objects(VALUE arg)
i = 0;
while (i < heaps_used) {
- while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
+ while (0 < i && membase < objspace->heap.sorted[i-1])
i--;
- while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
+ while (i < heaps_used && objspace->heap.sorted[i] <= membase)
i++;
if (heaps_used <= i)
break;
- membase = objspace->heap.sorted[i].slot->membase;
+ membase = objspace->heap.sorted[i];
- pstart = objspace->heap.sorted[i].slot->slot;
- pend = pstart + objspace->heap.sorted[i].slot->limit;
+ pstart = membase->start;
+ pend = membase->end;
for (; pstart != pend; pstart++) {
if (pstart->as.basic.flags) {
@@ -2835,6 +2933,7 @@ objspace_each_objects(VALUE arg)
}
}
}
+ RB_GC_GUARD(v);
return Qnil;
}
@@ -3078,11 +3177,12 @@ run_single_final(VALUE arg)
}
static void
-run_finalizer(rb_objspace_t *objspace, VALUE objid, VALUE table)
+run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
{
long i;
int status;
VALUE args[3];
+ VALUE objid = nonspecial_obj_id(obj);
if (RARRAY_LEN(table) > 0) {
args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
@@ -3106,13 +3206,11 @@ run_finalizer(rb_objspace_t *objspace, VALUE objid, VALUE table)
static void
run_final(rb_objspace_t *objspace, VALUE obj)
{
- VALUE objid;
RUBY_DATA_FUNC free_func = 0;
st_data_t key, table;
objspace->heap.final_num--;
- objid = rb_obj_id(obj); /* make obj into id */
RBASIC(obj)->klass = 0;
if (RTYPEDDATA_P(obj)) {
@@ -3127,7 +3225,7 @@ run_final(rb_objspace_t *objspace, VALUE obj)
key = (st_data_t)obj;
if (st_delete(finalizer_table, &key, &table)) {
- run_finalizer(objspace, objid, (VALUE)table);
+ run_finalizer(objspace, obj, (VALUE)table);
}
}
@@ -3145,7 +3243,10 @@ finalize_deferred(rb_objspace_t *objspace)
void
rb_gc_finalize_deferred(void)
{
- finalize_deferred(&rb_objspace);
+ rb_objspace_t *objspace = &rb_objspace;
+ if (ATOMIC_EXCHANGE(finalizing, 1)) return;
+ finalize_deferred(objspace);
+ ATOMIC_SET(finalizing, 0);
}
struct force_finalize_list {
@@ -3181,6 +3282,8 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
rest_sweep(objspace);
+ if (ATOMIC_EXCHANGE(finalizing, 1)) return;
+
/* run finalizers */
finalize_deferred(objspace);
assert(deferred_final_list == 0);
@@ -3191,8 +3294,9 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
while (list) {
struct force_finalize_list *curr = list;
- run_finalizer(objspace, rb_obj_id(curr->obj), curr->table);
- st_delete(finalizer_table, (st_data_t*)&curr->obj, 0);
+ st_data_t obj = (st_data_t)curr->obj;
+ run_finalizer(objspace, curr->obj, curr->table);
+ st_delete(finalizer_table, &obj, 0);
list = curr->next;
xfree(curr);
}
@@ -3203,7 +3307,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
/* run data object's finalizers */
for (i = 0; i < heaps_used; i++) {
- p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
+ p = objspace->heap.sorted[i]->start; pend = objspace->heap.sorted[i]->end;
while (p < pend) {
if (BUILTIN_TYPE(p) == T_DATA &&
DATA_PTR(p) && RANY(p)->as.data.dfree &&
@@ -3239,6 +3343,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
st_free_table(finalizer_table);
finalizer_table = 0;
+ ATOMIC_SET(finalizing, 0);
}
void
@@ -3246,8 +3351,39 @@ rb_gc(void)
{
rb_objspace_t *objspace = &rb_objspace;
garbage_collect(objspace);
- finalize_deferred(objspace);
- free_unused_heaps(objspace);
+ if (!finalizing) finalize_deferred(objspace);
+}
+
+static inline int
+is_id_value(rb_objspace_t *objspace, VALUE ptr)
+{
+ if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
+ if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
+ if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
+ return TRUE;
+}
+
+static inline int
+is_dead_object(rb_objspace_t *objspace, VALUE ptr)
+{
+ struct heaps_slot *slot = objspace->heap.sweep_slots;
+ if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr))
+ return FALSE;
+ while (slot) {
+ if ((VALUE)slot->membase->start <= ptr && ptr < (VALUE)(slot->membase->end))
+ return TRUE;
+ slot = slot->next;
+ }
+ return FALSE;
+}
+
+static inline int
+is_live_object(rb_objspace_t *objspace, VALUE ptr)
+{
+ if (BUILTIN_TYPE(ptr) == 0) return FALSE;
+ if (RBASIC(ptr)->klass == 0) return FALSE;
+ if (is_dead_object(objspace, ptr)) return FALSE;
+ return TRUE;
}
/*
@@ -3292,11 +3428,10 @@ id2ref(VALUE obj, VALUE objid)
return ID2SYM(symid);
}
- if (!is_pointer_to_heap(objspace, (void *)ptr) ||
- BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
+ if (!is_id_value(objspace, ptr)) {
rb_raise(rb_eRangeError, "%p is not id value", p0);
}
- if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
+ if (!is_live_object(objspace, ptr)) {
rb_raise(rb_eRangeError, "%p is recycled object", p0);
}
return (VALUE)ptr;
@@ -3409,7 +3544,7 @@ count_objects(int argc, VALUE *argv, VALUE os)
VALUE hash;
if (rb_scan_args(argc, argv, "01", &hash) == 1) {
- if (TYPE(hash) != T_HASH)
+ if (!RB_TYPE_P(hash, T_HASH))
rb_raise(rb_eTypeError, "non-hash given");
}
@@ -3420,7 +3555,7 @@ count_objects(int argc, VALUE *argv, VALUE os)
for (i = 0; i < heaps_used; i++) {
RVALUE *p, *pend;
- p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
+ p = objspace->heap.sorted[i]->start; pend = objspace->heap.sorted[i]->end;
for (;p < pend; p++) {
if (p->as.basic.flags) {
counts[BUILTIN_TYPE(p)]++;
@@ -3429,7 +3564,7 @@ count_objects(int argc, VALUE *argv, VALUE os)
freed++;
}
}
- total += objspace->heap.sorted[i].slot->limit;
+ total += objspace->heap.sorted[i]->limit;
}
if (hash == Qnil) {
@@ -3528,7 +3663,7 @@ gc_stat(int argc, VALUE *argv, VALUE self)
VALUE hash;
if (rb_scan_args(argc, argv, "01", &hash) == 1) {
- if (TYPE(hash) != T_HASH)
+ if (!RB_TYPE_P(hash, T_HASH))
rb_raise(rb_eTypeError, "non-hash given");
}
@@ -3583,6 +3718,33 @@ gc_malloc_allocations(VALUE self)
}
#endif
+/*
+ * call-seq:
+ * GC::Profiler.raw_data -> [Hash, ...]
+ *
+ * Returns an Array of individual raw profile data Hashes ordered
+ * from earliest to latest by <tt>:GC_INVOKE_TIME</tt>. For example:
+ *
+ * [{:GC_TIME=>1.3000000000000858e-05,
+ * :GC_INVOKE_TIME=>0.010634999999999999,
+ * :HEAP_USE_SIZE=>289640,
+ * :HEAP_TOTAL_SIZE=>588960,
+ * :HEAP_TOTAL_OBJECTS=>14724,
+ * :GC_IS_MARKED=>false},
+ * ...
+ * ]
+ *
+ * The keys mean:
+ *
+ * +:GC_TIME+:: Time taken for this run in seconds
+ * +:GC_INVOKE_TIME+:: Time the GC was invoked since startup in seconds
+ * +:HEAP_USE_SIZE+:: Bytes of heap used
+ * +:HEAP_TOTAL_SIZE+:: Size of heap in bytes
+ * +:HEAP_TOTAL_OBJECTS+:: Number of objects
+ * +:GC_IS_MARKED+:: Is the GC in the mark phase
+ *
+ */
+
static VALUE
gc_profile_record_get(void)
{
@@ -3775,6 +3937,7 @@ Init_GC(void)
rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
+ rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
2  hash.c
@@ -1075,7 +1075,7 @@ clear_i(VALUE key, VALUE value, VALUE dummy)
*
*/
-static VALUE
+VALUE
rb_hash_clear(VALUE hash)
{
rb_hash_modify_check(hash);
2  include/ruby/intern.h
@@ -65,6 +65,7 @@ VALUE rb_ary_tmp_new(long);
void rb_ary_free(VALUE);
void rb_ary_modify(VALUE);
VALUE rb_ary_freeze(VALUE);
+VALUE rb_ary_dup_of_p(VALUE, VALUE);
VALUE rb_ary_aref(int, VALUE*, VALUE);
VALUE rb_ary_subseq(VALUE, long, long);
void rb_ary_store(VALUE, long, VALUE);
@@ -451,6 +452,7 @@ VALUE rb_hash_lookup(VALUE, VALUE);
VALUE rb_hash_lookup2(VALUE, VALUE, VALUE);
VALUE rb_hash_fetch(VALUE, VALUE);
VALUE rb_hash_aset(VALUE, VALUE, VALUE);
+VALUE rb_hash_clear(VALUE);
VALUE rb_hash_delete_if(VALUE);
VALUE rb_hash_delete(VALUE,VALUE);
typedef VALUE rb_hash_update_func(VALUE newkey, VALUE oldkey, VALUE value);
3  internal.h
@@ -95,6 +95,8 @@ VALUE rb_home_dir(const char *user, VALUE result);
VALUE rb_realpath_internal(VALUE basedir, VALUE path, int strict);
VALUE rb_file_expand_path_fast(VALUE, VALUE);
VALUE rb_file_expand_path_internal(VALUE, VALUE, int, int, VALUE);
+VALUE rb_get_path_check_to_string(VALUE, int);
+VALUE rb_get_path_check_convert(VALUE, VALUE, int);
void Init_File(void);
#ifdef _WIN32
@@ -120,6 +122,7 @@ VALUE rb_iseq_clone(VALUE iseqval, VALUE newcbase);
/* load.c */
VALUE rb_get_load_path(void);
+VALUE rb_get_expanded_load_path(void);
/* math.c */
VALUE rb_math_atan2(VALUE, VALUE);
510 load.c
@@ -34,21 +34,120 @@ rb_get_load_path(void)
return load_path;
}
-VALUE
-rb_get_expanded_load_path(void)
+enum expand_type {
+ EXPAND_ALL,
+ EXPAND_RELATIVE,
+ EXPAND_HOME,
+ EXPAND_NON_CACHE
+};
+
+/* Construct the expanded load path and store it in the cache.
+ We rebuild the load path partially if the cache is invalid.
+ We don't cache non-string objects; they are expanded every time. We ensure
+ that string objects in $LOAD_PATH are frozen.
+ */
+static void
+rb_construct_expanded_load_path(int type, int *has_relative, int *has_non_cache)
{
- VALUE load_path = rb_get_load_path();
+ rb_vm_t *vm = GET_VM();
+ VALUE load_path = vm->load_path;
+ VALUE expanded_load_path = vm->expanded_load_path;
VALUE ary;
long i;
+ int level = rb_safe_level();
ary = rb_ary_new2(RARRAY_LEN(load_path));
for (i = 0; i < RARRAY_LEN(load_path); ++i) {
- VALUE path = rb_file_expand_path_fast(RARRAY_PTR(load_path)[i], Qnil);
- rb_str_freeze(path);
- rb_ary_push(ary, path);
+ VALUE path, as_str, expanded_path;
+ int is_string, non_cache;
+ char *as_cstr;
+ as_str = path = RARRAY_PTR(load_path)[i];
+ is_string = RB_TYPE_P(path, T_STRING) ? 1 : 0;
+ non_cache = !is_string ? 1 : 0;
+ as_str = rb_get_path_check_to_string(path, level);
+ as_cstr = RSTRING_PTR(as_str);
+
+ if (!non_cache) {
+ if ((type == EXPAND_RELATIVE &&
+ rb_is_absolute_path(as_cstr)) ||
+ (type == EXPAND_HOME &&
+ (!as_cstr[0] || as_cstr[0] != '~')) ||
+ (type == EXPAND_NON_CACHE)) {
+ /* Use cached expanded path. */
+ rb_ary_push(ary, RARRAY_PTR(expanded_load_path)[i]);
+ continue;
+ }
+ }
+ if (!*has_relative && !rb_is_absolute_path(as_cstr))
+ *has_relative = 1;
+ if (!*has_non_cache && non_cache)
+ *has_non_cache = 1;
+ /* Freeze only string objects. We expand other objects every time. */
+ if (is_string)
+ rb_str_freeze(path);
+ as_str = rb_get_path_check_convert(path, as_str, level);
+ expanded_path = rb_file_expand_path_fast(as_str, Qnil);
+ rb_str_freeze(expanded_path);
+ rb_ary_push(ary, expanded_path);
}
rb_obj_freeze(ary);
- return ary;
+ vm->expanded_load_path = ary;
+ rb_ary_replace(vm->load_path_snapshot, vm->load_path);
+}
+
+static VALUE
+load_path_getcwd(void)
+{
+ char *cwd = my_getcwd();
+ VALUE cwd_str = rb_filesystem_str_new_cstr(cwd);
+ xfree(cwd);
+ return cwd_str;
+}
+
+VALUE
+rb_get_expanded_load_path(void)
+{
+ rb_vm_t *vm = GET_VM();
+ const VALUE non_cache = Qtrue;
+
+ if (!rb_ary_dup_of_p(vm->load_path_snapshot, vm->load_path)) {
+ /* The load path was modified. Rebuild the expanded load path. */
+ int has_relative = 0, has_non_cache = 0;
+ rb_construct_expanded_load_path(EXPAND_ALL, &has_relative, &has_non_cache);
+ if (has_relative) {
+ vm->load_path_check_cache = load_path_getcwd();
+ }
+ else if (has_non_cache) {
+ /* Non string object. */
+ vm->load_path_check_cache = non_cache;
+ }
+ else {
+ vm->load_path_check_cache = 0;
+ }
+ }
+ else if (vm->load_path_check_cache == non_cache) {
+ int has_relative = 1, has_non_cache = 1;
+ /* Expand only non-cacheable objects. */
+ rb_construct_expanded_load_path(EXPAND_NON_CACHE,
+ &has_relative, &has_non_cache);
+ }
+ else if (vm->load_path_check_cache) {
+ int has_relative = 1, has_non_cache = 1;
+ VALUE cwd = load_path_getcwd();
+ if (!rb_str_equal(vm->load_path_check_cache, cwd)) {
+ /* Current working directory or filesystem encoding was changed.
+ Expand relative load path and non-cacheable objects again. */
+ vm->load_path_check_cache = cwd;
+ rb_construct_expanded_load_path(EXPAND_RELATIVE,
+ &has_relative, &has_non_cache);
+ }
+ else {
+ /* Expand only tilde (User HOME) and non-cacheable objects. */
+ rb_construct_expanded_load_path(EXPAND_HOME,
+ &has_relative, &has_non_cache);
+ }
+ }
+ return vm->expanded_load_path;
}
static VALUE
@@ -63,12 +162,320 @@ get_loaded_features(void)
return GET_VM()->loaded_features;
}
+static void
+reset_loaded_features_snapshot(void)
+{
+ rb_vm_t *vm = GET_VM();
+ rb_ary_replace(vm->loaded_features_snapshot, vm->loaded_features);
+}
+
static st_table *
get_loading_table(void)
{
return GET_VM()->loading_table;
}
+static inline uint32_t
+fmix_uint(uint32_t val)
+{
+ /* with -O2, even on i386, gcc and clang compile this down to a
+ * single 32bit*32bit=64bit multiplication */
+ uint64_t res = ((uint64_t)val) * (uint64_t)0x85ebca6b;
+ return (uint32_t)res ^ (uint32_t)(res>>32);
+}
+
+static uint32_t
+fast_string_hash(const char *str, long len)
+{
+ uint32_t res = 0;
+ for(;len > 0; str+=16, len-=16) {
+ uint32_t buf[4] = {0, 0, 0, 0};
+ memcpy(buf, str, len < 16 ? len : 16);
+ res = fmix_uint(res ^ buf[0]);
+ res = fmix_uint(res ^ buf[1]);
+ res = fmix_uint(res ^ buf[2]);
+ res = fmix_uint(res ^ buf[3]);
+ }
+ return res;
+}
+
+/*
+ * We build an index for loaded features, relying on the fact that
+ * loaded_feature_path rechecks the feature name. Thanks to that, we can
+ * store just a hash of each string in the index instead of the whole string.
+ * Instead of allocating an array of offsets for each feature, we organize
+ * the offsets into singly linked lists - one list per feature - all stored
+ * in a single array. The hash then records the positions of the head and
+ * tail of each list.
+ */
+#define FI_LAST (-1)
+#define FI_DEFAULT_HASH_SIZE (64)
+#define FI_DEFAULT_LIST_SIZE (64)
+
+typedef struct features_index_hash_item {
+ uint32_t hash;
+ /* we will store position + 1 in head and tail,
+ * so that if head == 0 then the item is free */
+ int head;
+ int tail;
+} fi_hash_item;
+
+typedef struct features_index_hash {
+ int capa;
+ int size;
+ fi_hash_item *items;
+} fi_hash;
+
+static fi_hash_item *
+fi_hash_candidate(fi_hash *index, uint32_t hash)
+{
+ fi_hash_item *items = index->items;
+ int capa_1 = index->capa - 1;
+ int pos = hash & capa_1;
+ if (items[pos].hash != hash && items[pos].head != 0) {
+ /* always odd, so that it has no common divisor with capa */
+ int step = (hash % capa_1) | 1;
+ do {
+ pos = (pos + step) & capa_1;
+ } while (items[pos].hash != hash && items[pos].head != 0);
+ }
+ return items + pos;
+}
+
+static void
+fi_hash_rehash(fi_hash *index)
+{
+ fi_hash temp;
+ int i;
+ fi_hash_item *items = index->items;
+ temp.capa = index->capa * 2;
+ temp.size = index->size;
+ temp.items = xcalloc(temp.capa, sizeof(fi_hash_item));
+ for(i=0; i < index->capa; i++) {
+ if (items[i].head) {
+ fi_hash_item *item = fi_hash_candidate(&temp, items[i].hash);
+ *item = items[i];
+ }
+ }
+ *index = temp;
+ xfree(items);
+}
+
+static int
+fi_hash_find_head(fi_hash *index, const char *str, long len)
+{
+ uint32_t hash = fast_string_hash(str, len);
+ fi_hash_item *item = fi_hash_candidate(index, hash);
+ return item->head - 1; /* if head==0 then result is FI_LAST */
+}
+
+/* inserts position of tail into hash,
+ * returns previous tail position or FI_LAST */
+static int
+fi_hash_insert_pos(fi_hash *index, const char *str, long len, int pos)
+{
+ fi_hash_item *item;
+ uint32_t hash = fast_string_hash(str, len);
+ if (index->size > index->capa / 4 * 3) {
+ fi_hash_rehash(index);
+ }
+ item = fi_hash_candidate(index, hash);
+ if (item->head) {
+ int res = item->tail - 1;
+ item->tail = pos + 1;
+ return res;
+ }
+ else {
+ item->hash = hash;
+ item->head = pos + 1;
+ item->tail = pos + 1;
+ index->size++;
+ return FI_LAST;
+ }
+}
+
+typedef struct features_index_multilist_item {
+ int offset;
+ int next;
+} fi_list_item;
+
+typedef struct features_index_multilist {
+ fi_list_item *items;
+ int capa;
+ int size;
+} fi_list;
+
+static void
+fi_list_insert_offset(fi_list *list, int offset, int prev_pos)
+{
+ if (list->size == list->capa) {
+ REALLOC_N(list->items, fi_list_item, list->capa*2);
+ MEMZERO(list->items + list->capa, fi_list_item, list->capa);
+ list->capa*=2;
+ }
+ list->items[list->size].offset = offset;
+ list->items[list->size].next = FI_LAST;
+ if (prev_pos != FI_LAST) {
+ list->items[prev_pos].next = list->size;
+ }
+ list->size++;
+}
+
+typedef struct features_index {
+ fi_hash hash;
+ fi_list list;
+} st_features_index;
+
+static void
+features_index_free(void *p)
+{
+ if (p) {
+ st_features_index *fi = p;
+ xfree(fi->hash.items);
+ xfree(fi->list.items);
+ xfree(fi);
+ }
+}
+
+static VALUE
+features_index_allocate(void)
+{
+ st_features_index *index = xcalloc(1, sizeof(st_features_index));
+ index->hash.capa = FI_DEFAULT_HASH_SIZE;
+ index->hash.items = xcalloc(index->hash.capa, sizeof(fi_hash_item));
+ index->list.capa = FI_DEFAULT_LIST_SIZE;
+ index->list.items = xcalloc(index->list.capa, sizeof(fi_list_item));
+ return Data_Wrap_Struct(rb_cObject, 0, features_index_free, index);
+}
+
+static st_features_index *
+get_loaded_features_index_raw(void)
+{
+ st_features_index *index;
+ Data_Get_Struct(GET_VM()->loaded_features_index, st_features_index, index);
+ return index;
+}
+
+static void
+features_index_clear(void)
+{
+ st_features_index *index = get_loaded_features_index_raw();
+ MEMZERO(index->hash.items, fi_hash_item, index->hash.capa);
+ index->hash.size = 0;
+ MEMZERO(index->list.items, fi_list_item, index->list.capa);
+ index->list.size = 0;
+}
+
+static void
+features_index_add_single(const char *short_feature, long len, int offset)
+{
+ st_features_index *index = get_loaded_features_index_raw();
+ int prev_pos = fi_hash_insert_pos(&index->hash, short_feature, len, index->list.size);
+ fi_list_insert_offset(&index->list, offset, prev_pos);
+}
+
+/* Add to the loaded-features index all the required entries for
+ `feature`, located at `offset` in $LOADED_FEATURES. We add an
+ index entry at each string `short_feature` for which
+ feature == "#{prefix}#{short_feature}#{e}"
+ where `e` is empty or matches %r{^\.[^./]*$}, and `prefix` is empty
+ or ends in '/'. This maintains the invariant that `rb_feature_p()`
+ relies on for its fast lookup.
+*/
+static void
+features_index_add(const char *feature_str, long len, int offset)
+{
+ const char *feature_end, *ext, *p;
+
+ feature_end = feature_str + len;
+
+ for (ext = feature_end; ext > feature_str; ext--)
+ if (*ext == '.' || *ext == '/')
+ break;
+ if (*ext != '.')
+ ext = NULL;
+ /* Now `ext` points to the only string matching %r{^\.[^./]*$} that is
+ at the end of `feature`, or is NULL if there is no such string. */
+
+ p = ext ? ext : feature_end;
+ while (1) {
+ p--;
+ while (p >= feature_str && *p != '/')
+ p--;
+ if (p < feature_str)
+ break;
+ /* Now *p == '/'. We reach this point for every '/' in `feature`. */
+ features_index_add_single(p + 1, feature_end - p - 1, offset);
+ if (ext) {
+ features_index_add_single(p + 1, ext - p - 1, offset);
+ }
+ }
+ features_index_add_single(feature_str, len, offset);
+ if (ext) {
+ features_index_add_single(feature_str, ext - feature_str, offset);
+ }
+}
+
+static fi_list_item
+features_index_find(st_features_index *index, const char *feature, long len)
+{
+ int pos = fi_hash_find_head(&index->hash, feature, len);
+ if (pos == FI_LAST) {
+ fi_list_item res = {FI_LAST, FI_LAST};
+ return res;
+ }
+ else
+ return index->list.items[pos];
+}
+
+static fi_list_item
+features_index_next(st_features_index *index, fi_list_item cur)
+{
+ if (cur.next == FI_LAST) {
+ fi_list_item res = {FI_LAST, FI_LAST};
+ return res;
+ }
+ else
+ return index->list.items[cur.next];
+}
+
+static st_features_index *
+get_loaded_features_index(void)
+{
+ VALUE features;
+ int i;
+ rb_vm_t *vm = GET_VM();
+
+ if (!rb_ary_dup_of_p(vm->loaded_features_snapshot, vm->loaded_features)) {
+ /* The sharing was broken; something (other than us in rb_provide_feature())
+ modified loaded_features. Rebuild the index. */
+ features_index_clear();
+ features = vm->loaded_features;
+ for (i = 0; i < RARRAY_LEN(features); i++) {
+ VALUE entry, as_str;
+ as_str = entry = rb_ary_entry(features, i);
+ StringValue(as_str);
+ if (as_str != entry)
+ rb_ary_store(features, i, as_str);
+ rb_str_freeze(as_str);
+ features_index_add(RSTRING_PTR(as_str), RSTRING_LEN(as_str), i);
+ }
+ reset_loaded_features_snapshot();
+ }
+ return get_loaded_features_index_raw();
+}
+
+/* This searches `load_path` for a value such that
+ name == "#{load_path[i]}/#{feature}"
+ if `feature` is a suffix of `name`, or otherwise
+ name == "#{load_path[i]}/#{feature}#{ext}"
+ for an acceptable string `ext`. It returns
+ `load_path[i].to_str` if found, else 0.
+
+ If type is 's', then `ext` is acceptable only if IS_DLEXT(ext);
+ if 'r', then only if IS_RBEXT(ext); otherwise `ext` may be absent
+ or have any value matching `%r{^\.[^./]*$}`.
+*/
static VALUE
loaded_feature_path(const char *name, long vlen, const char *feature, long len,
int type, VALUE load_path)
@@ -88,23 +495,22 @@ loaded_feature_path(const char *name, long vlen, const char *feature, long len,
return 0;
plen = e - name - len - 1;
}
+ if ((type == 's' && !IS_DLEXT(&name[plen+len+1]))
+ || (type == 'r' && !IS_RBEXT(&name[plen+len+1]))
+ || name[plen] != '/') {
+ return 0;
+ }
+ /* Now name == "#{prefix}/#{feature}#{ext}" where ext is acceptable
+ (possibly empty) and prefix is some string of length plen. */
+
for (i = 0; i < RARRAY_LEN(load_path); ++i) {
VALUE p = RARRAY_PTR(load_path)[i];
const char *s = StringValuePtr(p);
long n = RSTRING_LEN(p);
- if (n != plen ) continue;
- if (n && (strncmp(name, s, n) || name[n] != '/')) continue;
- switch (type) {
- case 's':
- if (IS_DLEXT(&name[n+len+1])) return p;
- break;
- case 'r':
- if (IS_RBEXT(&name[n+len+1])) return p;
- break;
- default:
- return p;
- }
+ if (n != plen) continue;
+ if (n && strncmp(name, s, n)) continue;
+ return p;
}
return 0;
}
@@ -132,10 +538,12 @@ loaded_feature_path_i(st_data_t v, st_data_t b, st_data_t f)
static int
rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const char **fn)
{
- VALUE v, features, p, load_path = 0;
+ VALUE features, v, p, load_path = 0;
const char *f, *e;
long i, len, elen, n;
st_table *loading_tbl;
+ st_features_index *features_index;
+ fi_list_item index_list;
st_data_t data;
int type;
@@ -151,8 +559,43 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
type = 0;
}
features = get_loaded_features();
- for (i = 0; i < RARRAY_LEN(features); ++i) {
- v = RARRAY_PTR(features)[i];
+ features_index = get_loaded_features_index();
+
+ index_list = features_index_find(features_index, feature, len);
+ /* We search `features` for an entry such that either
+ "#{features[i]}" == "#{load_path[j]}/#{feature}#{e}"
+ for some j, or
+ "#{features[i]}" == "#{feature}#{e}"
+ Here `e` is an "allowed" extension -- either empty or one
+ of the extensions accepted by IS_RBEXT, IS_SOEXT, or
+ IS_DLEXT. Further, if `ext && rb` then `IS_RBEXT(e)`,
+ and if `ext && !rb` then `IS_SOEXT(e) || IS_DLEXT(e)`.
+
+ If `expanded`, then only the latter form (without load_path[j])
+ is accepted. Otherwise either form is accepted, *unless* `ext`
+ is false and an otherwise-matching entry of the first form is
+ preceded by an entry of the form
+ "#{features[i2]}" == "#{load_path[j2]}/#{feature}#{e2}"
+ where `e2` matches %r{^\.[^./]*$} but is not an allowed extension.
+ After a "distractor" entry of this form, only entries of the
+ form "#{feature}#{e}" are accepted.
+
+ In `rb_provide_feature()` and `get_loaded_features_index()` we
+ maintain an invariant that the list `index` will point to at least
+ every entry in `features` which has the form
+ "#{prefix}#{feature}#{e}"
+ where `e` is empty or matches %r{^\.[^./]*$}, and `prefix` is empty
+ or ends in '/'. This includes both match forms above, as well
+ as any distractors, so we may ignore all other entries in `features`.
+ (since we store only the hash value of the feature, other features may
+ also appear in the list, but the probability of that is very small, and
+ loaded_feature_path will recheck in any case)
+ */
+ for (; index_list.offset != FI_LAST ;
+ index_list = features_index_next(features_index, index_list)) {
+ long index = index_list.offset;
+
+ v = RARRAY_PTR(features)[index];
f = StringValuePtr(v);
if ((n = RSTRING_LEN(v)) < len) continue;
if (strncmp(f, feature, len) != 0) {
@@ -175,6 +618,7 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
return 'r';
}
}
+
loading_tbl = get_loading_table();
if (loading_tbl) {
f = 0;
@@ -183,7 +627,7 @@ rb_feature_p(const char *feature, const char *ext, int rb, int expanded, const c
fs.name = feature;
fs.len = len;
fs.type = type;
- fs.load_path = load_path ? load_path : rb_get_load_path();
+ fs.load_path = load_path ? load_path : rb_get_expanded_load_path();
fs.result = 0;
st_foreach(loading_tbl, loaded_feature_path_i, (st_data_t)&fs);
if ((f = fs.result) != 0) {
@@ -233,7 +677,7 @@ rb_feature_provided(const char *feature, const char **loading)
if (*feature == '.' &&
(feature[1] == '/' || strncmp(feature+1, "./", 2) == 0)) {
- fullpath = rb_file_expand_path_fast(rb_str_new2(feature), Qnil);
+ fullpath = rb_file_expand_path_fast(rb_get_path(rb_str_new2(feature)), Qnil);
feature = RSTRING_PTR(fullpath);
}
if (ext && !strchr(ext, '/')) {
@@ -254,11 +698,20 @@ rb_feature_provided(const char *feature, const char **loading)
static void
rb_provide_feature(VALUE feature)
{
- if (OBJ_FROZEN(get_loaded_features())) {
+ VALUE features;
+
+ features = get_loaded_features();
+ if (OBJ_FROZEN(features)) {
rb_raise(rb_eRuntimeError,
"$LOADED_FEATURES is frozen; cannot append feature");
}
- rb_ary_push(get_loaded_features(), feature);
+ rb_str_freeze(feature);
+
+ rb_ary_push(features, feature);
+ StringValue(feature);
+ features_index_add(RSTRING_PTR(feature), RSTRING_LEN(feature), (int)(RARRAY_LEN(features)-1));
+ RB_GC_GUARD(feature);
+ reset_loaded_features_snapshot();
}
void
@@ -774,10 +1227,15 @@ Init_load()
rb_alias_variable(rb_intern("$-I"), id_load_path);
rb_alias_variable(rb_intern("$LOAD_PATH"), id_load_path);
vm->load_path = rb_ary_new();
+ vm->expanded_load_path = rb_ary_new();
+ vm->load_path_snapshot = rb_ary_new();
+ vm->load_path_check_cache = 0;
rb_define_virtual_variable("$\"", get_loaded_features, 0);
rb_define_virtual_variable("$LOADED_FEATURES", get_loaded_features, 0);
vm->loaded_features = rb_ary_new();
+ vm->loaded_features_snapshot = rb_ary_new();
+ vm->loaded_features_index = features_index_allocate();
rb_define_global_function("load", rb_f_load, -1);
rb_define_global_function("require", rb_f_require, 1);
3  ruby.c
@@ -1366,7 +1366,8 @@ process_options(int argc, char **argv, struct cmdline_options *opt)
long i;
VALUE load_path = GET_VM()->load_path;
for (i = 0; i < RARRAY_LEN(load_path); ++i) {
- rb_enc_associate(RARRAY_PTR(load_path)[i], lenc);
+ RARRAY_PTR(load_path)[i] =
+ rb_enc_associate(rb_str_dup(RARRAY_PTR(load_path)[i]), lenc);
}
}
if (!(opt->disable & DISABLE_BIT(gems))) {
12 test/ruby/test_array.rb
@@ -414,6 +414,18 @@ def test_ASET # '[]='
a = @cls[1, 2, 3]
a[-1, 0] = a
assert_equal([1, 2, 1, 2, 3, 3], a)
+
+ a = @cls[]
+ a[5,0] = [5]
+ assert_equal([nil, nil, nil, nil, nil, 5], a)
+
+ a = @cls[1]
+ a[1,0] = [2]
+ assert_equal([1, 2], a)
+
+ a = @cls[1]
+ a[1,1] = [2]
+ assert_equal([1, 2], a)
end
def test_assoc
110 test/ruby/test_require.rb
@@ -356,4 +356,114 @@ def test_loaded_features_encoding
$:.replace(loadpath)
$".replace(features)
end
+
+ def test_require_changed_current_dir
+ bug7158 = '[ruby-core:47970]'
+ Dir.mktmpdir {|tmp|
+ Dir.chdir(tmp) {
+ Dir.mkdir("a")
+ Dir.mkdir("b")
+ open(File.join("a", "foo.rb"), "w") {}
+ open(File.join("b", "bar.rb"), "w") {|f|
+ f.puts "p :ok"
+ }
+ assert_in_out_err([], <<-INPUT, %w(:ok), [], bug7158)
+ $: << "."
+ Dir.chdir("a")
+ require "foo"
+ Dir.chdir("../b")
+ p :ng unless require "bar"
+ Dir.chdir("..")
+ p :ng if require "b/bar"
+ INPUT
+ }
+ }
+ end
+
+ def test_require_not_modified_load_path
+ bug7158 = '[ruby-core:47970]'
+ Dir.mktmpdir {|tmp|
+ Dir.chdir(tmp) {
+ open("foo.rb", "w") {}
+ assert_in_out_err([], <<-INPUT, %w(:ok), [], bug7158)
+ a = Object.new
+ def a.to_str
+ "#{tmp}"
+ end
+ $: << a
+ require "foo"
+ last_path = $:.pop
+ p :ok if last_path == a && last_path.class == Object
+ INPUT
+ }
+ }
+ end
+
+ def test_require_changed_home
+ bug7158 = '[ruby-core:47970]'
+ Dir.mktmpdir {|tmp|
+ Dir.chdir(tmp) {
+ open("foo.rb", "w") {}
+ Dir.mkdir("a")
+ open(File.join("a", "bar.rb"), "w") {}
+ assert_in_out_err([], <<-INPUT, %w(:ok), [], bug7158)
+ $: << '~'
+ ENV['HOME'] = "#{tmp}"
+ require "foo"
+ ENV['HOME'] = "#{tmp}/a"
+ p :ok if require "bar"
+ INPUT
+ }
+ }
+ end
+
+ def test_require_to_path_redefined_in_load_path
+ bug7158 = '[ruby-core:47970]'
+ Dir.mktmpdir {|tmp|
+ Dir.chdir(tmp) {
+ open("foo.rb", "w") {}
+ assert_in_out_err(["RUBYOPT"=>nil], <<-INPUT, %w(:ok), [], bug7158)
+ a = Object.new
+ def a.to_path
+ "bar"
+ end
+ $: << a
+ begin
+ require "foo"
+ p :ng
+ rescue LoadError
+ end
+ def a.to_path
+ "#{tmp}"
+ end
+ p :ok if require "foo"
+ INPUT
+ }
+ }
+ end
+
+ def test_require_to_str_redefined_in_load_path
+ bug7158 = '[ruby-core:47970]'
+ Dir.mktmpdir {|tmp|
+ Dir.chdir(tmp) {
+ open("foo.rb", "w") {}
+ assert_in_out_err(["RUBYOPT"=>nil], <<-INPUT, %w(:ok), [], bug7158)
+ a = Object.new
+ def a.to_str
+ "foo"
+ end
+ $: << a
+ begin
+ require "foo"
+ p :ng
+ rescue LoadError
+ end