Drill newobj cache instead of ractor
peterzhu2118 committed Feb 12, 2024
1 parent d4a6c65 commit 190a55d
Showing 1 changed file with 20 additions and 26 deletions.
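In short: newobj_alloc, newobj_slowpath, and the newobj_slowpath_wb_* helpers now receive the per-ractor allocation cache (rb_ractor_newobj_cache_t *) instead of the ractor itself (rb_ractor_t *). The cache is looked up once, via &cr->newobj_cache, and drilled down the call chain, while the VM-lock macros obtain the current ractor with GET_RACTOR(); the old newobj_of0/newobj_of pair is also collapsed into a single newobj_of. The stand-alone C sketch below only models that "drilling" pattern with hypothetical stand-in types and functions (newobj_cache_t, ractor_t, alloc_with_ractor, alloc_with_cache); it is not CRuby code.

#include <stdio.h>

/* Hypothetical stand-ins for rb_ractor_newobj_cache_t and rb_ractor_t. */
typedef struct { int used_slots; } newobj_cache_t;
typedef struct { newobj_cache_t newobj_cache; } ractor_t;

/* Old shape: the allocator took the whole ractor and reached into it. */
static int
alloc_with_ractor(ractor_t *cr)
{
    newobj_cache_t *cache = &cr->newobj_cache;
    return cache->used_slots++;
}

/* New shape: the caller drills the cache down, so the allocator no longer
 * depends on the ractor type at all. */
static int
alloc_with_cache(newobj_cache_t *cache)
{
    return cache->used_slots++;
}

int
main(void)
{
    ractor_t cr = { .newobj_cache = { 0 } };
    printf("%d\n", alloc_with_ractor(&cr));             /* before this commit */
    printf("%d\n", alloc_with_cache(&cr.newobj_cache)); /* after this commit  */
    return 0;
}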
46 changes: 20 additions & 26 deletions gc.c
@@ -2799,11 +2799,10 @@ size_pool_idx_for_size(size_t size)
 }

 static VALUE
-newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, bool vm_locked)
+newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx, bool vm_locked)
 {
     rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
     rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
-    rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;

     VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);

@@ -2812,7 +2811,7 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, boo
         bool unlock_vm = false;

         if (!vm_locked) {
-            RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
+            RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
             vm_locked = true;
             unlock_vm = true;
         }
@@ -2841,7 +2840,7 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, boo
         }

         if (unlock_vm) {
-            RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
+            RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
         }
     }

@@ -2856,15 +2855,15 @@ newobj_zero_slot(VALUE obj)
     memset((char *)obj + sizeof(struct RBasic), 0, rb_gc_obj_slot_size(obj) - sizeof(struct RBasic));
 }

-ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));
+ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t size_pool_idx));

 static inline VALUE
-newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
+newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t size_pool_idx)
 {
     VALUE obj;
     unsigned int lev;

-    RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
+    RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
     {
         if (UNLIKELY(during_gc || ruby_gc_stressful)) {
             if (during_gc) {
@@ -2880,35 +2879,35 @@ newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *
             }
         }

-        obj = newobj_alloc(objspace, cr, size_pool_idx, true);
+        obj = newobj_alloc(objspace, cache, size_pool_idx, true);
         newobj_init(klass, flags, wb_protected, objspace, obj);

         gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_zero_slot(obj));
     }
-    RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
+    RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);

     return obj;
 }

 NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
-                                                   rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
+                                                   rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx));
 NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
-                                                     rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
+                                                     rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx));

 static VALUE
-newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
+newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx)
 {
-    return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
+    return newobj_slowpath(klass, flags, objspace, cache, TRUE, size_pool_idx);
 }

 static VALUE
-newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
+newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t size_pool_idx)
 {
-    return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
+    return newobj_slowpath(klass, flags, objspace, cache, FALSE, size_pool_idx);
 }

 static inline VALUE
-newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
+newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
 {
     VALUE obj;
     rb_objspace_t *objspace = &rb_objspace;
@@ -2929,28 +2928,23 @@ newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t a
         flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
     }

+    rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;
+
     if (!UNLIKELY(during_gc ||
                   ruby_gc_stressful ||
                   gc_event_newobj_hook_needed_p(objspace)) &&
             wb_protected) {
-        obj = newobj_alloc(objspace, cr, size_pool_idx, false);
+        obj = newobj_alloc(objspace, cache, size_pool_idx, false);
         newobj_init(klass, flags, wb_protected, objspace, obj);
     }
     else {
         RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);

         obj = wb_protected ?
-          newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
-          newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
+          newobj_slowpath_wb_protected(klass, flags, objspace, cache, size_pool_idx) :
+          newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, size_pool_idx);
     }

-    return obj;
-}
-
-static inline VALUE
-newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
-{
-    VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
     return newobj_fill(obj, v1, v2, v3);
 }
