cache free pages per ractor
The per-ractor object allocation cache (GH-#3842) cached only one heap page; this
patch caches several pages so that at least 512 free slots are kept available when
possible. Increasing the number of cached free slots is safe because every cached
slot is collected when GC is invoked.
ko1 committed Dec 10, 2020
1 parent 554c094 commit da3bca5
Showing 2 changed files with 58 additions and 12 deletions.
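
The mechanism the commit message describes can be summarized apart from the GC details: each ractor owns a lock-free freelist plus a chain of cached free pages, and it refills the freelist from that chain before falling back to the shared heap (which requires the VM lock). Below is a minimal, self-contained model of that scheme; slot, page, newobj_cache_model, cache_fill, cached_alloc, and the shrunken constants are toy stand-ins for RVALUE, struct heap_page, the newobj_cache struct, and RACTOR_SLOT_CACHE_NUM, not the real definitions in the diff.

/* Toy model of the per-ractor free-slot cache (illustration only). */
#include <stdio.h>

#define SLOTS_PER_PAGE 4   /* tiny pages so the run is easy to follow */
#define SLOT_CACHE_NUM 6   /* stands in for RACTOR_SLOT_CACHE_NUM (512); the slow
                            * path (not modeled here) would detach pages until this
                            * many free slots are cached */

struct slot { struct slot *next; };        /* stand-in for RVALUE */

struct page {                              /* stand-in for struct heap_page */
    struct page *free_next;                /* next page on a free-page chain */
    struct slot *freelist;                 /* free slots inside this page */
    int free_slots;
    struct slot slots[SLOTS_PER_PAGE];
};

struct newobj_cache_model {                /* mirrors newobj_cache in ractor_core.h */
    struct slot *freelist;
    struct page *using_page;
    struct page *free_pages;               /* the new field: extra cached pages */
};

/* Move a page's freelist into the ractor-local cache
 * (what ractor_cache_fill_freelist does in the diff below). */
static void cache_fill(struct newobj_cache_model *c, struct page *p)
{
    c->using_page = p;
    c->freelist = p->freelist;
    p->free_slots = 0;
    p->freelist = NULL;
}

/* Fast path: pop a slot without any lock; if the freelist is empty, refill it
 * from the cached pages (what ractor_cached_freeobj does in the diff below). */
static struct slot *cached_alloc(struct newobj_cache_model *c)
{
  retry:
    if (c->freelist) {
        struct slot *s = c->freelist;
        c->freelist = s->next;
        return s;
    }
    if (c->free_pages) {
        struct page *p = c->free_pages;
        c->free_pages = p->free_next;
        cache_fill(c, p);
        goto retry;
    }
    return NULL;   /* both caches empty: caller must take the VM lock and refill */
}

int main(void)
{
    /* Two fully free pages chained as the ractor's cached free pages. */
    struct page pages[2] = {{0}, {0}};
    for (int i = 0; i < 2; i++) {
        for (int s = 0; s < SLOTS_PER_PAGE; s++) {
            pages[i].slots[s].next = pages[i].freelist;
            pages[i].freelist = &pages[i].slots[s];
        }
        pages[i].free_slots = SLOTS_PER_PAGE;
    }
    pages[0].free_next = &pages[1];

    struct newobj_cache_model cache = { NULL, NULL, &pages[0] };

    int n = 0;
    while (cached_alloc(&cache) != NULL) n++;
    printf("allocated %d slots before hitting the slow path\n", n);   /* prints 8 */
    return 0;
}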
gc.c — 69 changes: 57 additions & 12 deletions
@@ -2157,34 +2157,70 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace,
     return obj;
 }
 
+static inline void
+ractor_cache_fill_freelist(rb_objspace_t *objspace, rb_ractor_t *cr, struct heap_page *page)
+{
+    cr->newobj_cache.using_page = page;
+    cr->newobj_cache.freelist = page->freelist;
+    page->free_slots = 0;
+    page->freelist = NULL;
+}
+
 static inline VALUE
 ractor_cached_freeobj(rb_objspace_t *objspace, rb_ractor_t *cr)
 {
+  retry:;
     RVALUE *p = cr->newobj_cache.freelist;
 
-    if (p) {
+    if (LIKELY(p != NULL)) {
         VALUE obj = (VALUE)p;
         cr->newobj_cache.freelist = p->as.free.next;
         asan_unpoison_object(obj, true);
         return obj;
     }
     else {
-        return Qfalse;
+        if (cr->newobj_cache.free_pages) {
+            struct heap_page *page = cr->newobj_cache.free_pages;
+            cr->newobj_cache.free_pages = page->free_next;
+            ractor_cache_fill_freelist(objspace, cr, page);
+            goto retry;
+        }
+        return false;
     }
 }
 
+#define RACTOR_SLOT_CACHE_NUM 512
+
 static struct heap_page *
-heap_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
+heap_next_freepages(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     ASSERT_vm_locking();
 
-    struct heap_page *page;
-
+    // find at least 1 page
     while (heap->free_pages == NULL) {
         heap_prepare(objspace, heap);
     }
-    page = heap->free_pages;
-    heap->free_pages = page->free_next;
 
+    // cache more pages if available
+    struct heap_page *page = heap->free_pages;
+    size_t free_slots = page->free_slots;
+    struct heap_page *p = page;
+
+    int page_cnt = 1;
+
+    while (p->free_next) {
+        if (free_slots >= RACTOR_SLOT_CACHE_NUM) {
+            break;
+        }
+        free_slots += p->free_slots;
+        p = p->free_next;
+        page_cnt++;
+    }
+
+    heap->free_pages = p->free_next;
+    p->free_next = NULL;
+
+    RUBY_DEBUG_LOG("free_slots:%d pages:%d", page->free_next ? (int)free_slots : (int)page->free_slots, page_cnt);
+
     GC_ASSERT(page->free_slots != 0);
     RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", page, page->freelist, page->free_slots);
@@ -2199,13 +2235,13 @@ ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr)
 {
     ASSERT_vm_locking();
     GC_ASSERT(cr->newobj_cache.freelist == NULL);
+    GC_ASSERT(cr->newobj_cache.free_pages == NULL);
 
-    struct heap_page *page = heap_next_freepage(objspace, heap_eden);
+    struct heap_page *page = heap_next_freepages(objspace, heap_eden);
+    struct heap_page *pages = page->free_next;
 
-    cr->newobj_cache.using_page = page;
-    cr->newobj_cache.freelist = page->freelist;
-    page->free_slots = 0;
-    page->freelist = NULL;
+    ractor_cache_fill_freelist(objspace, cr, page);
+    cr->newobj_cache.free_pages = pages;
 
     GC_ASSERT(RB_TYPE_P((VALUE)cr->newobj_cache.freelist, T_NONE));
 }
@@ -5041,8 +5077,17 @@ gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
             *p = freelist;
         }
 
+#if 0
+        int free_slots = 0;
+        for (RVALUE *p = freelist; p; p = p->as.free.next) free_slots++;
+        for (struct heap_page *page = r->newobj_cache.free_pages; page;
+             page = page->free_next) free_slots += page->free_slots;
+        fprintf(stderr, "r:%d unused free_slots:%d\n", r->id, free_slots);
+#endif
+
         r->newobj_cache.using_page = NULL;
         r->newobj_cache.freelist = NULL;
+        r->newobj_cache.free_pages = NULL;
     }
 }
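
For orientation, here is a hypothetical sketch, not part of this commit, of how an allocation path could tie the two entry points above together: try the lock-free per-ractor cache first, and take the VM lock only when both the freelist and the cached pages are exhausted, refilling via ractor_cache_slots. newobj_sketch is an invented name, and the real fast/slow paths in gc.c do more work (object setup via newobj_init, GC triggering); RB_VM_LOCK_ENTER/RB_VM_LOCK_LEAVE are the VM-lock macros used elsewhere in the VM.

/* Hypothetical caller (illustration only, not from this commit). */
static VALUE
newobj_sketch(rb_objspace_t *objspace, rb_ractor_t *cr)
{
    VALUE obj = ractor_cached_freeobj(objspace, cr);
    if (obj) return obj;                  /* fast path: no lock taken */

    RB_VM_LOCK_ENTER();
    {
        /* refill the per-ractor cache with pages worth >= RACTOR_SLOT_CACHE_NUM slots */
        ractor_cache_slots(objspace, cr);
        obj = ractor_cached_freeobj(objspace, cr);
    }
    RB_VM_LOCK_LEAVE();
    return obj;                           /* still to be initialized by the caller */
}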

ractor_core.h — 1 change: 1 addition & 0 deletions
@@ -137,6 +137,7 @@ struct rb_ractor_struct {
     struct {
         struct RVALUE *freelist;
         struct heap_page *using_page;
+        struct heap_page *free_pages;
     } newobj_cache;
 
     // gc.c rb_objspace_reachable_objects_from
