Skip to content

Commit

Permalink
Allocate frames on heap that will need promotion.
Browse files Browse the repository at this point in the history
By trying to use existing promotions as an indicator. This somehow
seems to end up doing *worse* for the CORE.setting build, however
it looks like it helps `make spectest` wallclock time a bit (though
it's all within noise).
  • Loading branch information
jnthn committed Aug 1, 2017
1 parent 53000c8 commit 21450df
Show file tree
Hide file tree
Showing 3 changed files with 82 additions and 24 deletions.
6 changes: 6 additions & 0 deletions src/6model/reprs/MVMStaticFrame.h
Expand Up @@ -73,6 +73,12 @@ struct MVMStaticFrameBody {
/* Does the frame have an exit handler we need to run? */
MVMuint8 has_exit_handler;

/* Should we allocate the frame directly on the heap? Doing so may avoid
 * needing to promote it there later. Set by measuring the number of times
 * the frame is promoted to the heap relative to the number of times it is
 * invoked, considering only pre-specialization invocations. */
MVMuint8 allocate_on_heap;

/* The compilation unit unique ID of this frame. */
MVMString *cuuid;

Expand Down
5 changes: 5 additions & 0 deletions src/6model/reprs/MVMStaticFrameSpesh.h
Expand Up @@ -22,6 +22,11 @@ struct MVMStaticFrameSpeshBody {
/* Specialization statistics assembled by the specialization worker thread
* from logs. */
MVMSpeshStats *spesh_stats;

/* Number of times the frame was promoted to the heap, when it was not
* specialized. Used to decide whether we'll directly allocate this frame
* on the heap. */
MVMuint32 num_heap_promotions;
};
struct MVMStaticFrameSpesh {
MVMObject common;
Expand Down
95 changes: 71 additions & 24 deletions src/core/frame.c
Expand Up @@ -219,22 +219,38 @@ static MVMFrame * autoclose(MVMThreadContext *tc, MVMStaticFrame *needed) {

/* Obtains memory for a frame on the thread-local call stack. */
static MVMFrame * allocate_frame(MVMThreadContext *tc, MVMStaticFrame *static_frame,
MVMSpeshCandidate *spesh_cand) {
MVMSpeshCandidate *spesh_cand, MVMint32 heap) {
MVMFrame *frame;
MVMint32 env_size, work_size;
MVMStaticFrameBody *static_frame_body;

/* Allocate the frame. */
MVMCallStackRegion *stack = tc->stack_current;
if (stack->alloc + sizeof(MVMFrame) >= stack->alloc_limit)
stack = MVM_callstack_region_next(tc);
frame = (MVMFrame *)stack->alloc;
stack->alloc += sizeof(MVMFrame);

/* Ensure collectable header flags and owner are zeroed, which means we'll
* never try to mark or root the frame. */
frame->header.flags = 0;
frame->header.owner = 0;
if (heap) {
/* Allocate frame on the heap. We know it's already zeroed. */
MVMROOT(tc, static_frame, {
if (tc->cur_frame)
MVM_frame_force_to_heap(tc, tc->cur_frame);
frame = MVM_gc_allocate_frame(tc);
});
}
else {
/* Allocate the frame on the call stack. */
MVMCallStackRegion *stack = tc->stack_current;
if (stack->alloc + sizeof(MVMFrame) >= stack->alloc_limit)
stack = MVM_callstack_region_next(tc);
frame = (MVMFrame *)stack->alloc;
stack->alloc += sizeof(MVMFrame);

/* Ensure collectable header flags and owner are zeroed, which means we'll
* never try to mark or root the frame. */
frame->header.flags = 0;
frame->header.owner = 0;

/* Current arguments callsite must be NULL as it's used in GC. Extra must
* be NULL so we know we don't have it. Flags should be zeroed. */
frame->cur_args_callsite = NULL;
frame->extra = NULL;
frame->flags = 0;
}

/* Allocate space for lexicals and work area. */
static_frame_body = &(static_frame->body);
Expand Down Expand Up @@ -275,12 +291,6 @@ static MVMFrame * allocate_frame(MVMThreadContext *tc, MVMStaticFrame *static_fr
/* Assign a sequence nr */
frame->sequence_nr = tc->next_frame_nr++;

/* Current arguments callsite must be NULL as it's used in GC. Extra must
* be NULL so we know we don't have it. Flags should be zeroed. */
frame->cur_args_callsite = NULL;
frame->extra = NULL;
frame->flags = 0;

return frame;
}

Expand Down Expand Up @@ -444,7 +454,19 @@ void MVM_frame_invoke(MVMThreadContext *tc, MVMStaticFrame *static_frame,
callsite, args);
if (spesh_cand >= 0) {
MVMSpeshCandidate *chosen_cand = spesh->body.spesh_candidates[spesh_cand];
frame = allocate_frame(tc, static_frame, chosen_cand);
if (static_frame->body.allocate_on_heap) {
MVMROOT(tc, static_frame, {
MVMROOT(tc, code_ref, {
MVMROOT(tc, outer, {
frame = allocate_frame(tc, static_frame, chosen_cand, 1);
});
});
});
}
else {
frame = allocate_frame(tc, static_frame, chosen_cand, 0);
frame->spesh_correlation_id = 0;
}
if (chosen_cand->jitcode) {
chosen_bytecode = chosen_cand->jitcode->bytecode;
frame->jit_entry_label = chosen_cand->jitcode->labels[0];
Expand All @@ -454,23 +476,35 @@ void MVM_frame_invoke(MVMThreadContext *tc, MVMStaticFrame *static_frame,
}
frame->effective_spesh_slots = chosen_cand->spesh_slots;
frame->spesh_cand = chosen_cand;
frame->spesh_correlation_id = 0;
}
else {
frame = allocate_frame(tc, static_frame, NULL);
if (static_frame->body.allocate_on_heap) {
MVMROOT(tc, static_frame, {
MVMROOT(tc, code_ref, {
MVMROOT(tc, outer, {
frame = allocate_frame(tc, static_frame, NULL, 1);
});
});
});
}
else {
frame = allocate_frame(tc, static_frame, NULL, 0);
frame->spesh_cand = NULL;
frame->effective_spesh_slots = NULL;
frame->spesh_correlation_id = 0;
}
chosen_bytecode = static_frame->body.bytecode;
frame->spesh_cand = NULL;
frame->effective_spesh_slots = NULL;

/* If we should be spesh logging, set the correlation ID. */
frame->spesh_correlation_id = 0;
if (tc->instance->spesh_enabled && tc->spesh_log) {
if (spesh->body.spesh_entries_recorded++ < MVM_SPESH_LOG_LOGGED_ENOUGH) {
MVMint32 id = ++tc->spesh_cid;
frame->spesh_correlation_id = id;
MVMROOT(tc, static_frame, {
MVMROOT(tc, frame, {
MVM_spesh_log_entry(tc, id, static_frame, callsite);
});
});
}
}
}
Expand Down Expand Up @@ -568,6 +602,19 @@ MVMFrame * MVM_frame_move_to_heap(MVMThreadContext *tc, MVMFrame *frame) {
/* Allocate a heap frame. */
MVMFrame *promoted = MVM_gc_allocate_frame(tc);

/* Bump heap promotion counter, to encourage allocating this kind
 * of frame directly on the heap in the future. If the frame was
 * entered more than ten times, and over two thirds of the entries
 * led to an eventual heap promotion, then we'll mark it to be
 * allocated right away on the heap. */
MVMStaticFrame *sf = cur_to_promote->static_info;
if (!sf->body.allocate_on_heap && !cur_to_promote->spesh_cand) {
MVMuint32 promos = sf->body.spesh->body.num_heap_promotions++;
MVMuint32 entries = sf->body.spesh->body.spesh_entries_recorded;
if (entries > 10 && promos > (2 * entries) / 3)
sf->body.allocate_on_heap = 1;
}

/* Copy current frame's body to it. */
memcpy(
(char *)promoted + sizeof(MVMCollectable),
Expand Down

0 comments on commit 21450df

Please sign in to comment.