Skip to content

Commit

Permalink
Merge pull request #1336 from MoarVM/flags-split
Browse files Browse the repository at this point in the history
Split `flags` in struct MVMCollectable to avoid data races when setting `MVM_CF_HAS_OBJECT_ID` and `MVM_CF_REF_FROM_GEN2`
  • Loading branch information
jnthn committed Aug 13, 2020
2 parents 03d3e43 + 7f854e6 commit 9a52033
Show file tree
Hide file tree
Showing 23 changed files with 163 additions and 147 deletions.
2 changes: 1 addition & 1 deletion src/6model/6model.c
Original file line number Diff line number Diff line change
Expand Up @@ -475,7 +475,7 @@ MVMuint64 MVM_6model_next_type_cache_id(MVMThreadContext *tc) {
* instances, marks the individual object as never repossessable. */
/* For a concrete object, flags just that one object as never repossessable;
 * for a type object, flags the whole type via its STable's mode flags. */
void MVM_6model_never_repossess(MVMThreadContext *tc, MVMObject *obj) {
    if (IS_CONCRETE(obj)) {
        /* MVM_CF_NEVER_REPOSSESS lives in the first flags byte (flags1)
         * after the flags split; the removed pre-split `flags` field must
         * not be referenced here. */
        obj->header.flags1 |= MVM_CF_NEVER_REPOSSESS;
    }
    else {
        obj->st->mode_flags |= MVM_NEVER_REPOSSESS_TYPE;
    }
}
Expand Down
61 changes: 37 additions & 24 deletions src/6model/6model.h
Original file line number Diff line number Diff line change
Expand Up @@ -103,54 +103,66 @@ struct MVMStorageSpec {
#define MVM_STORAGE_SPEC_CAN_BOX_STR 4
#define MVM_STORAGE_SPEC_CAN_BOX_MASK 7

/* Flags that may be set on any collectable. */
/* Flags that may be set on any collectable.
* These are split across two bytes so that we can set MVM_CF_HAS_OBJECT_ID and
* MVM_CF_REF_FROM_GEN2 from different threads without a data race.
* (Plenty of work for the CPU cache coherency hardware, but no race.)
* This avoids the need to use atomic operations to manipulate the flags. */

/* Needed to provide a migration path for the code in Rakudo's perl6_ops.c */
#define MVM_COLLECTABLE_FLAGS1

/* Flags stored in the first collectable flags byte (flags1). See the comment
 * above on why the flags are split across two bytes: MVM_CF_HAS_OBJECT_ID
 * (here) and MVM_CF_REF_FROM_GEN2 (in the second byte) may be set from
 * different threads, and keeping them in separate bytes avoids a data race
 * without atomic operations. */
typedef enum {
    /* Is a type object (and thus not a concrete instance). */
    MVM_CF_TYPE_OBJECT = 1,

    /* Is an STable. */
    MVM_CF_STABLE = 2,

    /* Is a heap-promoted call frame.
     * Beware, this flag bit must not overlap with any values set by macros
     * MVM_FRAME_FLAG_* in frame.h */
    MVM_CF_FRAME = 4,

    /* Have we allocated memory to store a serialization index?
     * NOTE(review): "SERIALZATION" (sic) is a long-standing misspelling that
     * matches every use site (e.g. sc.h); do not "fix" it in isolation. */
    MVM_CF_SERIALZATION_INDEX_ALLOCATED = 8,

    /* Have we arranged a persistent object ID for this object? */
    MVM_CF_HAS_OBJECT_ID = 16,

    /* Have we flagged this object as something we must never repossess? */
    /* Note: if you're hunting for a flag, some day in the future when we
     * have used them all, this one is easy enough to eliminate by having the
     * tiny number of objects marked this way in a remembered set. */
    MVM_CF_NEVER_REPOSSESS = 32
} MVMCollectableFlags1;

/* Flags stored in the second collectable flags byte (flags2). These are the
 * GC-related flags; MVM_CF_REF_FROM_GEN2 lives here, in a different byte
 * from MVM_CF_HAS_OBJECT_ID, so the two can be set from different threads
 * without a data race (see the comment above the first flags enum). */
typedef enum {
    /* Has already been seen once in GC nursery. */
    MVM_CF_NURSERY_SEEN = 1,

    /* Has been promoted to the old generation. */
    MVM_CF_SECOND_GEN = 2,

    /* Has already been added to the gen2 aggregates pointing to nursery
     * objects list. */
    MVM_CF_IN_GEN2_ROOT_LIST = 4,

    /* A full GC run has found this object to be live. */
    MVM_CF_GEN2_LIVE = 8,

    /* This object in fromspace is live with a valid forwarder. */
    /* TODO - should be possible to use the same bit for this and GEN2_LIVE. */
    MVM_CF_FORWARDER_VALID = 16,

    /* Is this object a nursery object that has been referenced from gen2?
     * Used to promote it earlier. */
    MVM_CF_REF_FROM_GEN2 = 32,

    /* Has this item been chained into a gen2 freelist? This is only used in
     * GC debug mode. */
    MVM_CF_DEBUG_IN_GEN2_FREE_LIST = 64
} MVMCollectableFlags2;

#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
struct MVMSerializationIndex {
Expand Down Expand Up @@ -199,7 +211,8 @@ struct MVMCollectable {
MVMuint32 owner;

/* Collectable flags (see MVMCollectableFlags). */
MVMuint16 flags;
MVMuint8 flags1;
MVMuint8 flags2;

/* Object size, in bytes. */
MVMuint16 size;
Expand Down Expand Up @@ -688,7 +701,7 @@ struct MVMREPROps {
#define OBJECT_BODY(o) (&(((MVMObjectStooge *)(o))->data))

/* Macros for getting/setting type-objectness. An object is concrete when the
 * MVM_CF_TYPE_OBJECT bit in the first flags byte (flags1) is clear. */
#define IS_CONCRETE(o) (!(((MVMObject *)o)->header.flags1 & MVM_CF_TYPE_OBJECT))

/* Some functions related to 6model core functionality. */
MVM_PUBLIC MVMObject * MVM_6model_get_how(MVMThreadContext *tc, MVMSTable *st);
Expand Down
2 changes: 1 addition & 1 deletion src/6model/reprs/VMArray.c
Original file line number Diff line number Diff line change
Expand Up @@ -810,7 +810,7 @@ static void copy_elements(MVMThreadContext *tc, MVMObject *src, MVMObject *dest,
if (elems > 0) {
MVMint64 i;
MVMuint16 kind;
MVMuint8 d_needs_barrier = dest->header.flags & MVM_CF_SECOND_GEN;
MVMuint8 d_needs_barrier = dest->header.flags2 & MVM_CF_SECOND_GEN;
if (s_repr_data && d_repr_data
&& s_repr_data->slot_type == d_repr_data->slot_type
&& s_repr_data->elem_size == d_repr_data->elem_size
Expand Down
6 changes: 3 additions & 3 deletions src/6model/sc.c
Original file line number Diff line number Diff line change
Expand Up @@ -364,7 +364,7 @@ void MVM_sc_disclaim(MVMThreadContext *tc, MVMSerializationContext *sc) {
obj = root_objects[i];
col = &obj->header;
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
if (col->flags1 & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
struct MVMSerializationIndex *const sci = col->sc_forward_u.sci;
col->sc_forward_u.sci = NULL;
MVM_free(sci);
Expand Down Expand Up @@ -401,7 +401,7 @@ void MVM_sc_disclaim(MVMThreadContext *tc, MVMSerializationContext *sc) {

/* SC repossession barrier. */
void MVM_SC_WB_OBJ(MVMThreadContext *tc, MVMObject *obj) {
assert(!(obj->header.flags & MVM_CF_FORWARDER_VALID));
assert(!(obj->header.flags2 & MVM_CF_FORWARDER_VALID));
assert(MVM_sc_get_idx_of_sc(&obj->header) != (MVMuint32)~0);
if (MVM_sc_get_idx_of_sc(&obj->header) > 0 && !(obj->st->mode_flags & MVM_NEVER_REPOSSESS_TYPE))
MVM_sc_wb_hit_obj(tc, obj);
Expand All @@ -418,7 +418,7 @@ void MVM_sc_wb_hit_obj(MVMThreadContext *tc, MVMObject *obj) {
return;

/* Same if the object is flagged as one to never repossess. */
if (obj->header.flags & MVM_CF_NEVER_REPOSSESS)
if (obj->header.flags1 & MVM_CF_NEVER_REPOSSESS)
return;

/* Otherwise, check that the object's SC is different from the SC
Expand Down
24 changes: 12 additions & 12 deletions src/6model/sc.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,18 +35,18 @@ MVM_STATIC_INLINE MVMObject * MVM_sc_get_sc_object(MVMThreadContext *tc, MVMComp
void MVM_sc_disclaim(MVMThreadContext *tc, MVMSerializationContext *sc);

/* Gets the index of the serialization context this collectable belongs to.
 * Must not be called while the collectable has a valid GC forwarder, since
 * sc_forward_u is then in use for forwarding. */
MVM_STATIC_INLINE MVMuint32 MVM_sc_get_idx_of_sc(MVMCollectable *col) {
    assert(!(col->flags2 & MVM_CF_FORWARDER_VALID));
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
    /* When the index overflowed the inline storage, it lives in a
     * separately allocated MVMSerializationIndex struct. */
    if (col->flags1 & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
        return col->sc_forward_u.sci->sc_idx;
#endif
    return col->sc_forward_u.sc.sc_idx;
}

MVM_STATIC_INLINE MVMuint32 MVM_sc_get_idx_in_sc(MVMCollectable *col) {
assert(!(col->flags & MVM_CF_FORWARDER_VALID));
assert(!(col->flags2 & MVM_CF_FORWARDER_VALID));
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
if (col->flags1 & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
return col->sc_forward_u.sci->idx;
if (col->sc_forward_u.sc.idx == MVM_DIRECT_SC_IDX_SENTINEL)
return ~0;
Expand All @@ -55,17 +55,17 @@ MVM_STATIC_INLINE MVMuint32 MVM_sc_get_idx_in_sc(MVMCollectable *col) {
}

MVM_STATIC_INLINE void MVM_sc_set_idx_in_sc(MVMCollectable *col, MVMuint32 i) {
assert(!(col->flags & MVM_CF_FORWARDER_VALID));
assert(!(col->flags2 & MVM_CF_FORWARDER_VALID));
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
if (col->flags1 & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
col->sc_forward_u.sci->idx = i;
} else if (i >= MVM_DIRECT_SC_IDX_SENTINEL) {
struct MVMSerializationIndex *const sci
= MVM_malloc(sizeof(struct MVMSerializationIndex));
sci->sc_idx = col->sc_forward_u.sc.sc_idx;
sci->idx = i;
col->sc_forward_u.sci = sci;
col->flags |= MVM_CF_SERIALZATION_INDEX_ALLOCATED;
col->flags1 |= MVM_CF_SERIALZATION_INDEX_ALLOCATED;
} else
#endif
{
Expand All @@ -76,7 +76,7 @@ MVM_STATIC_INLINE void MVM_sc_set_idx_in_sc(MVMCollectable *col, MVMuint32 i) {
/* Gets a collectable's SC. */
MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_collectable_sc(MVMThreadContext *tc, MVMCollectable *col) {
MVMuint32 sc_idx;
assert(!(col->flags & MVM_CF_FORWARDER_VALID));
assert(!(col->flags2 & MVM_CF_FORWARDER_VALID));
sc_idx = MVM_sc_get_idx_of_sc(col);
assert(sc_idx != ~(MVMuint32)0);
return sc_idx > 0 ? tc->instance->all_scs[sc_idx]->sc : NULL;
Expand All @@ -99,9 +99,9 @@ MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_stable_sc(MVMThreadContex

/* Sets a collectable's SC. */
MVM_STATIC_INLINE void MVM_sc_set_collectable_sc(MVMThreadContext *tc, MVMCollectable *col, MVMSerializationContext *sc) {
assert(!(col->flags & MVM_CF_FORWARDER_VALID));
assert(!(col->flags2 & MVM_CF_FORWARDER_VALID));
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
if (col->flags1 & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
col->sc_forward_u.sci->sc_idx = sc->body->sc_idx;
col->sc_forward_u.sci->idx = ~0;
} else
Expand All @@ -115,7 +115,7 @@ MVM_STATIC_INLINE void MVM_sc_set_collectable_sc(MVMThreadContext *tc, MVMCollec
sci->sc_idx = sc->body->sc_idx;
sci->idx = ~0;
col->sc_forward_u.sci = sci;
col->flags |= MVM_CF_SERIALZATION_INDEX_ALLOCATED;
col->flags1 |= MVM_CF_SERIALZATION_INDEX_ALLOCATED;
} else
#endif
{
Expand Down Expand Up @@ -172,7 +172,7 @@ void MVM_sc_wb_hit_st(MVMThreadContext *tc, MVMSTable *st);
void MVM_SC_WB_OBJ(MVMThreadContext *tc, MVMObject *obj);

MVM_STATIC_INLINE void MVM_SC_WB_ST(MVMThreadContext *tc, MVMSTable *st) {
assert(!(st->header.flags & MVM_CF_FORWARDER_VALID));
assert(!(st->header.flags2 & MVM_CF_FORWARDER_VALID));
assert(MVM_sc_get_idx_of_sc(&st->header) != ~(MVMuint32)0);
if (MVM_sc_get_idx_of_sc(&st->header) > 0)
MVM_sc_wb_hit_st(tc, st);
Expand Down
3 changes: 2 additions & 1 deletion src/core/frame.c
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,8 @@ static MVMFrame * allocate_frame(MVMThreadContext *tc, MVMStaticFrame *static_fr

/* Ensure collectable header flags and owner are zeroed, which means we'll
* never try to mark or root the frame. */
frame->header.flags = 0;
frame->header.flags1 = 0;
frame->header.flags2 = 0;
frame->header.owner = 0;

/* Current arguments callsite must be NULL as it's used in GC. Extra must
Expand Down
4 changes: 2 additions & 2 deletions src/core/frame.h
Original file line number Diff line number Diff line change
Expand Up @@ -199,9 +199,9 @@ struct MVMInvocationSpec {

/* Checks if a frame is allocated on a call stack or on the heap. If it is on
* the call stack, then it will have zeroed flags (since heap-allocated frames
* always have the "I'm a heap frame" bit set). */
* always have the "I'm a heap frame" bit set - MVM_CF_FRAME). */
/* Checks if a frame is allocated on a call stack or on the heap. Call-stack
 * frames have a zeroed flags1 byte, whereas heap-allocated frames always
 * carry the MVM_CF_FRAME bit in flags1. */
MVM_STATIC_INLINE MVMuint32 MVM_FRAME_IS_ON_CALLSTACK(MVMThreadContext *tc, MVMFrame *frame) {
    return frame->header.flags1 == 0;
}

/* Forces a frame to the callstack if needed. Done as a static inline to make
Expand Down
6 changes: 3 additions & 3 deletions src/gc/allocation.c
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ MVMSTable * MVM_gc_allocate_stable(MVMThreadContext *tc, const MVMREPROps *repr,
MVMSTable *st;
MVMROOT(tc, how, {
st = MVM_gc_allocate_zeroed(tc, sizeof(MVMSTable));
st->header.flags |= MVM_CF_STABLE;
st->header.flags1 = MVM_CF_STABLE;
st->header.size = sizeof(MVMSTable);
st->header.owner = tc->thread_id;
st->REPR = repr;
Expand All @@ -79,7 +79,7 @@ MVMObject * MVM_gc_allocate_type_object(MVMThreadContext *tc, MVMSTable *st) {
MVMObject *obj;
MVMROOT(tc, st, {
obj = MVM_gc_allocate_zeroed(tc, sizeof(MVMObject));
obj->header.flags |= MVM_CF_TYPE_OBJECT;
obj->header.flags1 = MVM_CF_TYPE_OBJECT;
obj->header.size = sizeof(MVMObject);
obj->header.owner = tc->thread_id;
MVM_ASSIGN_REF(tc, &(obj->header), obj->st, st);
Expand All @@ -104,7 +104,7 @@ MVMObject * MVM_gc_allocate_object(MVMThreadContext *tc, MVMSTable *st) {
/* Allocates a new heap frame. */
MVMFrame * MVM_gc_allocate_frame(MVMThreadContext *tc) {
MVMFrame *f = MVM_gc_allocate_zeroed(tc, sizeof(MVMFrame));
f->header.flags |= MVM_CF_FRAME;
f->header.flags1 = MVM_CF_FRAME;
f->header.size = sizeof(MVMFrame);
f->header.owner = tc->thread_id;
return f;
Expand Down
Loading

0 comments on commit 9a52033

Please sign in to comment.