
Allow passing gct as a function parameter in the GC

This patch adds support for passing the gct variable as a
parameter in the garbage collector. The GC functions that
need access to the gct variable are declared with the
DECLARE_GCT_PARAM macro, which adds gct as an extra
argument. When calling these functions, use the GCT_PARAM
macro to automatically add the gct variable to the function
call.

The definitions of these two macros are controlled by the
PASS_GCT_AS_PARAM preprocessor variable. If this variable is
defined, the macros perform the parameter passing; otherwise
they are defined to do nothing.

Currently we only pass the gct variable as a parameter when
using an LLVM-based C compiler such as clang or llvm-gcc.
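
As a standalone sketch of the mechanism (the gc_thread struct and the
helper functions here are stand-ins, not the real RTS types, but the
macro definitions mirror the ones added to GCTDecl.h below):

#include <stdio.h>

typedef struct { int thunk_selector_depth; } gc_thread;

#define PASS_GCT_AS_PARAM 1   /* comment out to emulate the non-llvm configuration */

#ifdef PASS_GCT_AS_PARAM
/* for function declarations */
#define DECLARE_GCT_PARAM(...)  gc_thread *gct, __VA_ARGS__
#define DECLARE_GCT_ONLY_PARAM  gc_thread *gct
/* for function calls */
#define GCT_PARAM(...)          gct, __VA_ARGS__
#define GCT_ONLY_PARAM          gct
#else
static gc_thread *gct;          /* stands in for the register/__thread gct of the real RTS */
#define DECLARE_GCT_PARAM(...)  __VA_ARGS__
#define DECLARE_GCT_ONLY_PARAM  void
#define GCT_PARAM(...)          __VA_ARGS__
#define GCT_ONLY_PARAM          /* nothing */
#endif

/* A helper that needs gct and takes other arguments. */
static void bump_depth (DECLARE_GCT_PARAM(int by))
{
    gct->thunk_selector_depth += by;
}

/* A helper that needs gct and takes no other arguments. */
static int current_depth (DECLARE_GCT_ONLY_PARAM)
{
    return gct->thunk_selector_depth;
}

int main (void)
{
    gc_thread t = { 0 };
#ifdef PASS_GCT_AS_PARAM
    gc_thread *gct = &t;        /* a local gct is threaded explicitly through every call */
#else
    gct = &t;
#endif
    bump_depth(GCT_PARAM(3));
    printf("depth = %d\n", current_depth(GCT_ONLY_PARAM));
    return 0;
}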
1 parent b5a88e9 · commit ed77af50f34a3aedcefe2f0dd624cc9e4010a2bc · @dmpots committed
Showing with 342 additions and 307 deletions.
  1. +50 −51 rts/sm/Evac.c
  2. +10 −0 rts/sm/Evac.h
  3. +27 −17 rts/sm/GC.c
  4. +24 −10 rts/sm/GCTDecl.h
  5. +2 −2 rts/sm/GCUtils.c
  6. +3 −3 rts/sm/GCUtils.h
  7. +10 −10 rts/sm/MarkWeak.c
  8. +4 −2 rts/sm/MarkWeak.h
  9. +206 −206 rts/sm/Scav.c
  10. +6 −6 rts/sm/Scav.h
101 rts/sm/Evac.c
@@ -36,23 +36,22 @@ StgWord64 whitehole_spin = 0;
#endif
#if !defined(PARALLEL_GC)
-#define copy_tag_nolock(p, info, src, size, stp, tag) \
- copy_tag(p, info, src, size, stp, tag)
+#define copy_tag_nolock(...) copy_tag(__VA_ARGS__)
#endif
/* Used to avoid long recursion due to selector thunks
*/
#define MAX_THUNK_SELECTOR_DEPTH 16
-static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
-STATIC_INLINE void evacuate_large(StgPtr p);
+static void eval_thunk_selector (DECLARE_GCT_PARAM(StgClosure **q, StgSelector * p, rtsBool));
+STATIC_INLINE void evacuate_large(DECLARE_GCT_PARAM(StgPtr p));
/* -----------------------------------------------------------------------------
Allocate some space in which to copy an object.
-------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
-alloc_for_copy (nat size, nat gen_no)
+alloc_for_copy (DECLARE_GCT_PARAM(nat size, nat gen_no))
{
StgPtr to;
gen_workspace *ws;
@@ -78,7 +77,7 @@ alloc_for_copy (nat size, nat gen_no)
to = ws->todo_free;
ws->todo_free += size;
if (ws->todo_free > ws->todo_lim) {
- to = todo_block_full(size, ws);
+ to = todo_block_full(GCT_PARAM(size, ws));
}
ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
@@ -90,13 +89,13 @@ alloc_for_copy (nat size, nat gen_no)
-------------------------------------------------------------------------- */
STATIC_INLINE GNUC_ATTR_HOT void
-copy_tag(StgClosure **p, const StgInfoTable *info,
- StgClosure *src, nat size, nat gen_no, StgWord tag)
+copy_tag(DECLARE_GCT_PARAM(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, nat gen_no, StgWord tag))
{
StgPtr to, from;
nat i;
- to = alloc_for_copy(size,gen_no);
+ to = alloc_for_copy(GCT_PARAM(size,gen_no));
from = (StgPtr)src;
to[0] = (W_)info;
@@ -113,7 +112,7 @@ copy_tag(StgClosure **p, const StgInfoTable *info,
const StgInfoTable *new_info;
new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
if (new_info != info) {
- return evacuate(p); // does the failed_to_evac stuff
+ return evacuate(GCT_PARAM(p)); // does the failed_to_evac stuff
} else {
*p = TAG_CLOSURE(tag,(StgClosure*)to);
}
@@ -132,13 +131,13 @@ copy_tag(StgClosure **p, const StgInfoTable *info,
#if defined(PARALLEL_GC)
STATIC_INLINE void
-copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
- StgClosure *src, nat size, nat gen_no, StgWord tag)
+copy_tag_nolock(DECLARE_GCT_PARAM(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, nat gen_no, StgWord tag))
{
StgPtr to, from;
nat i;
- to = alloc_for_copy(size,gen_no);
+ to = alloc_for_copy(GCT_PARAM(size,gen_no));
from = (StgPtr)src;
to[0] = (W_)info;
@@ -169,8 +168,8 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
* used to optimise evacuation of TSOs.
*/
static rtsBool
-copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve,
- nat size_to_copy, nat gen_no)
+copyPart(DECLARE_GCT_PARAM(StgClosure **p, StgClosure *src, nat size_to_reserve,
+ nat size_to_copy, nat gen_no))
{
StgPtr to, from;
nat i;
@@ -187,14 +186,14 @@ copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve,
}
if (IS_FORWARDING_PTR(info)) {
src->header.info = (const StgInfoTable *)info;
- evacuate(p); // does the failed_to_evac stuff
+ evacuate(GCT_PARAM(p)); // does the failed_to_evac stuff
return rtsFalse;
}
#else
info = (W_)src->header.info;
#endif
- to = alloc_for_copy(size_to_reserve, gen_no);
+ to = alloc_for_copy(GCT_PARAM(size_to_reserve, gen_no));
from = (StgPtr)src;
to[0] = info;
@@ -221,10 +220,10 @@ copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve,
/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE GNUC_ATTR_HOT void
-copy(StgClosure **p, const StgInfoTable *info,
- StgClosure *src, nat size, nat gen_no)
+copy(DECLARE_GCT_PARAM(StgClosure **p, const StgInfoTable *info,
+ StgClosure *src, nat size, nat gen_no))
{
- copy_tag(p,info,src,size,gen_no,0);
+ copy_tag(GCT_PARAM(p,info,src,size,gen_no,0));
}
/* -----------------------------------------------------------------------------
@@ -239,7 +238,7 @@ copy(StgClosure **p, const StgInfoTable *info,
-------------------------------------------------------------------------- */
STATIC_INLINE void
-evacuate_large(StgPtr p)
+evacuate_large(DECLARE_GCT_PARAM(StgPtr p))
{
bdescr *bd;
generation *gen, *new_gen;
@@ -353,7 +352,7 @@ evacuate_large(StgPtr p)
------------------------------------------------------------------------- */
REGPARM1 GNUC_ATTR_HOT void
-evacuate(StgClosure **p)
+evacuate(DECLARE_GCT_PARAM(StgClosure **p))
{
bdescr *bd = NULL;
nat gen_no;
@@ -489,7 +488,7 @@ evacuate(StgClosure **p)
/* evacuate large objects by re-linking them onto a different list.
*/
if (bd->flags & BF_LARGE) {
- evacuate_large((P_)q);
+ evacuate_large(GCT_PARAM((P_)q));
return;
}
@@ -544,7 +543,7 @@ evacuate(StgClosure **p)
case MUT_VAR_DIRTY:
case MVAR_CLEAN:
case MVAR_DIRTY:
- copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
+ copy(GCT_PARAM(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no));
return;
// For ints and chars of low value, save space by replacing references to
@@ -573,7 +572,7 @@ evacuate(StgClosure **p)
);
}
else {
- copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
+ copy_tag_nolock(GCT_PARAM(p,info,q,sizeofW(StgHeader)+1,gen_no,tag));
}
#endif
return;
@@ -582,12 +581,12 @@ evacuate(StgClosure **p)
case FUN_0_1:
case FUN_1_0:
case CONSTR_1_0:
- copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
+ copy_tag_nolock(GCT_PARAM(p,info,q,sizeofW(StgHeader)+1,gen_no,tag));
return;
case THUNK_1_0:
case THUNK_0_1:
- copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
+ copy(GCT_PARAM(p,info,q,sizeofW(StgThunk)+1,gen_no));
return;
case THUNK_1_1:
@@ -596,7 +595,7 @@ evacuate(StgClosure **p)
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
- copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
+ copy(GCT_PARAM(p,info,q,sizeofW(StgThunk)+2,gen_no));
return;
case FUN_1_1:
@@ -604,21 +603,21 @@ evacuate(StgClosure **p)
case FUN_0_2:
case CONSTR_1_1:
case CONSTR_2_0:
- copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
+ copy_tag_nolock(GCT_PARAM(p,info,q,sizeofW(StgHeader)+2,gen_no,tag));
return;
case CONSTR_0_2:
- copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
+ copy_tag_nolock(GCT_PARAM(p,info,q,sizeofW(StgHeader)+2,gen_no,tag));
return;
case THUNK:
- copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
+ copy(GCT_PARAM(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no));
return;
case FUN:
case IND_PERM:
case CONSTR:
- copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
+ copy_tag_nolock(GCT_PARAM(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag));
return;
case BLACKHOLE:
@@ -636,7 +635,7 @@ evacuate(StgClosure **p)
|| i == &stg_WHITEHOLE_info
|| i == &stg_BLOCKING_QUEUE_CLEAN_info
|| i == &stg_BLOCKING_QUEUE_DIRTY_info) {
- copy(p,info,q,sizeofW(StgInd),gen_no);
+ copy(GCT_PARAM(p,info,q,sizeofW(StgInd),gen_no));
return;
}
ASSERT(i != &stg_IND_info);
@@ -650,15 +649,15 @@ evacuate(StgClosure **p)
case WEAK:
case PRIM:
case MUT_PRIM:
- copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
+ copy(GCT_PARAM(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no));
return;
case BCO:
- copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
+ copy(GCT_PARAM(p,info,q,bco_sizeW((StgBCO *)q),gen_no));
return;
case THUNK_SELECTOR:
- eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
+ eval_thunk_selector(GCT_PARAM(p, (StgSelector *)q, rtsTrue));
return;
case IND:
@@ -682,20 +681,20 @@ evacuate(StgClosure **p)
barf("evacuate: stack frame at %p\n", q);
case PAP:
- copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
+ copy(GCT_PARAM(p,info,q,pap_sizeW((StgPAP*)q),gen_no));
return;
case AP:
- copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
+ copy(GCT_PARAM(p,info,q,ap_sizeW((StgAP*)q),gen_no));
return;
case AP_STACK:
- copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
+ copy(GCT_PARAM(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no));
return;
case ARR_WORDS:
// just copy the block
- copy(p,info,q,arr_words_sizeW((StgArrWords *)q),gen_no);
+ copy(GCT_PARAM(p,info,q,arr_words_sizeW((StgArrWords *)q),gen_no));
return;
case MUT_ARR_PTRS_CLEAN:
@@ -703,11 +702,11 @@ evacuate(StgClosure **p)
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
// just copy the block
- copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
+ copy(GCT_PARAM(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no));
return;
case TSO:
- copy(p,info,q,sizeofW(StgTSO),gen_no);
+ copy(GCT_PARAM(p,info,q,sizeofW(StgTSO),gen_no));
return;
case STACK:
@@ -721,8 +720,8 @@ evacuate(StgClosure **p)
StgPtr r, s;
rtsBool mine;
- mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
- sizeofW(StgStack), gen_no);
+ mine = copyPart(GCT_PARAM(p,(StgClosure *)stack, stack_sizeW(stack),
+ sizeofW(StgStack), gen_no));
if (mine) {
new_stack = (StgStack *)*p;
move_STACK(stack, new_stack);
@@ -736,7 +735,7 @@ evacuate(StgClosure **p)
}
case TREC_CHUNK:
- copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
+ copy(GCT_PARAM(p,info,q,sizeofW(StgTRecChunk),gen_no));
return;
default:
@@ -810,7 +809,7 @@ unchain_thunk_selectors(StgSelector *p, StgClosure *val)
}
static void
-eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
+eval_thunk_selector (DECLARE_GCT_PARAM(StgClosure **q, StgSelector * p, rtsBool evac))
// NB. for legacy reasons, p & q are swapped around :(
{
nat field;
@@ -855,7 +854,7 @@ eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
if (bd->flags & BF_MARKED) {
// must call evacuate() to mark this closure if evac==rtsTrue
*q = (StgClosure *)p;
- if (evac) evacuate(q);
+ if (evac) evacuate(GCT_PARAM(q));
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
return;
}
@@ -887,7 +886,7 @@ eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
// - undo the chain we've built to point to p.
SET_INFO(p, (const StgInfoTable *)info_ptr);
*q = (StgClosure *)p;
- if (evac) evacuate(q);
+ if (evac) evacuate(GCT_PARAM(q));
unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
return;
}
@@ -990,7 +989,7 @@ eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
// evacuate() cannot recurse through
// eval_thunk_selector(), because we know val is not
// a THUNK_SELECTOR.
- if (evac) evacuate(q);
+ if (evac) evacuate(GCT_PARAM(q));
return;
}
@@ -1042,7 +1041,7 @@ eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
// rtsFalse says "don't evacuate the result". It will,
// however, update any THUNK_SELECTORs that are evaluated
// along the way.
- eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
+ eval_thunk_selector(GCT_PARAM(&val, (StgSelector*)selectee, rtsFalse));
gct->thunk_selector_depth--;
// did we actually manage to evaluate it?
@@ -1079,7 +1078,7 @@ eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
// check whether it was updated in the meantime.
*q = (StgClosure *)p;
if (evac) {
- copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no);
+ copy(GCT_PARAM(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no));
}
unchain_thunk_selectors(prev_thunk_selector, *q);
return;
10 rts/sm/Evac.h
@@ -28,12 +28,22 @@
// their arguments on the stack.
#if __GNUC__ >= 2 && (defined(x86_64_HOST_ARCH) || defined(i386_HOST_ARCH))
#define REGPARM1 __attribute__((regparm(1)))
+#define REGPARM2 __attribute__((regparm(2)))
#else
#define REGPARM1
+#define REGPARM2
#endif
+#include "GCThread.h"
+#include "GCTDecl.h"
+
+#ifdef PASS_GCT_AS_PARAM
+REGPARM2 void evacuate (gc_thread* gct, StgClosure **p);
+REGPARM2 void evacuate1 (gc_thread* gct, StgClosure **p);
+#else
REGPARM1 void evacuate (StgClosure **p);
REGPARM1 void evacuate1 (StgClosure **p);
+#endif
extern lnat thunk_selector_depth;
44 rts/sm/GC.c
@@ -144,12 +144,12 @@ static void init_gc_thread (gc_thread *t);
static void resize_generations (void);
static void resize_nursery (void);
static void start_gc_threads (void);
-static void scavenge_until_all_done (void);
+static void scavenge_until_all_done (DECLARE_GCT_ONLY_PARAM);
static StgWord inc_running (void);
static StgWord dec_running (void);
static void wakeup_gc_threads (nat me);
static void shutdown_gc_threads (nat me);
-static void collect_gct_blocks (void);
+static void collect_gct_blocks (DECLARE_GCT_ONLY_PARAM);
#if 0 && defined(DEBUG)
static void gcCAFs (void);
@@ -179,6 +179,9 @@ GarbageCollect (rtsBool force_major_gc,
lnat live_blocks, live_words, allocated, max_copied, avg_copied;
#if defined(THREADED_RTS)
gc_thread *saved_gct;
+#if defined(PASS_GCT_AS_PARAM)
+ gc_thread *gct;
+#endif
#endif
nat g, n;
@@ -328,13 +331,13 @@ GarbageCollect (rtsBool force_major_gc,
if (n_gc_threads == 1) {
for (n = 0; n < n_capabilities; n++) {
#if defined(THREADED_RTS)
- scavenge_capability_mut_Lists1(&capabilities[n]);
+ scavenge_capability_mut_Lists1(GCT_PARAM((&capabilities[n])));
#else
scavenge_capability_mut_lists(&capabilities[n]);
#endif
}
} else {
- scavenge_capability_mut_lists(gct->cap);
+ scavenge_capability_mut_lists(GCT_PARAM(gct->cap));
}
// follow roots from the CAF list (used by GHCi)
@@ -360,7 +363,7 @@ GarbageCollect (rtsBool force_major_gc,
#endif
// Mark the weak pointer list, and prepare to detect dead weak pointers.
- markWeakPtrList();
+ markWeakPtrList(GCT_ONLY_PARAM);
initWeakForGC();
// Mark the stable pointer table.
@@ -372,13 +375,13 @@ GarbageCollect (rtsBool force_major_gc,
*/
for (;;)
{
- scavenge_until_all_done();
+ scavenge_until_all_done(GCT_ONLY_PARAM);
// The other threads are now stopped. We might recurse back to
// here, but from now on this is the only thread.
// must be last... invariant is that everything is fully
// scavenged at this point.
- if (traverseWeakPtrList()) { // returns rtsTrue if evaced something
+ if (traverseWeakPtrList(GCT_ONLY_PARAM)) { // returns rtsTrue if evaced something
inc_running();
continue;
}
@@ -911,7 +914,7 @@ dec_running (void)
}
static rtsBool
-any_work (void)
+any_work (DECLARE_GCT_ONLY_PARAM)
{
int g;
gen_workspace *ws;
@@ -958,7 +961,7 @@ any_work (void)
}
static void
-scavenge_until_all_done (void)
+scavenge_until_all_done (DECLARE_GCT_ONLY_PARAM)
{
DEBUG_ONLY( nat r );
@@ -966,15 +969,15 @@ scavenge_until_all_done (void)
loop:
#if defined(THREADED_RTS)
if (n_gc_threads > 1) {
- scavenge_loop();
+ scavenge_loop(GCT_ONLY_PARAM);
} else {
- scavenge_loop1();
+ scavenge_loop1(GCT_ONLY_PARAM);
}
#else
scavenge_loop();
#endif
- collect_gct_blocks();
+ collect_gct_blocks(GCT_ONLY_PARAM);
// scavenge_loop() only exits when there's no work to do
@@ -990,7 +993,7 @@ scavenge_until_all_done (void)
while (gc_running_threads != 0) {
// usleep(1);
- if (any_work()) {
+ if (any_work(GCT_ONLY_PARAM)) {
inc_running();
traceEventGcWork(gct->cap);
goto loop;
@@ -1010,6 +1013,9 @@ void
gcWorkerThread (Capability *cap)
{
gc_thread *saved_gct;
+#if defined(PASS_GCT_AS_PARAM)
+ gc_thread *gct;
+#endif
// necessary if we stole a callee-saves register for gct:
saved_gct = gct;
@@ -1040,9 +1046,9 @@ gcWorkerThread (Capability *cap)
// Every thread evacuates some roots.
gct->evac_gen_no = 0;
markCapability(mark_root, gct, cap, rtsTrue/*prune sparks*/);
- scavenge_capability_mut_lists(cap);
+ scavenge_capability_mut_lists(GCT_PARAM(cap));
- scavenge_until_all_done();
+ scavenge_until_all_done(GCT_ONLY_PARAM);
#ifdef THREADED_RTS
// Now that the whole heap is marked, we discard any sparks that
@@ -1338,7 +1344,7 @@ prepare_uncollected_gen (generation *gen)
-------------------------------------------------------------------------- */
static void
-collect_gct_blocks (void)
+collect_gct_blocks (DECLARE_GCT_ONLY_PARAM)
{
nat g;
gen_workspace *ws;
@@ -1411,11 +1417,15 @@ mark_root(void *user USED_IF_THREADS, StgClosure **root)
// incorrect.
#if defined(THREADED_RTS)
gc_thread *saved_gct;
+#if defined(PASS_GCT_AS_PARAM)
+ gc_thread *gct;
+#else
saved_gct = gct;
#endif
+#endif
SET_GCT(user);
- evacuate(root);
+ evacuate(GCT_PARAM(root));
SET_GCT(saved_gct);
}
34 rts/sm/GCTDecl.h
@@ -21,16 +21,11 @@
register then use gcc's __thread extension to create a thread-local
variable.
-------------------------------------------------------------------------- */
-
#if defined(THREADED_RTS)
#define GLOBAL_REG_DECL(type,name,reg) register type name REG(reg);
-#ifdef llvm_CC_FLAVOR
-#define SET_GCT(to) (setThreadLocalVar(&gctKey, to))
-#else
#define SET_GCT(to) gct = (to)
-#endif
@@ -40,8 +35,8 @@
// about 5% in GC performance, but of course that might change as gcc
// improves. -- SDM 2009/04/03
//
-// For MacOSX, we can use an llvm-based C compiler which will store the gct
-// in a thread local variable using pthreads.
+// For MacOSX, we can use an llvm-based C compiler which will pass gct
+// as a parameter to the GC functions
extern __thread gc_thread* gct;
#define DECLARE_GCT __thread gc_thread* gct;
@@ -50,9 +45,8 @@ extern __thread gc_thread* gct;
// LLVM does not support the __thread extension and will generate
// incorrect code for global register variables. If we are compiling
// with a C compiler that uses an LLVM back end (clang or llvm-gcc) then we
-// use pthread_getspecific() to handle the thread local storage for gct.
-#define gct ((gc_thread *)(getThreadLocalVar(&gctKey)))
-#define DECLARE_GCT /* nothing */
+// pass the gct variable as a parameter to all the functions that need it
+#define PASS_GCT_AS_PARAM 1
#elif defined(sparc_HOST_ARCH)
// On SPARC we can't pin gct to a register. Names like %l1 are just offsets
@@ -104,6 +98,26 @@ extern StgWord8 the_gc_thread[];
#endif // THREADED_RTS
+// Definitions for passing the GCT variable as a parameter to the GC functions
+#if defined(PASS_GCT_AS_PARAM)
+#define DECLARE_GCT /* nothing */
+#undef gct
+
+// for function declarations
+#define DECLARE_GCT_PARAM(...) gc_thread *gct, __VA_ARGS__
+#define DECLARE_GCT_ONLY_PARAM gc_thread *gct
+// for function calls
+#define GCT_PARAM(...) gct, __VA_ARGS__
+#define GCT_ONLY_PARAM gct
+#else
+// for function declarations
+#define DECLARE_GCT_PARAM(...) __VA_ARGS__
+#define DECLARE_GCT_ONLY_PARAM void
+// for function calls
+#define GCT_PARAM(...) __VA_ARGS__
+#define GCT_ONLY_PARAM /* nothing */
+#endif
+
#include "EndPrivate.h"
#endif // SM_GCTDECL_H
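
To make the effect of these definitions concrete, this is roughly what a
declaration and a call from this patch become after preprocessing under
each configuration (a sketch of the expansion, not literal compiler
output):

/* With PASS_GCT_AS_PARAM defined (llvm-based C compilers): */
static void scavenge_until_all_done (gc_thread *gct);
to = todo_block_full(gct, size, ws);

/* Without PASS_GCT_AS_PARAM (gct stays in a register or __thread variable): */
static void scavenge_until_all_done (void);
to = todo_block_full(size, ws);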
4 rts/sm/GCUtils.c
@@ -113,7 +113,7 @@ grab_local_todo_block (gen_workspace *ws)
#if defined(THREADED_RTS)
bdescr *
-steal_todo_block (nat g)
+steal_todo_block (DECLARE_GCT_PARAM(nat g))
{
nat n;
bdescr *bd;
@@ -159,7 +159,7 @@ push_scanned_block (bdescr *bd, gen_workspace *ws)
}
StgPtr
-todo_block_full (nat size, gen_workspace *ws)
+todo_block_full (DECLARE_GCT_PARAM(nat size, gen_workspace *ws))
{
StgPtr p;
bdescr *bd;
6 rts/sm/GCUtils.h
@@ -22,12 +22,12 @@ bdescr *allocBlock_sync(void);
void freeChain_sync(bdescr *bd);
void push_scanned_block (bdescr *bd, gen_workspace *ws);
-StgPtr todo_block_full (nat size, gen_workspace *ws);
+StgPtr todo_block_full (DECLARE_GCT_PARAM(nat size, gen_workspace *ws));
StgPtr alloc_todo_block (gen_workspace *ws, nat size);
bdescr *grab_local_todo_block (gen_workspace *ws);
#if defined(THREADED_RTS)
-bdescr *steal_todo_block (nat s);
+bdescr *steal_todo_block (DECLARE_GCT_PARAM(nat s));
#endif
// Returns true if a block is partially full. This predicate is used to try
@@ -48,7 +48,7 @@ void printMutableList (bdescr *bd);
// mutable lists attached to the current gc_thread structure, which
// are the same as the mutable lists on the Capability.
INLINE_HEADER void
-recordMutableGen_GC (StgClosure *p, nat gen_no)
+recordMutableGen_GC (DECLARE_GCT_PARAM(StgClosure *p, nat gen_no))
{
bdescr *bd;
20 rts/sm/MarkWeak.c
@@ -82,7 +82,7 @@ StgWeak *old_weak_ptr_list; // also pending finaliser list
// List of threads found to be unreachable
StgTSO *resurrected_threads;
-static void resurrectUnreachableThreads (generation *gen);
+static void resurrectUnreachableThreads (DECLARE_GCT_PARAM(generation *gen));
static rtsBool tidyThreadList (generation *gen);
void
@@ -95,7 +95,7 @@ initWeakForGC(void)
}
rtsBool
-traverseWeakPtrList(void)
+traverseWeakPtrList(DECLARE_GCT_ONLY_PARAM)
{
StgWeak *w, **last_w, *next_w;
StgClosure *new;
@@ -135,8 +135,8 @@ traverseWeakPtrList(void)
if (new != NULL) {
w->key = new;
// evacuate the value and finalizer
- evacuate(&w->value);
- evacuate(&w->finalizer);
+ evacuate(GCT_PARAM(&w->value));
+ evacuate(GCT_PARAM(&w->finalizer));
// remove this weak ptr from the old_weak_ptr list
*last_w = w->link;
// and put it on the new weak ptr list
@@ -167,7 +167,7 @@ traverseWeakPtrList(void)
*/
if (flag == rtsFalse) {
for (w = old_weak_ptr_list; w; w = w->link) {
- evacuate(&w->finalizer);
+ evacuate(GCT_PARAM(&w->finalizer));
}
// Next, move to the WeakThreads stage after fully
@@ -207,7 +207,7 @@ traverseWeakPtrList(void)
{
nat g;
for (g = 0; g <= N; g++) {
- resurrectUnreachableThreads(&generations[g]);
+ resurrectUnreachableThreads(GCT_PARAM(&generations[g]));
}
}
@@ -221,7 +221,7 @@ traverseWeakPtrList(void)
}
}
- static void resurrectUnreachableThreads (generation *gen)
+ static void resurrectUnreachableThreads (DECLARE_GCT_PARAM(generation *gen))
{
StgTSO *t, *tmp, *next;
@@ -238,7 +238,7 @@ traverseWeakPtrList(void)
continue;
default:
tmp = t;
- evacuate((StgClosure **)&tmp);
+ evacuate(GCT_PARAM((StgClosure **)&tmp));
tmp->global_link = resurrected_threads;
resurrected_threads = tmp;
}
@@ -301,7 +301,7 @@ static rtsBool tidyThreadList (generation *gen)
-------------------------------------------------------------------------- */
void
-markWeakPtrList ( void )
+markWeakPtrList ( DECLARE_GCT_ONLY_PARAM )
{
StgWeak *w, **last_w;
@@ -320,7 +320,7 @@ markWeakPtrList ( void )
}
#endif
- evacuate((StgClosure **)last_w);
+ evacuate(GCT_PARAM((StgClosure **)last_w));
w = *last_w;
if (w->header.info == &stg_DEAD_WEAK_info) {
last_w = &(((StgDeadWeak*)w)->link);
6 rts/sm/MarkWeak.h
@@ -15,14 +15,16 @@
#define SM_MARKWEAK_H
#include "BeginPrivate.h"
+#include "GCThread.h"
+#include "GCTDecl.h"
extern StgWeak *old_weak_ptr_list;
extern StgTSO *resurrected_threads;
extern StgTSO *exception_threads;
void initWeakForGC ( void );
-rtsBool traverseWeakPtrList ( void );
-void markWeakPtrList ( void );
+rtsBool traverseWeakPtrList ( DECLARE_GCT_ONLY_PARAM );
+void markWeakPtrList ( DECLARE_GCT_ONLY_PARAM );
#include "EndPrivate.h"
412 rts/sm/Scav.c
@@ -28,17 +28,17 @@
#include "Capability.h"
#include "LdvProfile.h"
-static void scavenge_stack (StgPtr p, StgPtr stack_end);
+static void scavenge_stack (DECLARE_GCT_PARAM(StgPtr p, StgPtr stack_end));
-static void scavenge_large_bitmap (StgPtr p,
- StgLargeBitmap *large_bitmap,
- nat size );
+static void scavenge_large_bitmap (DECLARE_GCT_PARAM( StgPtr p,
+ StgLargeBitmap *large_bitmap,
+ nat size ));
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
-# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
+# define scavenge_mutable_list(...) scavenge_mutable_list1(__VA_ARGS__)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
@@ -47,7 +47,7 @@ static void scavenge_large_bitmap (StgPtr p,
-------------------------------------------------------------------------- */
static void
-scavengeTSO (StgTSO *tso)
+scavengeTSO (DECLARE_GCT_PARAM(StgTSO *tso))
{
rtsBool saved_eager;
@@ -61,21 +61,21 @@ scavengeTSO (StgTSO *tso)
saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- evacuate((StgClosure **)&tso->blocked_exceptions);
- evacuate((StgClosure **)&tso->bq);
+ evacuate(GCT_PARAM((StgClosure **)&tso->blocked_exceptions));
+ evacuate(GCT_PARAM((StgClosure **)&tso->bq));
// scavange current transaction record
- evacuate((StgClosure **)&tso->trec);
+ evacuate(GCT_PARAM((StgClosure **)&tso->trec));
- evacuate((StgClosure **)&tso->stackobj);
+ evacuate(GCT_PARAM((StgClosure **)&tso->stackobj));
- evacuate((StgClosure **)&tso->_link);
+ evacuate(GCT_PARAM((StgClosure **)&tso->_link));
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
|| tso->why_blocked == BlockedOnMsgThrowTo
|| tso->why_blocked == NotBlocked
) {
- evacuate(&tso->block_info.closure);
+ evacuate(GCT_PARAM(&tso->block_info.closure));
}
#ifdef THREADED_RTS
// in the THREADED_RTS, block_info.closure must always point to a
@@ -96,7 +96,7 @@ scavengeTSO (StgTSO *tso)
Mutable arrays of pointers
-------------------------------------------------------------------------- */
-static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
+static StgPtr scavenge_mut_arr_ptrs (DECLARE_GCT_PARAM(StgMutArrPtrs *a))
{
lnat m;
rtsBool any_failed;
@@ -108,7 +108,7 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
for (; p < q; p++) {
- evacuate((StgClosure**)p);
+ evacuate(GCT_PARAM((StgClosure**)p));
}
if (gct->failed_to_evac) {
any_failed = rtsTrue;
@@ -122,7 +122,7 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
q = (StgPtr)&a->payload[a->ptrs];
if (p < q) {
for (; p < q; p++) {
- evacuate((StgClosure**)p);
+ evacuate(GCT_PARAM((StgClosure**)p));
}
if (gct->failed_to_evac) {
any_failed = rtsTrue;
@@ -138,7 +138,7 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
}
// scavenge only the marked areas of a MUT_ARR_PTRS
-static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
+static StgPtr scavenge_mut_arr_ptrs_marked (DECLARE_GCT_PARAM(StgMutArrPtrs *a))
{
lnat m;
StgPtr p, q;
@@ -152,7 +152,7 @@ static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
(StgPtr)&a->payload[a->ptrs]);
for (; p < q; p++) {
- evacuate((StgClosure**)p);
+ evacuate(GCT_PARAM((StgClosure**)p));
}
if (gct->failed_to_evac) {
any_failed = rtsTrue;
@@ -173,7 +173,7 @@ static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
-------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
-scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
+scavenge_arg_block (DECLARE_GCT_PARAM(StgFunInfoTable *fun_info, StgClosure **args))
{
StgPtr p;
StgWord bitmap;
@@ -187,7 +187,7 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
goto small_bitmap;
case ARG_GEN_BIG:
size = GET_FUN_LARGE_BITMAP(fun_info)->size;
- scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
+ scavenge_large_bitmap(GCT_PARAM(p, GET_FUN_LARGE_BITMAP(fun_info), size));
p += size;
break;
default:
@@ -196,7 +196,7 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
small_bitmap:
while (size > 0) {
if ((bitmap & 1) == 0) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
p++;
bitmap = bitmap >> 1;
@@ -208,7 +208,7 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
-scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
+scavenge_PAP_payload (DECLARE_GCT_PARAM(StgClosure *fun, StgClosure **payload, StgWord size))
{
StgPtr p;
StgWord bitmap;
@@ -223,11 +223,11 @@ scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
goto small_bitmap;
case ARG_GEN_BIG:
- scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
+ scavenge_large_bitmap(GCT_PARAM(p, GET_FUN_LARGE_BITMAP(fun_info), size));
p += size;
break;
case ARG_BCO:
- scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
+ scavenge_large_bitmap(GCT_PARAM((StgPtr)payload, BCO_BITMAP(fun), size));
p += size;
break;
default:
@@ -235,7 +235,7 @@ scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
small_bitmap:
while (size > 0) {
if ((bitmap & 1) == 0) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
p++;
bitmap = bitmap >> 1;
@@ -247,17 +247,17 @@ scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
-scavenge_PAP (StgPAP *pap)
+scavenge_PAP (DECLARE_GCT_PARAM(StgPAP *pap))
{
- evacuate(&pap->fun);
- return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
+ evacuate(GCT_PARAM(&pap->fun));
+ return scavenge_PAP_payload (GCT_PARAM(pap->fun, pap->payload, pap->n_args));
}
STATIC_INLINE StgPtr
-scavenge_AP (StgAP *ap)
+scavenge_AP (DECLARE_GCT_PARAM(StgAP *ap))
{
- evacuate(&ap->fun);
- return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
+ evacuate(GCT_PARAM(&ap->fun));
+ return scavenge_PAP_payload (GCT_PARAM(ap->fun, ap->payload, ap->n_args));
}
/* -----------------------------------------------------------------------------
@@ -268,7 +268,7 @@ scavenge_AP (StgAP *ap)
* pointers we get back from evacuate().
*/
static void
-scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
+scavenge_large_srt_bitmap( DECLARE_GCT_PARAM(StgLargeSRT *large_srt) )
{
nat i, b, size;
StgWord bitmap;
@@ -280,7 +280,7 @@ scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
p = (StgClosure **)large_srt->srt;
for (i = 0; i < size; ) {
if ((bitmap & 1) != 0) {
- evacuate(p);
+ evacuate(GCT_PARAM(p));
}
i++;
p++;
@@ -298,7 +298,7 @@ scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
* never dereference it.
*/
STATIC_INLINE GNUC_ATTR_HOT void
-scavenge_srt (StgClosure **srt, nat srt_bitmap)
+scavenge_srt (DECLARE_GCT_PARAM(StgClosure **srt, nat srt_bitmap))
{
nat bitmap;
StgClosure **p;
@@ -307,7 +307,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap)
p = srt;
if (bitmap == (StgHalfWord)(-1)) {
- scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
+ scavenge_large_srt_bitmap( GCT_PARAM((StgLargeSRT *)srt ));
return;
}
@@ -328,7 +328,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap)
evacuate(p);
}
#else
- evacuate(p);
+ evacuate(GCT_PARAM(p));
#endif
}
p++;
@@ -338,25 +338,25 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap)
STATIC_INLINE GNUC_ATTR_HOT void
-scavenge_thunk_srt(const StgInfoTable *info)
+scavenge_thunk_srt(DECLARE_GCT_PARAM(const StgInfoTable *info))
{
StgThunkInfoTable *thunk_info;
if (!major_gc) return;
thunk_info = itbl_to_thunk_itbl(info);
- scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
+ scavenge_srt(GCT_PARAM((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap));
}
STATIC_INLINE GNUC_ATTR_HOT void
-scavenge_fun_srt(const StgInfoTable *info)
+scavenge_fun_srt(DECLARE_GCT_PARAM(const StgInfoTable *info))
{
StgFunInfoTable *fun_info;
if (!major_gc) return;
fun_info = itbl_to_fun_itbl(info);
- scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
+ scavenge_srt(GCT_PARAM((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap));
}
/* -----------------------------------------------------------------------------
@@ -372,7 +372,7 @@ scavenge_fun_srt(const StgInfoTable *info)
-------------------------------------------------------------------------- */
static GNUC_ATTR_HOT void
-scavenge_block (bdescr *bd)
+scavenge_block (DECLARE_GCT_PARAM(bdescr *bd))
{
StgPtr p, q;
StgInfoTable *info;
@@ -410,9 +410,9 @@ scavenge_block (bdescr *bd)
{
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
- evacuate((StgClosure **)&mvar->head);
- evacuate((StgClosure **)&mvar->tail);
- evacuate((StgClosure **)&mvar->value);
+ evacuate(GCT_PARAM((StgClosure **)&mvar->head));
+ evacuate(GCT_PARAM((StgClosure **)&mvar->tail));
+ evacuate(GCT_PARAM((StgClosure **)&mvar->value));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -425,85 +425,85 @@ scavenge_block (bdescr *bd)
}
case FUN_2_0:
- scavenge_fun_srt(info);
- evacuate(&((StgClosure *)p)->payload[1]);
- evacuate(&((StgClosure *)p)->payload[0]);
+ scavenge_fun_srt(GCT_PARAM(info));
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[1]));
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[0]));
p += sizeofW(StgHeader) + 2;
break;
case THUNK_2_0:
- scavenge_thunk_srt(info);
- evacuate(&((StgThunk *)p)->payload[1]);
- evacuate(&((StgThunk *)p)->payload[0]);
+ scavenge_thunk_srt(GCT_PARAM(info));
+ evacuate(GCT_PARAM(&((StgThunk *)p)->payload[1]));
+ evacuate(GCT_PARAM(&((StgThunk *)p)->payload[0]));
p += sizeofW(StgThunk) + 2;
break;
case CONSTR_2_0:
- evacuate(&((StgClosure *)p)->payload[1]);
- evacuate(&((StgClosure *)p)->payload[0]);
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[1]));
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[0]));
p += sizeofW(StgHeader) + 2;
break;
case THUNK_1_0:
- scavenge_thunk_srt(info);
- evacuate(&((StgThunk *)p)->payload[0]);
+ scavenge_thunk_srt(GCT_PARAM(info));
+ evacuate(GCT_PARAM(&((StgThunk *)p)->payload[0]));
p += sizeofW(StgThunk) + 1;
break;
case FUN_1_0:
- scavenge_fun_srt(info);
+ scavenge_fun_srt(GCT_PARAM(info));
case CONSTR_1_0:
- evacuate(&((StgClosure *)p)->payload[0]);
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[0]));
p += sizeofW(StgHeader) + 1;
break;
case THUNK_0_1:
- scavenge_thunk_srt(info);
+ scavenge_thunk_srt(GCT_PARAM(info));
p += sizeofW(StgThunk) + 1;
break;
case FUN_0_1:
- scavenge_fun_srt(info);
+ scavenge_fun_srt(GCT_PARAM(info));
case CONSTR_0_1:
p += sizeofW(StgHeader) + 1;
break;
case THUNK_0_2:
- scavenge_thunk_srt(info);
+ scavenge_thunk_srt(GCT_PARAM(info));
p += sizeofW(StgThunk) + 2;
break;
case FUN_0_2:
- scavenge_fun_srt(info);
+ scavenge_fun_srt(GCT_PARAM(info));
case CONSTR_0_2:
p += sizeofW(StgHeader) + 2;
break;
case THUNK_1_1:
- scavenge_thunk_srt(info);
- evacuate(&((StgThunk *)p)->payload[0]);
+ scavenge_thunk_srt(GCT_PARAM(info));
+ evacuate(GCT_PARAM(&((StgThunk *)p)->payload[0]));
p += sizeofW(StgThunk) + 2;
break;
case FUN_1_1:
- scavenge_fun_srt(info);
+ scavenge_fun_srt(GCT_PARAM(info));
case CONSTR_1_1:
- evacuate(&((StgClosure *)p)->payload[0]);
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[0]));
p += sizeofW(StgHeader) + 2;
break;
case FUN:
- scavenge_fun_srt(info);
+ scavenge_fun_srt(GCT_PARAM(info));
goto gen_obj;
case THUNK:
{
StgPtr end;
- scavenge_thunk_srt(info);
+ scavenge_thunk_srt(GCT_PARAM(info));
end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
p += info->layout.payload.nptrs;
break;
@@ -518,7 +518,7 @@ scavenge_block (bdescr *bd)
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
p += info->layout.payload.nptrs;
break;
@@ -526,23 +526,23 @@ scavenge_block (bdescr *bd)
case BCO: {
StgBCO *bco = (StgBCO *)p;
- evacuate((StgClosure **)&bco->instrs);
- evacuate((StgClosure **)&bco->literals);
- evacuate((StgClosure **)&bco->ptrs);
+ evacuate(GCT_PARAM((StgClosure **)&bco->instrs));
+ evacuate(GCT_PARAM((StgClosure **)&bco->literals));
+ evacuate(GCT_PARAM((StgClosure **)&bco->ptrs));
p += bco_sizeW(bco);
break;
}
case IND_PERM:
case BLACKHOLE:
- evacuate(&((StgInd *)p)->indirectee);
+ evacuate(GCT_PARAM(&((StgInd *)p)->indirectee));
p += sizeofW(StgInd);
break;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
gct->eager_promotion = rtsFalse;
- evacuate(&((StgMutVar *)p)->var);
+ evacuate(GCT_PARAM(&((StgMutVar *)p)->var));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -558,10 +558,10 @@ scavenge_block (bdescr *bd)
StgBlockingQueue *bq = (StgBlockingQueue *)p;
gct->eager_promotion = rtsFalse;
- evacuate(&bq->bh);
- evacuate((StgClosure**)&bq->owner);
- evacuate((StgClosure**)&bq->queue);
- evacuate((StgClosure**)&bq->link);
+ evacuate(GCT_PARAM(&bq->bh));
+ evacuate(GCT_PARAM((StgClosure**)&bq->owner));
+ evacuate(GCT_PARAM((StgClosure**)&bq->queue));
+ evacuate(GCT_PARAM((StgClosure**)&bq->link));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -576,7 +576,7 @@ scavenge_block (bdescr *bd)
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- evacuate(&s->selectee);
+ evacuate(GCT_PARAM(&s->selectee));
p += THUNK_SELECTOR_sizeW();
break;
}
@@ -586,18 +586,18 @@ scavenge_block (bdescr *bd)
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- evacuate(&ap->fun);
- scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
+ evacuate(GCT_PARAM(&ap->fun));
+ scavenge_stack(GCT_PARAM((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size));
p = (StgPtr)ap->payload + ap->size;
break;
}
case PAP:
- p = scavenge_PAP((StgPAP *)p);
+ p = scavenge_PAP(GCT_PARAM((StgPAP *)p));
break;
case AP:
- p = scavenge_AP((StgAP *)p);
+ p = scavenge_AP(GCT_PARAM((StgAP *)p));
break;
case ARR_WORDS:
@@ -614,7 +614,7 @@ scavenge_block (bdescr *bd)
// avoid traversing it during minor GCs.
gct->eager_promotion = rtsFalse;
- p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
+ p = scavenge_mut_arr_ptrs(GCT_PARAM((StgMutArrPtrs*)p));
if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
@@ -631,7 +631,7 @@ scavenge_block (bdescr *bd)
case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
- p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
+ p = scavenge_mut_arr_ptrs(GCT_PARAM((StgMutArrPtrs*)p));
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
@@ -645,7 +645,7 @@ scavenge_block (bdescr *bd)
case TSO:
{
- scavengeTSO((StgTSO *)p);
+ scavengeTSO(GCT_PARAM((StgTSO *)p));
p += sizeofW(StgTSO);
break;
}
@@ -656,7 +656,7 @@ scavenge_block (bdescr *bd)
gct->eager_promotion = rtsFalse;
- scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ scavenge_stack(GCT_PARAM(stack->sp, stack->stack + stack->stack_size));
stack->dirty = gct->failed_to_evac;
p += stack_sizeW(stack);
@@ -672,7 +672,7 @@ scavenge_block (bdescr *bd)
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
p += info->layout.payload.nptrs;
@@ -687,11 +687,11 @@ scavenge_block (bdescr *bd)
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
gct->eager_promotion = rtsFalse;
- evacuate((StgClosure **)&tc->prev_chunk);
+ evacuate(GCT_PARAM((StgClosure **)&tc->prev_chunk));
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- evacuate((StgClosure **)&e->tvar);
- evacuate((StgClosure **)&e->expected_value);
- evacuate((StgClosure **)&e->new_value);
+ evacuate(GCT_PARAM((StgClosure **)&e->tvar));
+ evacuate(GCT_PARAM((StgClosure **)&e->expected_value));
+ evacuate(GCT_PARAM((StgClosure **)&e->new_value));
}
gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
@@ -714,7 +714,7 @@ scavenge_block (bdescr *bd)
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
if (bd->gen_no > 0) {
- recordMutableGen_GC((StgClosure *)q, bd->gen_no);
+ recordMutableGen_GC(GCT_PARAM((StgClosure *)q, bd->gen_no));
}
}
}
@@ -748,7 +748,7 @@ scavenge_block (bdescr *bd)
-------------------------------------------------------------------------- */
static void
-scavenge_mark_stack(void)
+scavenge_mark_stack(DECLARE_GCT_ONLY_PARAM)
{
StgPtr p, q;
StgInfoTable *info;
@@ -770,9 +770,9 @@ scavenge_mark_stack(void)
{
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
- evacuate((StgClosure **)&mvar->head);
- evacuate((StgClosure **)&mvar->tail);
- evacuate((StgClosure **)&mvar->value);
+ evacuate(GCT_PARAM((StgClosure **)&mvar->head));
+ evacuate(GCT_PARAM((StgClosure **)&mvar->tail));
+ evacuate(GCT_PARAM((StgClosure **)&mvar->value));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -784,47 +784,47 @@ scavenge_mark_stack(void)
}
case FUN_2_0:
- scavenge_fun_srt(info);
- evacuate(&((StgClosure *)p)->payload[1]);
- evacuate(&((StgClosure *)p)->payload[0]);
+ scavenge_fun_srt(GCT_PARAM(info));
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[1]));
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[0]));
break;
case THUNK_2_0:
- scavenge_thunk_srt(info);
- evacuate(&((StgThunk *)p)->payload[1]);
- evacuate(&((StgThunk *)p)->payload[0]);
+ scavenge_thunk_srt(GCT_PARAM(info));
+ evacuate(GCT_PARAM(&((StgThunk *)p)->payload[1]));
+ evacuate(GCT_PARAM(&((StgThunk *)p)->payload[0]));
break;
case CONSTR_2_0:
- evacuate(&((StgClosure *)p)->payload[1]);
- evacuate(&((StgClosure *)p)->payload[0]);
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[1]));
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[0]));
break;
case FUN_1_0:
case FUN_1_1:
- scavenge_fun_srt(info);
- evacuate(&((StgClosure *)p)->payload[0]);
+ scavenge_fun_srt(GCT_PARAM(info));
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[0]));
break;
case THUNK_1_0:
case THUNK_1_1:
- scavenge_thunk_srt(info);
- evacuate(&((StgThunk *)p)->payload[0]);
+ scavenge_thunk_srt(GCT_PARAM(info));
+ evacuate(GCT_PARAM(&((StgThunk *)p)->payload[0]));
break;
case CONSTR_1_0:
case CONSTR_1_1:
- evacuate(&((StgClosure *)p)->payload[0]);
+ evacuate(GCT_PARAM(&((StgClosure *)p)->payload[0]));
break;
case FUN_0_1:
case FUN_0_2:
- scavenge_fun_srt(info);
+ scavenge_fun_srt(GCT_PARAM(info));
break;
case THUNK_0_1:
case THUNK_0_2:
- scavenge_thunk_srt(info);
+ scavenge_thunk_srt(GCT_PARAM(info));
break;
case CONSTR_0_1:
@@ -832,17 +832,17 @@ scavenge_mark_stack(void)
break;
case FUN:
- scavenge_fun_srt(info);
+ scavenge_fun_srt(GCT_PARAM(info));
goto gen_obj;
case THUNK:
{
StgPtr end;
- scavenge_thunk_srt(info);
+ scavenge_thunk_srt(GCT_PARAM(info));
end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
break;
}
@@ -856,16 +856,16 @@ scavenge_mark_stack(void)
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
break;
}
case BCO: {
StgBCO *bco = (StgBCO *)p;
- evacuate((StgClosure **)&bco->instrs);
- evacuate((StgClosure **)&bco->literals);
- evacuate((StgClosure **)&bco->ptrs);
+ evacuate(GCT_PARAM((StgClosure **)&bco->instrs));
+ evacuate(GCT_PARAM((StgClosure **)&bco->literals));
+ evacuate(GCT_PARAM((StgClosure **)&bco->ptrs));
break;
}
@@ -877,13 +877,13 @@ scavenge_mark_stack(void)
case IND:
case BLACKHOLE:
- evacuate(&((StgInd *)p)->indirectee);
+ evacuate(GCT_PARAM(&((StgInd *)p)->indirectee));
break;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
gct->eager_promotion = rtsFalse;
- evacuate(&((StgMutVar *)p)->var);
+ evacuate(GCT_PARAM(&((StgMutVar *)p)->var));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -899,10 +899,10 @@ scavenge_mark_stack(void)
StgBlockingQueue *bq = (StgBlockingQueue *)p;
gct->eager_promotion = rtsFalse;
- evacuate(&bq->bh);
- evacuate((StgClosure**)&bq->owner);
- evacuate((StgClosure**)&bq->queue);
- evacuate((StgClosure**)&bq->link);
+ evacuate(GCT_PARAM(&bq->bh));
+ evacuate(GCT_PARAM((StgClosure**)&bq->owner));
+ evacuate(GCT_PARAM((StgClosure**)&bq->queue));
+ evacuate(GCT_PARAM((StgClosure**)&bq->link));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -919,7 +919,7 @@ scavenge_mark_stack(void)
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- evacuate(&s->selectee);
+ evacuate(GCT_PARAM(&s->selectee));
break;
}
@@ -928,17 +928,17 @@ scavenge_mark_stack(void)
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- evacuate(&ap->fun);
- scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
+ evacuate(GCT_PARAM(&ap->fun));
+ scavenge_stack(GCT_PARAM((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size));
break;
}
case PAP:
- scavenge_PAP((StgPAP *)p);
+ scavenge_PAP(GCT_PARAM((StgPAP *)p));
break;
case AP:
- scavenge_AP((StgAP *)p);
+ scavenge_AP(GCT_PARAM((StgAP *)p));
break;
case MUT_ARR_PTRS_CLEAN:
@@ -951,7 +951,7 @@ scavenge_mark_stack(void)
// avoid traversing it during minor GCs.
gct->eager_promotion = rtsFalse;
- scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+ scavenge_mut_arr_ptrs(GCT_PARAM((StgMutArrPtrs *)p));
if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
@@ -970,7 +970,7 @@ scavenge_mark_stack(void)
{
StgPtr q = p;
- scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+ scavenge_mut_arr_ptrs(GCT_PARAM((StgMutArrPtrs *)p));
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
@@ -984,7 +984,7 @@ scavenge_mark_stack(void)
case TSO:
{
- scavengeTSO((StgTSO*)p);
+ scavengeTSO(GCT_PARAM((StgTSO*)p));
break;
}
@@ -994,7 +994,7 @@ scavenge_mark_stack(void)
gct->eager_promotion = rtsFalse;
- scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ scavenge_stack(GCT_PARAM(stack->sp, stack->stack + stack->stack_size));
stack->dirty = gct->failed_to_evac;
gct->eager_promotion = saved_eager_promotion;
@@ -1009,7 +1009,7 @@ scavenge_mark_stack(void)
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
gct->eager_promotion = saved_eager_promotion;
@@ -1023,11 +1023,11 @@ scavenge_mark_stack(void)
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
gct->eager_promotion = rtsFalse;
- evacuate((StgClosure **)&tc->prev_chunk);
+ evacuate(GCT_PARAM((StgClosure **)&tc->prev_chunk));
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- evacuate((StgClosure **)&e->tvar);
- evacuate((StgClosure **)&e->expected_value);
- evacuate((StgClosure **)&e->new_value);
+ evacuate(GCT_PARAM((StgClosure **)&e->tvar));
+ evacuate(GCT_PARAM((StgClosure **)&e->expected_value));
+ evacuate(GCT_PARAM((StgClosure **)&e->new_value));
}
gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
@@ -1042,7 +1042,7 @@ scavenge_mark_stack(void)
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
if (gct->evac_gen_no) {
- recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
+ recordMutableGen_GC(GCT_PARAM((StgClosure *)q, gct->evac_gen_no));
}
}
} // while (p = pop_mark_stack())
@@ -1057,7 +1057,7 @@ scavenge_mark_stack(void)
-------------------------------------------------------------------------- */
static rtsBool
-scavenge_one(StgPtr p)
+scavenge_one(DECLARE_GCT_PARAM(StgPtr p))
{
const StgInfoTable *info;
rtsBool no_luck;
@@ -1075,9 +1075,9 @@ scavenge_one(StgPtr p)
{
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
- evacuate((StgClosure **)&mvar->head);
- evacuate((StgClosure **)&mvar->tail);
- evacuate((StgClosure **)&mvar->value);
+ evacuate(GCT_PARAM((StgClosure **)&mvar->head));
+ evacuate(GCT_PARAM((StgClosure **)&mvar->tail));
+ evacuate(GCT_PARAM((StgClosure **)&mvar->value));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -1099,7 +1099,7 @@ scavenge_one(StgPtr p)
end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
- evacuate((StgClosure **)q);
+ evacuate(GCT_PARAM((StgClosure **)q));
}
break;
}
@@ -1124,7 +1124,7 @@ scavenge_one(StgPtr p)
end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
- evacuate((StgClosure **)q);
+ evacuate(GCT_PARAM((StgClosure **)q));
}
break;
}
@@ -1134,7 +1134,7 @@ scavenge_one(StgPtr p)
StgPtr q = p;
gct->eager_promotion = rtsFalse;
- evacuate(&((StgMutVar *)p)->var);
+ evacuate(GCT_PARAM(&((StgMutVar *)p)->var));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -1150,10 +1150,10 @@ scavenge_one(StgPtr p)
StgBlockingQueue *bq = (StgBlockingQueue *)p;
gct->eager_promotion = rtsFalse;
- evacuate(&bq->bh);
- evacuate((StgClosure**)&bq->owner);
- evacuate((StgClosure**)&bq->queue);
- evacuate((StgClosure**)&bq->link);
+ evacuate(GCT_PARAM(&bq->bh));
+ evacuate(GCT_PARAM((StgClosure**)&bq->owner));
+ evacuate(GCT_PARAM((StgClosure**)&bq->queue));
+ evacuate(GCT_PARAM((StgClosure**)&bq->link));
gct->eager_promotion = saved_eager_promotion;
if (gct->failed_to_evac) {
@@ -1167,7 +1167,7 @@ scavenge_one(StgPtr p)
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- evacuate(&s->selectee);
+ evacuate(GCT_PARAM(&s->selectee));
break;
}
@@ -1175,18 +1175,18 @@ scavenge_one(StgPtr p)
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- evacuate(&ap->fun);
- scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
+ evacuate(GCT_PARAM(&ap->fun));
+ scavenge_stack(GCT_PARAM((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size));
p = (StgPtr)ap->payload + ap->size;
break;
}
case PAP:
- p = scavenge_PAP((StgPAP *)p);
+ p = scavenge_PAP(GCT_PARAM((StgPAP *)p));
break;
case AP:
- p = scavenge_AP((StgAP *)p);
+ p = scavenge_AP(GCT_PARAM((StgAP *)p));
break;
case ARR_WORDS:
@@ -1202,7 +1202,7 @@ scavenge_one(StgPtr p)
// avoid traversing it during minor GCs.
gct->eager_promotion = rtsFalse;
- scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+ scavenge_mut_arr_ptrs(GCT_PARAM((StgMutArrPtrs *)p));
if (gct->failed_to_evac) {
((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
@@ -1219,7 +1219,7 @@ scavenge_one(StgPtr p)
case MUT_ARR_PTRS_FROZEN0:
{
// follow everything
- scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+ scavenge_mut_arr_ptrs(GCT_PARAM((StgMutArrPtrs *)p));
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
@@ -1233,7 +1233,7 @@ scavenge_one(StgPtr p)
case TSO:
{
- scavengeTSO((StgTSO*)p);
+ scavengeTSO(GCT_PARAM((StgTSO*)p));
break;
}
@@ -1243,7 +1243,7 @@ scavenge_one(StgPtr p)
gct->eager_promotion = rtsFalse;
- scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ scavenge_stack(GCT_PARAM(stack->sp, stack->stack + stack->stack_size));
stack->dirty = gct->failed_to_evac;
gct->eager_promotion = saved_eager_promotion;
@@ -1258,7 +1258,7 @@ scavenge_one(StgPtr p)
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
gct->eager_promotion = saved_eager_promotion;
@@ -1273,11 +1273,11 @@ scavenge_one(StgPtr p)
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
gct->eager_promotion = rtsFalse;
- evacuate((StgClosure **)&tc->prev_chunk);
+ evacuate(GCT_PARAM((StgClosure **)&tc->prev_chunk));
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- evacuate((StgClosure **)&e->tvar);
- evacuate((StgClosure **)&e->expected_value);
- evacuate((StgClosure **)&e->new_value);
+ evacuate(GCT_PARAM((StgClosure **)&e->tvar));
+ evacuate(GCT_PARAM((StgClosure **)&e->expected_value));
+ evacuate(GCT_PARAM((StgClosure **)&e->new_value));
}
gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
@@ -1290,7 +1290,7 @@ scavenge_one(StgPtr p)
// on the large-object list and then gets updated. See #3424.
case BLACKHOLE:
case IND_STATIC:
- evacuate(&((StgInd *)p)->indirectee);
+ evacuate(GCT_PARAM(&((StgInd *)p)->indirectee));
#if 0 && defined(DEBUG)
if (RtsFlags.DebugFlags.gc)
@@ -1337,7 +1337,7 @@ scavenge_one(StgPtr p)
-------------------------------------------------------------------------- */
void
-scavenge_mutable_list(bdescr *bd, generation *gen)
+scavenge_mutable_list(DECLARE_GCT_PARAM(bdescr *bd, generation *gen))
{
StgPtr p, q;
nat gen_no;
@@ -1378,7 +1378,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
//
switch (get_itbl((StgClosure *)p)->type) {
case MUT_ARR_PTRS_CLEAN:
- recordMutableGen_GC((StgClosure *)p,gen_no);
+ recordMutableGen_GC(GCT_PARAM((StgClosure *)p,gen_no));
continue;
case MUT_ARR_PTRS_DIRTY:
{
@@ -1386,7 +1386,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
saved_eager_promotion = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
+ scavenge_mut_arr_ptrs_marked(GCT_PARAM((StgMutArrPtrs *)p));
if (gct->failed_to_evac) {
((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
@@ -1396,24 +1396,24 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsFalse;
- recordMutableGen_GC((StgClosure *)p,gen_no);
+ recordMutableGen_GC(GCT_PARAM((StgClosure *)p,gen_no));
continue;
}
default:
;
}
- if (scavenge_one(p)) {
+ if (scavenge_one(GCT_PARAM(p))) {
// didn't manage to promote everything, so put the
// object back on the list.
- recordMutableGen_GC((StgClosure *)p,gen_no);
+ recordMutableGen_GC(GCT_PARAM((StgClosure *)p,gen_no));
}
}
}
}
void
-scavenge_capability_mut_lists (Capability *cap)
+scavenge_capability_mut_lists (DECLARE_GCT_PARAM(Capability *cap))
{
nat g;
@@ -1424,7 +1424,7 @@ scavenge_capability_mut_lists (Capability *cap)
* namely to reduce the likelihood of spurious old->new pointers.
*/
for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
- scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
+ scavenge_mutable_list(GCT_PARAM(cap->saved_mut_lists[g], &generations[g]));
freeChain_sync(cap->saved_mut_lists[g]);
cap->saved_mut_lists[g] = NULL;
}
@@ -1439,7 +1439,7 @@ scavenge_capability_mut_lists (Capability *cap)
-------------------------------------------------------------------------- */
static void
-scavenge_static(void)
+scavenge_static(DECLARE_GCT_ONLY_PARAM)
{
StgClosure* p;
const StgInfoTable *info;
@@ -1484,7 +1484,7 @@ scavenge_static(void)
case IND_STATIC:
{
StgInd *ind = (StgInd *)p;
- evacuate(&ind->indirectee);
+ evacuate(GCT_PARAM(&ind->indirectee));
/* might fail to evacuate it, in which case we have to pop it
* back on the mutable list of the oldest generation. We
@@ -1493,17 +1493,17 @@ scavenge_static(void)
*/
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
- recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
+ recordMutableGen_GC(GCT_PARAM((StgClosure *)p,oldest_gen->no));
}
break;
}
case THUNK_STATIC:
- scavenge_thunk_srt(info);
+ scavenge_thunk_srt(GCT_PARAM(info));
break;
case FUN_STATIC:
- scavenge_fun_srt(info);
+ scavenge_fun_srt(GCT_PARAM(info));
break;
case CONSTR_STATIC:
@@ -1513,7 +1513,7 @@ scavenge_static(void)
next = (P_)p->payload + info->layout.payload.ptrs;
// evacuate the pointers
for (q = (P_)p->payload; q < next; q++) {
- evacuate((StgClosure **)q);
+ evacuate(GCT_PARAM((StgClosure **)q));
}
break;
}
@@ -1531,7 +1531,7 @@ scavenge_static(void)
-------------------------------------------------------------------------- */
static void
-scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
+scavenge_large_bitmap( DECLARE_GCT_PARAM(StgPtr p, StgLargeBitmap *large_bitmap, nat size) )
{
nat i, j, b;
StgWord bitmap;
@@ -1544,7 +1544,7 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
i += j;
for (; j > 0; j--, p++) {
if ((bitmap & 1) == 0) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
bitmap = bitmap >> 1;
}
@@ -1552,11 +1552,11 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
}
STATIC_INLINE StgPtr
-scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
+scavenge_small_bitmap (DECLARE_GCT_PARAM(StgPtr p, nat size, StgWord bitmap))
{
while (size > 0) {
if ((bitmap & 1) == 0) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
}
p++;
bitmap = bitmap >> 1;
@@ -1572,7 +1572,7 @@ scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
-------------------------------------------------------------------------- */
static void
-scavenge_stack(StgPtr p, StgPtr stack_end)
+scavenge_stack(DECLARE_GCT_PARAM(StgPtr p, StgPtr stack_end))
{
const StgRetInfoTable* info;
StgWord bitmap;
@@ -1626,7 +1626,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
StgUpdateFrame *frame = (StgUpdateFrame *)p;
StgClosure *v;
- evacuate(&frame->updatee);
+ evacuate(GCT_PARAM(&frame->updatee));
v = frame->updatee;
if (GET_CLOSURE_TAG(v) != 0 ||
(get_itbl(v)->type != BLACKHOLE)) {
@@ -1651,11 +1651,11 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
// NOTE: the payload starts immediately after the info-ptr, we
// don't have an StgHeader in the same sense as a heap closure.
p++;
- p = scavenge_small_bitmap(p, size, bitmap);
+ p = scavenge_small_bitmap(GCT_PARAM(p, size, bitmap));
follow_srt:
if (major_gc)
- scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
+ scavenge_srt(GCT_PARAM((StgClosure **)GET_SRT(info), info->i.srt_bitmap));
continue;
case RET_BCO: {
@@ -1663,11 +1663,11 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
nat size;
p++;
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
bco = (StgBCO *)*p;
p++;
size = BCO_BITMAP_SIZE(bco);
- scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
+ scavenge_large_bitmap(GCT_PARAM(p, BCO_BITMAP(bco), size));
p += size;
continue;
}
@@ -1679,7 +1679,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
size = GET_LARGE_BITMAP(&info->i)->size;
p++;
- scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
+ scavenge_large_bitmap(GCT_PARAM(p, GET_LARGE_BITMAP(&info->i), size));
p += size;
// and don't forget to follow the SRT
goto follow_srt;
@@ -1698,14 +1698,14 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
bitmap = RET_DYN_LIVENESS(dyn);
p = (P_)&((StgRetDyn *)p)->payload[0];
size = RET_DYN_BITMAP_SIZE;
- p = scavenge_small_bitmap(p, size, bitmap);
+ p = scavenge_small_bitmap(GCT_PARAM(p, size, bitmap));
// skip over the non-ptr words
p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
// follow the ptr words
for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- evacuate((StgClosure **)p);
+ evacuate(GCT_PARAM((StgClosure **)p));
p++;
}
continue;
@@ -1716,9 +1716,9 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
StgRetFun *ret_fun = (StgRetFun *)p;
StgFunInfoTable *fun_info;
- evacuate(&ret_fun->fun);
+ evacuate(GCT_PARAM(&ret_fun->fun));
fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
- p = scavenge_arg_block(fun_info, ret_fun->payload);
+ p = scavenge_arg_block(GCT_PARAM(fun_info, ret_fun->payload));
goto follow_srt;
}
@@ -1738,7 +1738,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
--------------------------------------------------------------------------- */