Skip to content
This repository
Browse code

[concurrency] Next stage of concurrency implementation, message passing between schedulers.

git-svn-id: https://svn.parrot.org/parrot/trunk@24562 d31e2699-5ff4-0310-a27c-f18f2fbe73fe
  • Loading branch information...
commit 4d9c96049dab33c9e88942f9c527c162314a5cef 1 parent 204f307
allisonrandal allisonrandal authored
33 include/parrot/scheduler.h
@@ -23,6 +23,13 @@ void Parrot_cx_add_handler(PARROT_INTERP, ARGIN(PMC *handler))
23 23 __attribute__nonnull__(2);
24 24
25 25 PARROT_API
  26 +void Parrot_cx_broadcast_message(PARROT_INTERP,
  27 + ARGIN(STRING *messagetype),
  28 + ARGIN_NULLOK(PMC *data))
  29 + __attribute__nonnull__(1)
  30 + __attribute__nonnull__(2);
  31 +
  32 +PARROT_API
26 33 PARROT_CAN_RETURN_NULL
27 34 PMC * Parrot_cx_delete_suspend_for_gc(PARROT_INTERP)
28 35 __attribute__nonnull__(1);
@@ -39,6 +46,10 @@ PMC * Parrot_cx_find_handler_for_task(PARROT_INTERP, ARGIN(PMC *task))
39 46 __attribute__nonnull__(2);
40 47
41 48 PARROT_API
  49 +void Parrot_cx_request_suspend_for_gc(PARROT_INTERP)
  50 + __attribute__nonnull__(1);
  51 +
  52 +PARROT_API
42 53 void Parrot_cx_runloop_end(PARROT_INTERP)
43 54 __attribute__nonnull__(1);
44 55
@@ -64,10 +75,6 @@ opcode_t * Parrot_cx_schedule_sleep(PARROT_INTERP,
64 75 __attribute__nonnull__(1);
65 76
66 77 PARROT_API
67   -void Parrot_cx_schedule_suspend_for_gc(PARROT_INTERP)
68   - __attribute__nonnull__(1);
69   -
70   -PARROT_API
71 78 void Parrot_cx_schedule_task(PARROT_INTERP, ARGIN(PMC *task))
72 79 __attribute__nonnull__(1)
73 80 __attribute__nonnull__(2);
@@ -81,6 +88,18 @@ void Parrot_cx_schedule_timer(PARROT_INTERP,
81 88 ARGIN_NULLOK(PMC *sub))
82 89 __attribute__nonnull__(1);
83 90
  91 +PARROT_API
  92 +void Parrot_cx_send_message(PARROT_INTERP,
  93 + ARGIN(STRING *messagetype),
  94 + ARGIN_NULLOK(PMC *payload))
  95 + __attribute__nonnull__(1)
  96 + __attribute__nonnull__(2);
  97 +
  98 +void Parrot_cx_check_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
  99 + __attribute__nonnull__(1)
  100 + __attribute__nonnull__(2)
  101 + FUNC_MODIFIES(*scheduler);
  102 +
84 103 void Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
85 104 __attribute__nonnull__(1)
86 105 __attribute__nonnull__(2)
@@ -93,11 +112,9 @@ void Parrot_cx_invoke_callback(PARROT_INTERP, ARGIN(PMC *callback))
93 112 __attribute__nonnull__(1)
94 113 __attribute__nonnull__(2);
95 114
96   -void Parrot_cx_refresh_task_list(PARROT_INTERP)
97   - __attribute__nonnull__(1);
98   -
99   -void Parrot_cx_runloop_sleep(ARGMOD(PMC *scheduler))
  115 +void Parrot_cx_refresh_task_list(PARROT_INTERP, ARGMOD(PMC *scheduler))
100 116 __attribute__nonnull__(1)
  117 + __attribute__nonnull__(2)
101 118 FUNC_MODIFIES(*scheduler);
102 119
103 120 void Parrot_cx_runloop_wake(PARROT_INTERP, ARGMOD(PMC *scheduler))
23 include/parrot/scheduler_private.h
@@ -17,25 +17,34 @@
17 17
18 18 /* Scheduler PMC's underlying struct. */
19 19 typedef struct Parrot_Scheduler {
20   - int id; /* The scheduler's ID. */
21   - int max_tid; /* The highest assigned task ID. */
22   - int pending; /* A count of pending tasks (cached for fast
  20 + INTVAL id; /* The scheduler's ID. */
  21 + INTVAL max_tid; /* The highest assigned task ID. */
  22 + INTVAL pending; /* A count of pending tasks (cached for fast
23 23 lookup). */
24 24 PMC *task_list; /* The current list of tasks. */
25 25 PMC *task_index; /* An index into the current list of tasks,
26 26 ordered by priority. */
27 27 PMC *wait_index; /* An unordered index of inactive tasks. */
28 28 PMC *handlers; /* The list of currently active handlers. */
29   - Parrot_cond condition; /* Flag used by scheduler runloop */
30   - Parrot_mutex lock; /* Flag used by scheduler runloop */
  29 + PMC *messages; /* A message queue used for communication
  30 + between schedulers. */
  31 + Parrot_mutex msg_lock; /* Lock to synchronize use of the message queue. */
31 32 Parrot_Interp interp; /* A link back to the scheduler's interpreter. */
32   - Parrot_thread runloop_handle; /* A handle for the scheduler's runloop
33   - thread, if any. */
34 33 } Parrot_Scheduler;
35 34
36 35 /* Macro to access underlying structure of a Scheduler PMC. */
37 36 #define PARROT_SCHEDULER(s) ((Parrot_Scheduler *) PMC_data(s))
38 37
  38 +/* SchedulerMessage PMC's underlying struct. */
  39 +typedef struct Parrot_SchedulerMessage {
  40 + INTVAL id; /* The message's ID. */
  41 + STRING *type; /* The message's type. */
  42 + PMC *data; /* Additional data for the message. */
  43 +} Parrot_SchedulerMessage;
  44 +
  45 +/* Macro to access underlying structure of a SchedulerMessage PMC. */
  46 +#define PARROT_SCHEDULERMESSAGE(s) ((Parrot_SchedulerMessage *) PMC_data(s))
  47 +
39 48 /* Task PMC's underlying struct. */
40 49 typedef struct Parrot_Task {
41 50 INTVAL id; /* The task ID. */
2  src/inter_create.c
@@ -317,8 +317,8 @@ Parrot_really_destroy(PARROT_INTERP, SHIM(int exit_code), SHIM(void *arg))
317 317 * wait for threads to complete if needed; terminate the event loop
318 318 */
319 319 if (!interp->parent_interpreter) {
320   - pt_join_threads(interp);
321 320 Parrot_cx_runloop_end(interp);
  321 + pt_join_threads(interp);
322 322 }
323 323
324 324 /* if something needs destruction (e.g. closing PIOs)
1  src/pmc/parrotinterpreter.pmc
@@ -47,6 +47,7 @@ clone_interpreter(Parrot_Interp d, Parrot_Interp s, INTVAL flags)
47 47 Parrot_block_DOD(d);
48 48
49 49 d->scheduler = pmc_new(d, enum_class_Scheduler);
  50 + d->scheduler = VTABLE_share_ro(d, d->scheduler);
50 51
51 52 if (flags & PARROT_CLONE_RUNOPS)
52 53 d->run_core = s->run_core;
85 src/pmc/scheduler.pmc
@@ -46,11 +46,11 @@ Initialize a concurrency scheduler object.
46 46 core_struct->max_tid = 0;
47 47 core_struct->task_list = pmc_new(interp, enum_class_Hash);
48 48 core_struct->task_index = pmc_new(interp, enum_class_ResizableIntegerArray);
49   - core_struct->handlers = pmc_new(interp, enum_class_ResizablePMCArray);
50 49 core_struct->wait_index = pmc_new(interp, enum_class_ResizablePMCArray);
  50 + core_struct->handlers = pmc_new(interp, enum_class_ResizablePMCArray);
  51 + core_struct->messages = pmc_new(interp, enum_class_ResizablePMCArray);
51 52 core_struct->interp = INTERP;
52   - COND_INIT(core_struct->condition);
53   - MUTEX_INIT(core_struct->lock);
  53 + MUTEX_INIT(core_struct->msg_lock);
54 54 }
55 55
56 56 /*
@@ -75,30 +75,15 @@ An C<Integer> representing the unique identifier for this scheduler.
75 75
76 76 if (! VTABLE_isa(INTERP, data, CONST_STRING(INTERP, "Hash")))
77 77 real_exception(INTERP, NULL, INVALID_OPERATION,
78   - "Task initializer must be a Hash");
79   -
80   - core_struct = mem_allocate_zeroed_typed(Parrot_Scheduler);
  78 + "Scheduler initializer must be a Hash");
81 79
82   - /* Set flags for custom DOD mark and destroy. */
83   - PObj_custom_mark_SET(SELF);
84   - PObj_active_destroy_SET(SELF);
85   -
86   - /* Set up the core struct. */
87   - PMC_data(SELF) = core_struct;
  80 + SELF.init();
  81 + core_struct = PARROT_SCHEDULER(SELF);
88 82
89 83 elem = VTABLE_get_pmc_keyed_str(INTERP, data, CONST_STRING(INTERP, "id"));
90 84
91 85 if (! PMC_IS_NULL(elem))
92 86 core_struct->id = VTABLE_get_integer(INTERP, elem);
93   - else
94   - core_struct->id = 0;
95   -
96   - core_struct->max_tid = 0;
97   - core_struct->task_list = pmc_new(interp, enum_class_Hash);
98   - core_struct->task_index = pmc_new(interp, enum_class_ResizableIntegerArray);
99   - core_struct->handlers = pmc_new(interp, enum_class_ResizablePMCArray);
100   - core_struct->wait_index = pmc_new(interp, enum_class_ResizablePMCArray);
101   - core_struct->interp = INTERP;
102 87 }
103 88
104 89 /*
@@ -136,9 +121,7 @@ current maximum, and a birthtime of the current time.
136 121
137 122
138 123 SCHEDULER_cache_valid_CLEAR(SELF);
139   -/* LOCK(core_struct->lock); */
140 124 Parrot_cx_runloop_wake(core_struct->interp, SELF);
141   -/* UNLOCK(core_struct->lock); */
142 125 }
143 126
144 127 /*
@@ -224,6 +207,7 @@ Set this PMC as shared.
224 207 shared_struct->task_index = pt_shared_fixup(INTERP, shared_struct->task_index);
225 208 shared_struct->wait_index = pt_shared_fixup(INTERP, shared_struct->wait_index);
226 209 shared_struct->handlers = pt_shared_fixup(INTERP, shared_struct->handlers);
  210 + shared_struct->messages = pt_shared_fixup(INTERP, shared_struct->messages);
227 211
228 212 return shared_self;
229 213 }
@@ -239,8 +223,7 @@ Free the scheduler's underlying struct.
239 223 */
240 224 void destroy() {
241 225 Parrot_Scheduler * const core_struct = PARROT_SCHEDULER(SELF);
242   - COND_DESTROY(core_struct->condition);
243   - MUTEX_DESTROY(core_struct->lock);
  226 + MUTEX_DESTROY(core_struct->msg_lock);
244 227 mem_sys_free(core_struct);
245 228 }
246 229
@@ -265,6 +248,8 @@ Mark any referenced strings and PMCs.
265 248 pobject_lives(interp, (PObj*)core_struct->wait_index);
266 249 if (core_struct->handlers)
267 250 pobject_lives(interp, (PObj*)core_struct->handlers);
  251 + if (core_struct->messages)
  252 + pobject_lives(interp, (PObj*)core_struct->messages);
268 253 }
269 254 }
270 255
@@ -356,9 +341,7 @@ Called after the scheduler has been thawed.
356 341 */
357 342
358 343 void thawfinish(visit_info *info) {
359   - Parrot_Scheduler * const core_struct = PARROT_SCHEDULER(SELF);
360   -
361   - /* TODO: Rebuild the task index. */
  344 + Parrot_cx_refresh_task_list(INTERP, SELF);
362 345 }
363 346
364 347 /*
@@ -419,50 +402,6 @@ PMCNULL.
419 402 PCCRETURN(PMC *PMCNULL);
420 403 }
421 404
422   -/*
423   -
424   -=item C<PCCMETHOD refresh_task_list()>
425   -
426   -Perform maintenance on the scheduler's list of active tasks, checking for
427   -completed timers or sleep events, sorting for priority, etc.
428   -
429   -=cut
430   -
431   -*/
432   -
433   - PCCMETHOD refresh_task_list() {
434   - Parrot_Scheduler * core_struct = PARROT_SCHEDULER(SELF);
435   - INTVAL num_tasks, index;
436   -
437   - /* Sweep the wait list for completed timers */
438   - num_tasks = VTABLE_elements(interp, core_struct->wait_index);
439   - for (index = 0; index < num_tasks; index++) {
440   - INTVAL tid = VTABLE_get_integer_keyed_int(interp, core_struct->wait_index, index);
441   - if (tid > 0) {
442   - PMC *task = VTABLE_get_pmc_keyed_int(interp, core_struct->task_list, tid);
443   - if (PMC_IS_NULL(task)) {
444   - /* Cleanup expired tasks. */
445   - VTABLE_set_integer_keyed_int(INTERP, core_struct->wait_index, index, 0);
446   - }
447   - else {
448   - /* Move the timer to the active task list if the timer has
449   - * completed. */
450   - FLOATVAL timer_end_time = VTABLE_get_number_keyed_int(interp,
451   - task, PARROT_TIMER_NSEC);
452   - if (timer_end_time <= Parrot_floatval_time()) {
453   - VTABLE_push_integer(INTERP, core_struct->task_index, tid);
454   - VTABLE_set_integer_keyed_int(INTERP, core_struct->wait_index, index, 0);
455   - Parrot_cx_schedule_repeat(interp, task);
456   - SCHEDULER_cache_valid_CLEAR(SELF);
457   - }
458   - }
459   - }
460   - }
461   -
462   - /* Sort the task list index */
463   -
464   - SCHEDULER_cache_valid_SET(SELF);
465   - }
466 405
467 406 }
468 407
@@ -472,7 +411,7 @@ completed timers or sleep events, sorting for priority, etc.
472 411
473 412 =head1 SEE ALSO
474 413
475   -F<docs/pdds/pdd15_objects.pod>.
  414 +F<docs/pdds/pdd25_concurrency.pod>.
476 415
477 416 =cut
478 417
309 src/pmc/schedulermessage.pmc
... ... @@ -0,0 +1,309 @@
  1 +/*
  2 +Copyright (C) 2001-2007, The Perl Foundation.
  3 +$Id: $
  4 +
  5 +=head1 NAME
  6 +
  7 +src/pmc/schedulermessage.pmc - The concurrency scheduler message
  8 +
  9 +=head1 DESCRIPTION
  10 +
  11 +Implements a message passed between concurrency schedulers.
  12 +
  13 +=head2 Vtable Functions
  14 +
  15 +=over 4
  16 +
  17 +=cut
  18 +
  19 +*/
  20 +
  21 +#include "parrot/parrot.h"
  22 +#include "parrot/scheduler_private.h"
  23 +
  24 +pmclass SchedulerMessage need_ext {
  25 +
  26 +/*
  27 +
  28 +=item C<void init()>
  29 +
  30 +Initialize a concurrency scheduler message object.
  31 +
  32 +=cut
  33 +
  34 +*/
  35 +
  36 + void init() {
  37 + Parrot_SchedulerMessage * const core_struct
  38 + = mem_allocate_zeroed_typed(Parrot_SchedulerMessage);
  39 +
  40 + /* Set flags for custom DOD mark and destroy. */
  41 + PObj_custom_mark_SET(SELF);
  42 + PObj_active_destroy_SET(SELF);
  43 +
  44 + /* Set up the core struct. */
  45 + PMC_data(SELF) = core_struct;
  46 + core_struct->id = 0;
  47 + core_struct->type = CONST_STRING(INTERP, "");
  48 + core_struct->data = PMCNULL;
  49 + }
  50 +
  51 +/*
  52 +
  53 +=item C<void init_pmc(PMC *data)>
  54 +
  55 +Initializes a new SchedulerMessage with a C<Hash> PMC with any or all of the keys:
  56 +
  57 +=over 4
  58 +
  59 +=item C<id>
  60 +
  61 +An C<Integer> representing the unique identifier for this scheduler message.
  62 +
  63 +=item C<type>
  64 +
  65 +A C<String> representing the unique type for this scheduler message.
  66 +
  67 +=item C<data>
  68 +
  69 +A C<PMC> representing the data passed in this scheduler message.
  70 +
  71 +=back
  72 +
  73 +*/
  74 +
  75 + void init_pmc(PMC *data) {
  76 + PMC *elem;
  77 + Parrot_SchedulerMessage *core_struct;
  78 +
  79 + if (! VTABLE_isa(INTERP, data, CONST_STRING(INTERP, "Hash")))
  80 + real_exception(INTERP, NULL, INVALID_OPERATION,
  81 + "message initializer must be a Hash");
  82 +
  83 + SELF.init();
  84 + core_struct = PARROT_SCHEDULERMESSAGE(SELF);
  85 +
  86 + elem = VTABLE_get_pmc_keyed_str(INTERP, data, CONST_STRING(INTERP, "id"));
  87 + if (! PMC_IS_NULL(elem))
  88 + core_struct->id = VTABLE_get_integer(INTERP, elem);
  89 +
  90 + elem = VTABLE_get_pmc_keyed_str(INTERP, data, CONST_STRING(INTERP, "type"));
  91 + if (! PMC_IS_NULL(elem))
  92 + core_struct->type = VTABLE_get_string(INTERP, elem);
  93 +
  94 + elem = VTABLE_get_pmc_keyed_str(INTERP, data, CONST_STRING(INTERP, "data"));
  95 + if (! PMC_IS_NULL(elem))
  96 + core_struct->data = elem;
  97 + }
  98 +
  99 +/*
  100 +
  101 +=item C<INTVAL get_integer()>
  102 +
  103 +Retrieve the message ID.
  104 +
  105 +=cut
  106 +
  107 +*/
  108 +
  109 + INTVAL get_integer() {
  110 + Parrot_SchedulerMessage * core_struct = PARROT_SCHEDULERMESSAGE(SELF);
  111 + return core_struct->id;
  112 + }
  113 +
  114 +/*
  115 +
  116 +=item C<void set_integer_native(INTVAL value)>
  117 +
  118 +Set the message ID.
  119 +
  120 +=cut
  121 +
  122 +*/
  123 +
  124 + void set_integer_native(INTVAL value) {
  125 + Parrot_SchedulerMessage * core_struct = PARROT_SCHEDULERMESSAGE(SELF);
  126 + core_struct->id = value;
  127 + }
  128 +
  129 +
  130 +/*
  131 +
  132 +=item C<STRING * get_string()>
  133 +
  134 +Retrieve the message type.
  135 +
  136 +=cut
  137 +
  138 +*/
  139 +
  140 + STRING * get_string() {
  141 + Parrot_SchedulerMessage * core_struct = PARROT_SCHEDULERMESSAGE(SELF);
  142 + return core_struct->type;
  143 + }
  144 +
  145 +/*
  146 +
  147 +=item C<void set_string_native(STRING *value)>
  148 +
  149 +Set the message type.
  150 +
  151 +=cut
  152 +
  153 +*/
  154 +
  155 + void set_string_native(STRING *value) {
  156 + Parrot_SchedulerMessage * core_struct = PARROT_SCHEDULERMESSAGE(SELF);
  157 + core_struct->type = value;
  158 + }
  159 +
  160 +
  161 +/*
  162 +
  163 +=item C<PMC *share_ro()>
  164 +
  165 +Set this PMC as shared.
  166 +
  167 +=cut
  168 +
  169 +*/
  170 +
  171 + PMC *share_ro() {
  172 + PMC *shared_self;
  173 + Parrot_SchedulerMessage *shared_struct;
  174 +
  175 + if (PObj_is_PMC_shared_TEST(SELF))
  176 + return SELF;
  177 +
  178 + shared_self = pt_shared_fixup(INTERP, SELF);
  179 + shared_struct = PARROT_SCHEDULERMESSAGE(shared_self);
  180 +
  181 + shared_struct->data = pt_shared_fixup(INTERP, shared_struct->data);
  182 +
  183 + return shared_self;
  184 + }
  185 +
  186 +/*
  187 +
  188 +=item C<void destroy()>
  189 +
  190 +Free the scheduler message's underlying struct.
  191 +
  192 +=cut
  193 +
  194 +*/
  195 + void destroy() {
  196 + mem_sys_free(PMC_data(SELF));
  197 + }
  198 +
  199 +/*
  200 +
  201 +=item C<void mark()>
  202 +
  203 +Mark any referenced strings and PMCs.
  204 +
  205 +=cut
  206 +
  207 +*/
  208 + void mark() {
  209 + if (PARROT_SCHEDULERMESSAGE(SELF)) {
  210 + Parrot_SchedulerMessage * const core_struct = PARROT_SCHEDULERMESSAGE(SELF);
  211 +
  212 + if (core_struct->data)
  213 + pobject_lives(interp, (PObj*)core_struct->data);
  214 + }
  215 + }
  216 +
  217 +/*
  218 +
  219 +=item C<void visit(visit_info *info)>
  220 +
  221 +This is used by freeze/thaw to visit the contents of the scheduler message.
  222 +
  223 +C<*info> is the visit info, (see F<include/parrot/pmc_freeze.h>).
  224 +
  225 +=cut
  226 +
  227 +*/
  228 +
  229 + void visit(visit_info *info) {
  230 + Parrot_SchedulerMessage * const core_struct = PARROT_SCHEDULERMESSAGE(SELF);
  231 + PMC **pos;
  232 +
  233 + /* 1) visit message data */
  234 + pos = &core_struct->data;
  235 + info->thaw_ptr = pos;
  236 + (info->visit_pmc_now)(INTERP, *pos, info);
  237 +
  238 + }
  239 +
  240 +/*
  241 +
  242 +=item C<void freeze(visit_info *info)>
  243 +
  244 +Used to archive the scheduler message.
  245 +
  246 +=cut
  247 +
  248 +*/
  249 +
  250 + void freeze(visit_info *info) {
  251 + IMAGE_IO *io = info->image_io;
  252 + Parrot_SchedulerMessage * const core_struct = PARROT_SCHEDULERMESSAGE(SELF);
  253 +
  254 + /* 1) freeze message id */
  255 + VTABLE_push_integer(INTERP, io, core_struct->id);
  256 +
  257 + /* 2) freeze message type */
  258 + VTABLE_push_string(INTERP, io, core_struct->type);
  259 + }
  260 +
  261 +/*
  262 +
  263 +=item C<void thaw(visit_info *info)>
  264 +
  265 +Used to unarchive the scheduler message.
  266 +
  267 +=cut
  268 +
  269 +*/
  270 +
  271 + void thaw(visit_info *info) {
  272 + IMAGE_IO * const io = info->image_io;
  273 +
  274 + /* 1. thaw message id */
  275 + const INTVAL id = VTABLE_shift_integer(INTERP, io);
  276 +
  277 + /* 2. thaw message type */
  278 + STRING * const type = VTABLE_shift_string(INTERP, io);
  279 +
  280 + /* Allocate the message's core data struct and set custom flags. */
  281 + SELF.init();
  282 +
  283 + /* Set the message's id to the frozen id */
  284 + PARROT_SCHEDULERMESSAGE(SELF)->id = id;
  285 +
  286 + /* Set the message's type to the frozen type */
  287 + PARROT_SCHEDULERMESSAGE(SELF)->type = type;
  288 + }
  289 +
  290 +}
  291 +
  292 +/*
  293 +
  294 +=back
  295 +
  296 +=head1 SEE ALSO
  297 +
  298 +F<docs/pdds/pdd25_concurrency.pod>.
  299 +
  300 +=cut
  301 +
  302 +*/
  303 +
  304 +/*
  305 + * Local variables:
  306 + * c-file-style: "parrot"
  307 + * End:
  308 + * vim: expandtab shiftwidth=4:
  309 + */
330 src/scheduler.c
@@ -27,10 +27,16 @@ exceptions, async I/O, and concurrent tasks (threads).
27 27
28 28 /* HEADERIZER BEGIN: static */
29 29
30   -PARROT_WARN_UNUSED_RESULT
31   -PARROT_CAN_RETURN_NULL
32   -static void* scheduler_runloop(ARGMOD(PMC *scheduler))
  30 +static void scheduler_process_messages(PARROT_INTERP,
  31 + ARGMOD(PMC *scheduler))
  32 + __attribute__nonnull__(1)
  33 + __attribute__nonnull__(2)
  34 + FUNC_MODIFIES(*scheduler);
  35 +
  36 +static void scheduler_process_wait_list(PARROT_INTERP,
  37 + ARGMOD(PMC *scheduler))
33 38 __attribute__nonnull__(1)
  39 + __attribute__nonnull__(2)
34 40 FUNC_MODIFIES(*scheduler);
35 41
36 42 /* HEADERIZER END: static */
@@ -39,7 +45,7 @@ static void* scheduler_runloop(ARGMOD(PMC *scheduler))
39 45
40 46 =head2 Scheduler Interface Functions
41 47
42   -Functions that are used to interface with the concurrency scheduler.
  48 +Functions to interface with the concurrency scheduler.
43 49
44 50 =over 4
45 51
@@ -51,14 +57,9 @@ Initalize the concurrency scheduler for the interpreter.
51 57
52 58 */
53 59
54   -typedef void *(pt_start_routine_f)(void *);
55   -
56 60 void
57 61 Parrot_cx_init_scheduler(PARROT_INTERP)
58 62 {
59   -#if CX_DEBUG
60   - fprintf(stderr, "call to Parrot_cx_init_scheduler\n");
61   -#endif
62 63 if (!interp->parent_interpreter) {
63 64 PMC *scheduler;
64 65
@@ -106,7 +107,7 @@ void
106 107 Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
107 108 {
108 109 SCHEDULER_wake_requested_CLEAR(scheduler);
109   - Parrot_cx_refresh_task_list(interp);
  110 + Parrot_cx_refresh_task_list(interp, scheduler);
110 111
111 112 while (VTABLE_get_integer(interp, scheduler) > 0) {
112 113 PMC * const task = VTABLE_pop_pmc(interp, scheduler);
@@ -114,11 +115,7 @@ Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
114 115 PMC *type_pmc = VTABLE_get_attr_str(interp, task, CONST_STRING(interp, "type"));
115 116 STRING *type = VTABLE_get_string(interp, type_pmc);
116 117
117   - if (string_equal(interp, type, CONST_STRING(interp, "suspend_for_gc")) == 0) {
118   - Parrot_Task *task_struct = PARROT_TASK(task);
119   - pt_suspend_self_for_gc(task_struct->interp);
120   - }
121   - else if (string_equal(interp, type, CONST_STRING(interp, "callback")) == 0) {
  118 + if (string_equal(interp, type, CONST_STRING(interp, "callback")) == 0) {
122 119 Parrot_cx_invoke_callback(interp, task);
123 120 }
124 121 else if (string_equal(interp, type, CONST_STRING(interp, "timer")) == 0) {
@@ -143,7 +140,7 @@ Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
143 140 /* If the scheduler was flagged to terminate, make sure you process all
144 141 * tasks. */
145 142 if (SCHEDULER_terminate_requested_TEST(scheduler))
146   - Parrot_cx_refresh_task_list(interp);
  143 + Parrot_cx_refresh_task_list(interp, scheduler);
147 144
148 145 } /* end of pending tasks */
149 146
@@ -153,50 +150,24 @@ Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
153 150
154 151 =item C<void Parrot_cx_refresh_task_list>
155 152
156   -Tell the scheduler to perform maintenance on the priority task list.
  153 +Tell the scheduler to perform maintenance on its list of active tasks, checking
  154 +for completed timers or sleep events, sorting for priority, checking for
  155 +messages, etc.
157 156
158 157 =cut
159 158
160 159 */
161 160
162 161 void
163   -Parrot_cx_refresh_task_list(PARROT_INTERP)
  162 +Parrot_cx_refresh_task_list(PARROT_INTERP, ARGMOD(PMC *scheduler))
164 163 {
165   - if (interp->scheduler)
166   - Parrot_PCCINVOKE(interp, interp->scheduler,
167   - CONST_STRING(interp, "refresh_task_list"), "->");
168   - else
169   - real_exception(interp, NULL, INVALID_OPERATION,
170   - "Scheduler was not initialized for this interpreter.\n");
171   - return;
172   -}
  164 + scheduler_process_wait_list(interp, scheduler);
  165 + scheduler_process_messages(interp, scheduler);
173 166
174   -/*
175   -
176   -=item C<void Parrot_cx_runloop_sleep>
177   -
178   -Pause the scheduler runloop. Called when there are no more pending tasks in the
179   -scheduler's task list, to freeze the runloop until there are tasks to handle.
180   -
181   -Sleep is skipped if a wake signal was received since the last sleep, indicating
182   -more tasks to process. Sleep is also skipped if the scheduler is in the process
183   -of terminating, instead processing any remaining tasks as quickly as possible
184   -before finalization.
185   -
186   -=cut
187   -
188   -*/
  167 + /* TODO: Sort the task list index */
189 168
190   -void
191   -Parrot_cx_runloop_sleep(ARGMOD(PMC *scheduler))
192   -{
193   - Parrot_Scheduler * const sched_struct = PARROT_SCHEDULER(scheduler);
194   - if (SCHEDULER_terminate_requested_TEST(scheduler))
195   - return;
196   -
197   - if (!SCHEDULER_wake_requested_TEST(scheduler))
198   - COND_WAIT(sched_struct->condition, sched_struct->lock);
199   - SCHEDULER_wake_requested_CLEAR(scheduler);
  169 + SCHEDULER_cache_valid_SET(scheduler);
  170 + return;
200 171 }
201 172
202 173 /*
@@ -213,10 +184,8 @@ the scheduler's task list).
213 184 void
214 185 Parrot_cx_runloop_wake(PARROT_INTERP, ARGMOD(PMC *scheduler))
215 186 {
216   - Parrot_Scheduler * const sched_struct = PARROT_SCHEDULER(scheduler);
217 187 enable_event_checking(interp);
218 188 SCHEDULER_wake_requested_SET(scheduler);
219   - COND_SIGNAL(sched_struct->condition);
220 189 }
221 190
222 191
@@ -234,31 +203,16 @@ PARROT_API
234 203 void
235 204 Parrot_cx_runloop_end(PARROT_INTERP)
236 205 {
237   -#if CX_DEBUG
238   - fprintf(stderr, "call to Parrot_cx_runloop_end\n");
239   -#endif
240   - if (!interp->parent_interpreter) {
241   - Parrot_Scheduler * const sched_struct = PARROT_SCHEDULER(interp->scheduler);
242   - void *raw_retval = NULL;
243   -
244   - SCHEDULER_terminate_requested_SET(interp->scheduler);
245   - Parrot_cx_handle_tasks(interp, interp->scheduler);
246   -
247   -/* LOCK(sched_struct->lock);
248   - * SCHEDULER_terminate_requested_SET(interp->scheduler);
249   - * Parrot_cx_runloop_wake(interp, interp->scheduler);
250   - * UNLOCK(sched_struct->lock);
251   - *
252   - * JOIN(sched_struct->runloop_handle, raw_retval);
253   - */
254   - }
  206 + SCHEDULER_terminate_requested_SET(interp->scheduler);
  207 + Parrot_cx_handle_tasks(interp, interp->scheduler);
255 208 }
256 209
257 210 /*
258 211
259 212 =item C<void Parrot_cx_schedule_task>
260 213
261   -Add a task to scheduler's task list.
  214 +Add a task to scheduler's task list. Cannot be called across
  215 +interpreters/threads, must be called from within the interpreter's runloop.
262 216
263 217 =cut
264 218
@@ -367,10 +321,9 @@ Parrot_cx_schedule_callback(PARROT_INTERP,
367 321
368 322 /*
369 323
370   -=item C<void Parrot_schedule_suspend_for_gc>
  324 +=item C<void Parrot_cx_request_suspend_for_gc>
371 325
372   -Create a new timer event due at C<diff> from now, repeated at C<interval>
373   -and running the passed C<sub>.
  326 +Tell the scheduler to suspend for GC at the next safe pause.
374 327
375 328 =cut
376 329
@@ -378,13 +331,12 @@ and running the passed C<sub>.
378 331
379 332 PARROT_API
380 333 void
381   -Parrot_cx_schedule_suspend_for_gc(PARROT_INTERP)
  334 +Parrot_cx_request_suspend_for_gc(PARROT_INTERP)
382 335 {
383   - PMC *event = pmc_new(interp, enum_class_Task);
384   -
385   - VTABLE_set_string_native(interp, event, CONST_STRING(interp, "suspend_for_gc"));
386   -
387   - Parrot_cx_schedule_task(interp, event);
  336 +#if CX_DEBUG
  337 + fprintf(stderr, "requesting gc suspend [interp=%p]\n", interp);
  338 +#endif
  339 + Parrot_cx_send_message(interp, CONST_STRING(interp, "suspend_for_gc"), PMCNULL);
388 340 }
389 341
390 342 /*
@@ -415,8 +367,8 @@ Parrot_cx_delete_task(PARROT_INTERP, ARGIN(PMC *task))
415 367
416 368 =item C<PMC * Parrot_cx_delete_suspend_for_gc>
417 369
418   -Remove a task that would suspend GC from the task list. (Provided for backward
419   -compatibility in the threads implementation.)
  370 +Remove a message that would suspend GC from the message queue. (Provided for
  371 +backward compatibility in the threads implementation.)
420 372
421 373 =cut
422 374
@@ -431,23 +383,31 @@ Parrot_cx_delete_suspend_for_gc(PARROT_INTERP)
431 383 Parrot_Scheduler * sched_struct = PARROT_SCHEDULER(interp->scheduler);
432 384 INTVAL num_tasks, index;
433 385
  386 +#if CX_DEBUG
  387 + fprintf(stderr, "called delete_suspend_for_gc\n");
  388 +#endif
  389 +
  390 +#if CX_DEBUG
  391 + fprintf(stderr, "locking msg_lock (delete) [interp=%p]\n", interp);
  392 +#endif
  393 + LOCK(sched_struct->msg_lock);
434 394 /* Search the task index for GC suspend tasks */
435   - num_tasks = VTABLE_elements(interp, sched_struct->task_index);
  395 + num_tasks = VTABLE_elements(interp, sched_struct->messages);
436 396 for (index = 0; index < num_tasks; index++) {
437   - INTVAL tid = VTABLE_get_integer_keyed_int(interp, sched_struct->task_index, index);
438   - if (tid > 0) {
439   - PMC *task = VTABLE_get_pmc_keyed_int(interp, sched_struct->task_list, tid);
440   - if (!PMC_IS_NULL(task)) {
441   - PMC *type = VTABLE_get_attr_str(interp, task,
442   - CONST_STRING(interp, "type"));
443   - if (string_equal(interp, VTABLE_get_string(interp, type),
444   - CONST_STRING(interp, "suspend_for_gc")) == 0) {
445   - Parrot_cx_delete_task(interp, task);
446   - return task;
447   - }
448   - }
  397 + PMC *message = VTABLE_get_pmc_keyed_int(interp, sched_struct->messages, index);
  398 + if (!PMC_IS_NULL(message)
  399 + && string_equal(interp, VTABLE_get_string(interp, message),
  400 + CONST_STRING(interp, "suspend_for_gc")) == 0) {
  401 + VTABLE_delete_keyed_int(interp, sched_struct->messages, index);
  402 + UNLOCK(sched_struct->msg_lock);
  403 + return message;
449 404 }
450 405 }
  406 +#if CX_DEBUG
  407 + fprintf(stderr, "unlocking msg_lock (delete) [interp=%p]\n", interp);
  408 +#endif
  409 + UNLOCK(sched_struct->msg_lock);
  410 +
451 411 }
452 412 else
453 413 real_exception(interp, NULL, INVALID_OPERATION,
@@ -483,6 +443,81 @@ Parrot_cx_add_handler(PARROT_INTERP, ARGIN(PMC *handler))
483 443
484 444 =back
485 445
  446 +=head2 Scheduler Message Interface Functions
  447 +
  448 +Functions that are used to interface with the message queue in the concurrency
  449 +scheduler.
  450 +
  451 +=over 4
  452 +
  453 +=item C<void Parrot_cx_send_message>
  454 +
  455 +Send a message to a scheduler in a different interpreter/thread.
  456 +
  457 +=cut
  458 +
  459 +*/
  460 +
  461 +PARROT_API
  462 +void
  463 +Parrot_cx_send_message(PARROT_INTERP, ARGIN(STRING *messagetype), ARGIN_NULLOK(PMC *payload))
  464 +{
  465 + if(interp->scheduler) {
  466 + Parrot_Scheduler * sched_struct = PARROT_SCHEDULER(interp->scheduler);
  467 + PMC *message = pmc_new(interp, enum_class_SchedulerMessage);
  468 + VTABLE_set_string_native(interp, message, messagetype);
  469 + message = VTABLE_share_ro(interp, message);
  470 +
  471 +#if CX_DEBUG
  472 + fprintf(stderr, "sending message[interp=%p]\n", interp);
  473 +#endif
  474 +
  475 +#if CX_DEBUG
  476 + fprintf(stderr, "locking msg_lock (send) [interp=%p]\n", interp);
  477 +#endif
  478 + LOCK(sched_struct->msg_lock);
  479 + VTABLE_push_pmc(interp, sched_struct->messages, message);
  480 +#if CX_DEBUG
  481 + fprintf(stderr, "unlocking msg_lock (send) [interp=%p]\n", interp);
  482 +#endif
  483 + UNLOCK(sched_struct->msg_lock);
  484 + Parrot_cx_runloop_wake(interp, interp->scheduler);
  485 +
  486 + }
  487 +
  488 +}
  489 +
  490 +/*
  491 +
  492 +=item C<void Parrot_cx_broadcast_message>
  493 +
  494 +Send a message to the schedulers in all interpreters/threads linked to this
  495 +one.
  496 +
  497 +=cut
  498 +
  499 +*/
  500 +
  501 +PARROT_API
  502 +void
  503 +Parrot_cx_broadcast_message(PARROT_INTERP, ARGIN(STRING *messagetype), ARGIN_NULLOK(PMC *data))
  504 +{
  505 + UINTVAL i;
  506 + LOCK(interpreter_array_mutex);
  507 + for (i = 0; i < n_interpreters; ++i) {
  508 + Parrot_Interp other_interp = interpreter_array[i];
  509 + if (interp == other_interp)
  510 + continue;
  511 + Parrot_cx_send_message(other_interp, messagetype, data);
  512 + }
  513 + UNLOCK(interpreter_array_mutex);
  514 +
  515 +}
  516 +
  517 +/*
  518 +
  519 +=back
  520 +
486 521 =head2 Task Interface Functions
487 522
488 523 Functions that are used to interface with a specific task in the concurrency scheduler.
@@ -631,62 +666,91 @@ Functions that are only used within the scheduler.
631 666
632 667 =over 4
633 668
634   -=item C<static void* scheduler_runloop>
635   -
636   -The scheduler runloop is started by the interpreter. It manages the flow of
637   -concurrent scheduling for the parent interpreter, and for lightweight
638   -concurrent tasks running within that interpreter. More complex concurrent tasks
639   -have their own runloop.
  669 +=item C<static void scheduler_process_wait_list>
640 670
641   -Currently the runloop is implented as a mutex/lock thread.
  671 +Scheduler maintenance, scan the list of waiting tasks to see if any are ready
  672 +to become active tasks.
642 673
643 674 =cut
644 675
645 676 */
646 677
647   -PARROT_WARN_UNUSED_RESULT
648   -PARROT_CAN_RETURN_NULL
649   -static void*
650   -scheduler_runloop(ARGMOD(PMC *scheduler))
  678 +static void
  679 +scheduler_process_wait_list(PARROT_INTERP, ARGMOD(PMC *scheduler))
651 680 {
652   - Parrot_Scheduler * const sched_struct = PARROT_SCHEDULER(scheduler);
653   - int running = 1;
  681 + Parrot_Scheduler * sched_struct = PARROT_SCHEDULER(scheduler);
  682 + INTVAL num_tasks, index;
  683 +
  684 + /* Sweep the wait list for completed timers */
  685 + num_tasks = VTABLE_elements(interp, sched_struct->wait_index);
  686 + for (index = 0; index < num_tasks; index++) {
  687 + INTVAL tid = VTABLE_get_integer_keyed_int(interp, sched_struct->wait_index, index);
  688 + if (tid > 0) {
  689 + PMC *task = VTABLE_get_pmc_keyed_int(interp, sched_struct->task_list, tid);
  690 + if (PMC_IS_NULL(task)) {
  691 + /* Cleanup expired tasks. */
  692 + VTABLE_set_integer_keyed_int(interp, sched_struct->wait_index, index, 0);
  693 + }
  694 + else {
  695 + /* Move the timer to the active task list if the timer has
  696 + * completed. */
  697 + FLOATVAL timer_end_time = VTABLE_get_number_keyed_int(interp,
  698 + task, PARROT_TIMER_NSEC);
  699 + if (timer_end_time <= Parrot_floatval_time()) {
  700 + VTABLE_push_integer(interp, sched_struct->task_index, tid);
  701 + VTABLE_set_integer_keyed_int(interp, sched_struct->wait_index, index, 0);
  702 + Parrot_cx_schedule_repeat(interp, task);
  703 + SCHEDULER_cache_valid_CLEAR(scheduler);
  704 + }
  705 + }
  706 + }
  707 + }
  708 +}
654 709
655   -#if CX_DEBUG
656   - fprintf(stderr, "started scheduler runloop\n");
657   -#endif
658   - LOCK(sched_struct->lock);
  710 +/*
  711 +
  712 +=item C<static void scheduler_process_messages>
  713 +
  714 +Scheduler maintenance, scan the list of messages sent from other schedulers and
  715 +take appropriate action on any received.
  716 +
  717 +=cut
  718 +
  719 +*/
  722 +
  723 +static void
  724 +scheduler_process_messages(PARROT_INTERP, ARGMOD(PMC *scheduler))
  725 +{
  726 + Parrot_Scheduler * sched_struct = PARROT_SCHEDULER(scheduler);
  727 + INTVAL num_messages, index;
  728 + PMC *message;
659 729
660   - while (running) {
661 730 #if CX_DEBUG
662   - fprintf(stderr, "Before sleep\n");
  731 + fprintf(stderr, "processing messages [interp=%p]\n", interp);
663 732 #endif
664   - /* Sleep until a task is pending */
665   - Parrot_cx_runloop_sleep(scheduler);
666 733
  734 + while (VTABLE_elements(interp, sched_struct->messages) > 0) {
667 735 #if CX_DEBUG
668   - fprintf(stderr, "After sleep, before handling tasks\n");
  736 + fprintf(stderr, "locking msg_lock (process) [interp=%p]\n", interp);
669 737 #endif
670   - /* Process pending tasks, if there are any */
671   -/* running = Parrot_cx_handle_tasks(sched_struct->interp, scheduler);*/
  738 + LOCK(sched_struct->msg_lock);
  739 + message = VTABLE_pop_pmc(interp, sched_struct->messages);
672 740 #if CX_DEBUG
673   - fprintf(stderr, "After handling tasks\n");
  741 + fprintf(stderr, "unlocking msg_lock (process) [interp=%p]\n", interp);
674 742 #endif
675   -
676   - } /* end runloop */
677   -
  743 + UNLOCK(sched_struct->msg_lock);
  744 + if (!PMC_IS_NULL(message)
  745 + && string_equal(interp, VTABLE_get_string(interp, message),
  746 + CONST_STRING(interp, "suspend_for_gc")) == 0) {
678 747 #if CX_DEBUG
679   - fprintf(stderr, "ended scheduler runloop\n");
  748 + fprintf(stderr, "found a suspend, suspending [interp=%p]\n", interp);
680 749 #endif
  750 + pt_suspend_self_for_gc(interp);
  751 + }
  752 + }
681 753
682   - UNLOCK(sched_struct->lock);
683   -
684   - /*
685   - COND_DESTROY(sched_struct->condition);
686   - MUTEX_DESTROY(sched_struct->lock);
687   - */
688   -
689   - return NULL;
690 754 }
691 755
692 756 /*
65 src/thread.c
@@ -453,7 +453,7 @@ thread_func(ARGIN_NULLOK(void *arg))
453 453 * thread is finito
454 454 */
455 455 LOCK(interpreter_array_mutex);
456   - DEBUG_ONLY(fprintf(stderr, "marking an thread as finished"));
  456 + DEBUG_ONLY(fprintf(stderr, "marking an thread as finished\n"));
457 457
458 458 interp->thread_data->state |= THREAD_STATE_FINISHED;
459 459 tid = interp->thread_data->tid;
@@ -464,7 +464,7 @@ thread_func(ARGIN_NULLOK(void *arg))
464 464 }
465 465 if (interp->thread_data->state & THREAD_STATE_DETACHED) {
466 466 interpreter_array[tid] = NULL;
467   - DEBUG_ONLY(fprintf(stderr, "really destroying an interpreter [exit while detached]"));
  467 + DEBUG_ONLY(fprintf(stderr, "really destroying an interpreter [exit while detached]\n"));
468 468 Parrot_really_destroy(interp, 0, NULL);
469 469 }
470 470 else if (interp->thread_data->state & THREAD_STATE_JOINED) {
@@ -902,7 +902,7 @@ remove_queued_suspend_gc(PARROT_INTERP)
902 902 mem_sys_free(ev);
903 903 mem_sys_free(cur);
904 904 cur = NULL;
905   - DEBUG_ONLY(fprintf(stderr, "%p: remove_queued_suspend_gc: got one", interp));
  905 + DEBUG_ONLY(fprintf(stderr, "%p: remove_queued_suspend_gc: got one\n", interp));
906 906 }
907 907
908 908 queue_unlock(queue);
@@ -935,7 +935,7 @@ pt_gc_count_threads(PARROT_INTERP)
935 935 continue;
936 936 ++count;
937 937 }
938   - DEBUG_ONLY(fprintf(stderr, "found %d threads", count));
  938 + DEBUG_ONLY(fprintf(stderr, "found %d threads\n", count));
939 939 return count;
940 940 }
941 941
@@ -956,12 +956,12 @@ pt_gc_wait_for_stage(PARROT_INTERP, thread_gc_stage_enum from_stage,
956 956 Shared_gc_info *info = shared_gc_info;
957 957 int thread_count;
958 958
959   - DEBUG_ONLY(fprintf(stderr, "%p: gc_wait_for_stage: %d->%d", interp, from_stage, to_stage));
  959 + DEBUG_ONLY(fprintf(stderr, "%p: gc_wait_for_stage: %d->%d\n", interp, from_stage, to_stage));
960 960
961 961 /* XXX well-timed thread death can mess this up */
962 962 LOCK(interpreter_array_mutex);
963 963
964   - DEBUG_ONLY(fprintf(stderr, "%p: got lock", interp));
  964 + DEBUG_ONLY(fprintf(stderr, "%p: got lock\n", interp));
965 965 thread_count = pt_gc_count_threads(interp);
966 966
967 967 PARROT_ASSERT(info->gc_stage == from_stage);
@@ -975,7 +975,7 @@ pt_gc_wait_for_stage(PARROT_INTERP, thread_gc_stage_enum from_stage,
975 975
976 976 ++info->num_reached;
977 977
978   - DEBUG_ONLY(fprintf(stderr, "%p: gc_wait_for_stage: got %d", interp, info->num_reached));
  978 + DEBUG_ONLY(fprintf(stderr, "%p: gc_wait_for_stage: got %d\n", interp, info->num_reached));
979 979
980 980 if (info->num_reached == thread_count) {
981 981 info->gc_stage = to_stage;
@@ -1033,21 +1033,21 @@ assumed held.
1033 1033 static void
1034 1034 pt_suspend_one_for_gc(PARROT_INTERP)
1035 1035 {
1036   - DEBUG_ONLY(fprintf(stderr, "suspend one: %p", interp));
  1036 + DEBUG_ONLY(fprintf(stderr, "suspend one: %p\n", interp));
1037 1037 if (is_suspended_for_gc(interp)) {
1038   - DEBUG_ONLY(fprintf(stderr, "ignoring already suspended"));
  1038 + DEBUG_ONLY(fprintf(stderr, "ignoring already suspended\n"));
1039 1039 return;
1040 1040 }
1041 1041
1042 1042 if (interp->thread_data->state & THREAD_STATE_GC_WAKEUP) {
1043   - DEBUG_ONLY(fprintf(stderr, "just waking it up"));
  1043 + DEBUG_ONLY(fprintf(stderr, "just waking it up\n"));
1044 1044 interp->thread_data->state |= THREAD_STATE_SUSPENDED_GC;
1045 1045 COND_SIGNAL(interp->thread_data->interp_cond);
1046 1046 }
1047 1047 else {
1048   - DEBUG_ONLY(fprintf(stderr, "queuing event"));
  1048 + DEBUG_ONLY(fprintf(stderr, "queuing event\n"));
1049 1049 interp->thread_data->state |= THREAD_STATE_SUSPEND_GC_REQUESTED;
1050   - Parrot_cx_schedule_suspend_for_gc(interp);
  1050 + Parrot_cx_request_suspend_for_gc(interp);
1051 1051 }
1052 1052 }
1053 1053
@@ -1066,7 +1066,7 @@ pt_suspend_all_for_gc(PARROT_INTERP)
1066 1066 {
1067 1067 UINTVAL i;
1068 1068
1069   - DEBUG_ONLY(fprintf(stderr, "suspend_all_for_gc [interp=%p]", interp));
  1069 + DEBUG_ONLY(fprintf(stderr, "suspend_all_for_gc [interp=%p]\n", interp));
1070 1070
1071 1071 LOCK(interpreter_array_mutex);
1072 1072 interp->thread_data->state |= THREAD_STATE_SUSPENDED_GC;
@@ -1095,7 +1095,7 @@ pt_suspend_all_for_gc(PARROT_INTERP)
1095 1095 * so we have a suspend event in our queue to ignore
1096 1096 */
1097 1097 /* XXX still reachable? */
1098   - DEBUG_ONLY(fprintf(stderr, "apparently someone else is doing it [%p]", other_interp));
  1098 + DEBUG_ONLY(fprintf(stderr, "apparently someone else is doing it [%p]\n", other_interp));
1099 1099 fprintf(stderr, "??? found later (%p)\n", other_interp);
1100 1100 successp = Parrot_cx_delete_suspend_for_gc(interp);
1101 1101 PARROT_ASSERT(successp);
@@ -1138,19 +1138,20 @@ pt_suspend_self_for_gc(PARROT_INTERP)
1138 1138 {
1139 1139 PARROT_ASSERT(interp);
1140 1140 PARROT_ASSERT(!interp->arena_base->DOD_block_level);
1141   - DEBUG_ONLY(fprintf(stderr, "%p: suspend_self_for_gc", interp));
  1141 + DEBUG_ONLY(fprintf(stderr, "%p: suspend_self_for_gc\n", interp));
1142 1142 /* since we are modifying our own state, we need to lock
1143 1143 * the interpreter_array_mutex.
1144 1144 */
1145 1145 LOCK(interpreter_array_mutex);
1146   - DEBUG_ONLY(fprintf(stderr, "%p: got lock", interp));
  1146 + DEBUG_ONLY(fprintf(stderr, "%p: got lock\n", interp));
1147 1147
1148 1148 PARROT_ASSERT(interp->thread_data->state &
1149 1149 (THREAD_STATE_SUSPEND_GC_REQUESTED | THREAD_STATE_SUSPENDED_GC));
1150 1150
1151 1151 if (interp->thread_data->state & THREAD_STATE_SUSPEND_GC_REQUESTED) {
1152   - DEBUG_ONLY(fprintf(stderr, "remove queued request"));