Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

[concurrency] Next stage of concurrency implementation, message passing
between schedulers.

git-svn-id: https://svn.parrot.org/parrot/trunk@24562 d31e2699-5ff4-0310-a27c-f18f2fbe73fe
  • Loading branch information...
commit 4d9c96049dab33c9e88942f9c527c162314a5cef 1 parent 204f307
allisonrandal allisonrandal authored
33 include/parrot/scheduler.h
View
@@ -23,6 +23,13 @@ void Parrot_cx_add_handler(PARROT_INTERP, ARGIN(PMC *handler))
__attribute__nonnull__(2);
PARROT_API
+void Parrot_cx_broadcast_message(PARROT_INTERP,
+ ARGIN(STRING *messagetype),
+ ARGIN_NULLOK(PMC *data))
+ __attribute__nonnull__(1)
+ __attribute__nonnull__(2);
+
+PARROT_API
PARROT_CAN_RETURN_NULL
PMC * Parrot_cx_delete_suspend_for_gc(PARROT_INTERP)
__attribute__nonnull__(1);
@@ -39,6 +46,10 @@ PMC * Parrot_cx_find_handler_for_task(PARROT_INTERP, ARGIN(PMC *task))
__attribute__nonnull__(2);
PARROT_API
+void Parrot_cx_request_suspend_for_gc(PARROT_INTERP)
+ __attribute__nonnull__(1);
+
+PARROT_API
void Parrot_cx_runloop_end(PARROT_INTERP)
__attribute__nonnull__(1);
@@ -64,10 +75,6 @@ opcode_t * Parrot_cx_schedule_sleep(PARROT_INTERP,
__attribute__nonnull__(1);
PARROT_API
-void Parrot_cx_schedule_suspend_for_gc(PARROT_INTERP)
- __attribute__nonnull__(1);
-
-PARROT_API
void Parrot_cx_schedule_task(PARROT_INTERP, ARGIN(PMC *task))
__attribute__nonnull__(1)
__attribute__nonnull__(2);
@@ -81,6 +88,18 @@ void Parrot_cx_schedule_timer(PARROT_INTERP,
ARGIN_NULLOK(PMC *sub))
__attribute__nonnull__(1);
+PARROT_API
+void Parrot_cx_send_message(PARROT_INTERP,
+ ARGIN(STRING *messagetype),
+ ARGIN_NULLOK(PMC *payload))
+ __attribute__nonnull__(1)
+ __attribute__nonnull__(2);
+
+void Parrot_cx_check_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
+ __attribute__nonnull__(1)
+ __attribute__nonnull__(2)
+ FUNC_MODIFIES(*scheduler);
+
void Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
__attribute__nonnull__(1)
__attribute__nonnull__(2)
@@ -93,11 +112,9 @@ void Parrot_cx_invoke_callback(PARROT_INTERP, ARGIN(PMC *callback))
__attribute__nonnull__(1)
__attribute__nonnull__(2);
-void Parrot_cx_refresh_task_list(PARROT_INTERP)
- __attribute__nonnull__(1);
-
-void Parrot_cx_runloop_sleep(ARGMOD(PMC *scheduler))
+void Parrot_cx_refresh_task_list(PARROT_INTERP, ARGMOD(PMC *scheduler))
__attribute__nonnull__(1)
+ __attribute__nonnull__(2)
FUNC_MODIFIES(*scheduler);
void Parrot_cx_runloop_wake(PARROT_INTERP, ARGMOD(PMC *scheduler))
23 include/parrot/scheduler_private.h
View
@@ -17,25 +17,34 @@
/* Scheduler PMC's underlying struct. */
typedef struct Parrot_Scheduler {
- int id; /* The scheduler's ID. */
- int max_tid; /* The highest assigned task ID. */
- int pending; /* A count of pending tasks (cached for fast
+ INTVAL id; /* The scheduler's ID. */
+ INTVAL max_tid; /* The highest assigned task ID. */
+ INTVAL pending; /* A count of pending tasks (cached for fast
lookup). */
PMC *task_list; /* The current list of tasks. */
PMC *task_index; /* An index into the current list of tasks,
ordered by priority. */
PMC *wait_index; /* An unordered index of inactive tasks. */
PMC *handlers; /* The list of currently active handlers. */
- Parrot_cond condition; /* Flag used by scheduler runloop */
- Parrot_mutex lock; /* Flag used by scheduler runloop */
+ PMC *messages; /* A message queue used for communication
+ between schedulers. */
+ Parrot_mutex msg_lock; /* Lock to synchronize use of the message queue. */
Parrot_Interp interp; /* A link back to the scheduler's interpreter. */
- Parrot_thread runloop_handle; /* A handle for the scheduler's runloop
- thread, if any. */
} Parrot_Scheduler;
/* Macro to access underlying structure of a Scheduler PMC. */
#define PARROT_SCHEDULER(s) ((Parrot_Scheduler *) PMC_data(s))
+/* SchedulerMessage PMC's underlying struct. */
+typedef struct Parrot_SchedulerMessage {
+ INTVAL id; /* The message's ID. */
+ STRING *type; /* The message's type. */
+ PMC *data; /* Additional data for the message. */
+} Parrot_SchedulerMessage;
+
+/* Macro to access underlying structure of a SchedulerMessage PMC. */
+#define PARROT_SCHEDULERMESSAGE(s) ((Parrot_SchedulerMessage *) PMC_data(s))
+
/* Task PMC's underlying struct. */
typedef struct Parrot_Task {
INTVAL id; /* The task ID. */
2  src/inter_create.c
View
@@ -317,8 +317,8 @@ Parrot_really_destroy(PARROT_INTERP, SHIM(int exit_code), SHIM(void *arg))
* wait for threads to complete if needed; terminate the event loop
*/
if (!interp->parent_interpreter) {
- pt_join_threads(interp);
Parrot_cx_runloop_end(interp);
+ pt_join_threads(interp);
}
/* if something needs destruction (e.g. closing PIOs)
1  src/pmc/parrotinterpreter.pmc
View
@@ -47,6 +47,7 @@ clone_interpreter(Parrot_Interp d, Parrot_Interp s, INTVAL flags)
Parrot_block_DOD(d);
d->scheduler = pmc_new(d, enum_class_Scheduler);
+ d->scheduler = VTABLE_share_ro(d, d->scheduler);
if (flags & PARROT_CLONE_RUNOPS)
d->run_core = s->run_core;
85 src/pmc/scheduler.pmc
View
@@ -46,11 +46,11 @@ Initialize a concurrency scheduler object.
core_struct->max_tid = 0;
core_struct->task_list = pmc_new(interp, enum_class_Hash);
core_struct->task_index = pmc_new(interp, enum_class_ResizableIntegerArray);
- core_struct->handlers = pmc_new(interp, enum_class_ResizablePMCArray);
core_struct->wait_index = pmc_new(interp, enum_class_ResizablePMCArray);
+ core_struct->handlers = pmc_new(interp, enum_class_ResizablePMCArray);
+ core_struct->messages = pmc_new(interp, enum_class_ResizablePMCArray);
core_struct->interp = INTERP;
- COND_INIT(core_struct->condition);
- MUTEX_INIT(core_struct->lock);
+ MUTEX_INIT(core_struct->msg_lock);
}
/*
@@ -75,30 +75,15 @@ An C<Integer> representing the unique identifier for this scheduler.
if (! VTABLE_isa(INTERP, data, CONST_STRING(INTERP, "Hash")))
real_exception(INTERP, NULL, INVALID_OPERATION,
- "Task initializer must be a Hash");
-
- core_struct = mem_allocate_zeroed_typed(Parrot_Scheduler);
+ "Scheduler initializer must be a Hash");
- /* Set flags for custom DOD mark and destroy. */
- PObj_custom_mark_SET(SELF);
- PObj_active_destroy_SET(SELF);
-
- /* Set up the core struct. */
- PMC_data(SELF) = core_struct;
+ SELF.init();
+ core_struct = PARROT_SCHEDULER(SELF);
elem = VTABLE_get_pmc_keyed_str(INTERP, data, CONST_STRING(INTERP, "id"));
if (! PMC_IS_NULL(elem))
core_struct->id = VTABLE_get_integer(INTERP, elem);
- else
- core_struct->id = 0;
-
- core_struct->max_tid = 0;
- core_struct->task_list = pmc_new(interp, enum_class_Hash);
- core_struct->task_index = pmc_new(interp, enum_class_ResizableIntegerArray);
- core_struct->handlers = pmc_new(interp, enum_class_ResizablePMCArray);
- core_struct->wait_index = pmc_new(interp, enum_class_ResizablePMCArray);
- core_struct->interp = INTERP;
}
/*
@@ -136,9 +121,7 @@ current maximum, and a birthtime of the current time.
SCHEDULER_cache_valid_CLEAR(SELF);
-/* LOCK(core_struct->lock); */
Parrot_cx_runloop_wake(core_struct->interp, SELF);
-/* UNLOCK(core_struct->lock); */
}
/*
@@ -224,6 +207,7 @@ Set this PMC as shared.
shared_struct->task_index = pt_shared_fixup(INTERP, shared_struct->task_index);
shared_struct->wait_index = pt_shared_fixup(INTERP, shared_struct->wait_index);
shared_struct->handlers = pt_shared_fixup(INTERP, shared_struct->handlers);
+ shared_struct->messages = pt_shared_fixup(INTERP, shared_struct->messages);
return shared_self;
}
@@ -239,8 +223,7 @@ Free the scheduler's underlying struct.
*/
void destroy() {
Parrot_Scheduler * const core_struct = PARROT_SCHEDULER(SELF);
- COND_DESTROY(core_struct->condition);
- MUTEX_DESTROY(core_struct->lock);
+ MUTEX_DESTROY(core_struct->msg_lock);
mem_sys_free(core_struct);
}
@@ -265,6 +248,8 @@ Mark any referenced strings and PMCs.
pobject_lives(interp, (PObj*)core_struct->wait_index);
if (core_struct->handlers)
pobject_lives(interp, (PObj*)core_struct->handlers);
+ if (core_struct->messages)
+ pobject_lives(interp, (PObj*)core_struct->messages);
}
}
@@ -356,9 +341,7 @@ Called after the scheduler has been thawed.
*/
void thawfinish(visit_info *info) {
- Parrot_Scheduler * const core_struct = PARROT_SCHEDULER(SELF);
-
- /* TODO: Rebuild the task index. */
+ Parrot_cx_refresh_task_list(INTERP, SELF);
}
/*
@@ -419,50 +402,6 @@ PMCNULL.
PCCRETURN(PMC *PMCNULL);
}
-/*
-
-=item C<PCCMETHOD refresh_task_list()>
-
-Perform maintenance on the scheduler's list of active tasks, checking for
-completed timers or sleep events, sorting for priority, etc.
-
-=cut
-
-*/
-
- PCCMETHOD refresh_task_list() {
- Parrot_Scheduler * core_struct = PARROT_SCHEDULER(SELF);
- INTVAL num_tasks, index;
-
- /* Sweep the wait list for completed timers */
- num_tasks = VTABLE_elements(interp, core_struct->wait_index);
- for (index = 0; index < num_tasks; index++) {
- INTVAL tid = VTABLE_get_integer_keyed_int(interp, core_struct->wait_index, index);
- if (tid > 0) {
- PMC *task = VTABLE_get_pmc_keyed_int(interp, core_struct->task_list, tid);
- if (PMC_IS_NULL(task)) {
- /* Cleanup expired tasks. */
- VTABLE_set_integer_keyed_int(INTERP, core_struct->wait_index, index, 0);
- }
- else {
- /* Move the timer to the active task list if the timer has
- * completed. */
- FLOATVAL timer_end_time = VTABLE_get_number_keyed_int(interp,
- task, PARROT_TIMER_NSEC);
- if (timer_end_time <= Parrot_floatval_time()) {
- VTABLE_push_integer(INTERP, core_struct->task_index, tid);
- VTABLE_set_integer_keyed_int(INTERP, core_struct->wait_index, index, 0);
- Parrot_cx_schedule_repeat(interp, task);
- SCHEDULER_cache_valid_CLEAR(SELF);
- }
- }
- }
- }
-
- /* Sort the task list index */
-
- SCHEDULER_cache_valid_SET(SELF);
- }
}
@@ -472,7 +411,7 @@ completed timers or sleep events, sorting for priority, etc.
=head1 SEE ALSO
-F<docs/pdds/pdd15_objects.pod>.
+F<docs/pdds/pdd25_concurrency.pod>.
=cut
309 src/pmc/schedulermessage.pmc
View
@@ -0,0 +1,309 @@
+/*
+Copyright (C) 2001-2007, The Perl Foundation.
+$Id: $
+
+=head1 NAME
+
+src/pmc/schedulermessage.pmc - The concurrency scheduler message
+
+=head1 DESCRIPTION
+
+Implements a message passed between concurrency schedulers.
+
+=head2 Vtable Functions
+
+=over 4
+
+=cut
+
+*/
+
+#include "parrot/parrot.h"
+#include "parrot/scheduler_private.h"
+
+pmclass SchedulerMessage need_ext {
+
+/*
+
+=item C<void init()>
+
+Initialize a concurrency scheduler message object.
+
+=cut
+
+*/
+
+ void init() {
+ Parrot_SchedulerMessage * const core_struct
+ = mem_allocate_zeroed_typed(Parrot_SchedulerMessage);
+
+ /* Set flags for custom DOD mark and destroy. */
+ PObj_custom_mark_SET(SELF);
+ PObj_active_destroy_SET(SELF);
+
+ /* Set up the core struct. */
+ PMC_data(SELF) = core_struct;
+ core_struct->id = 0;
+ core_struct->type = CONST_STRING(INTERP, "");
+ core_struct->data = PMCNULL;
+ }
+
+/*
+
+=item C<void init_pmc(PMC *data)>
+
+Initializes a new SchedulerMessage with a C<Hash> PMC with any or all of the keys:
+
+=over 4
+
+=item C<id>
+
+An C<Integer> representing the unique identifier for this scheduler message.
+
+=item C<type>
+
+A C<String> representing the unique type for this scheduler message.
+
+=item C<data>
+
+A C<PMC> representing the data passed in this scheduler message.
+
+=back
+
+=cut
+
+*/
+
+ void init_pmc(PMC *data) {
+ PMC *elem;
+ Parrot_SchedulerMessage *core_struct;
+
+ if (! VTABLE_isa(INTERP, data, CONST_STRING(INTERP, "Hash")))
+ real_exception(INTERP, NULL, INVALID_OPERATION,
+ "message initializer must be a Hash");
+
+ SELF.init();
+ core_struct = PARROT_SCHEDULERMESSAGE(SELF);
+
+ elem = VTABLE_get_pmc_keyed_str(INTERP, data, CONST_STRING(INTERP, "id"));
+ if (! PMC_IS_NULL(elem))
+ core_struct->id = VTABLE_get_integer(INTERP, elem);
+
+ elem = VTABLE_get_pmc_keyed_str(INTERP, data, CONST_STRING(INTERP, "type"));
+ if (! PMC_IS_NULL(elem))
+ core_struct->type = VTABLE_get_string(INTERP, elem);
+
+ elem = VTABLE_get_pmc_keyed_str(INTERP, data, CONST_STRING(INTERP, "data"));
+ if (! PMC_IS_NULL(elem))
+ core_struct->data = elem;
+ }
+
+/*
+
+=item C<INTVAL get_integer()>
+
+Retrieve the message ID.
+
+=cut
+
+*/
+
+ INTVAL get_integer() {
+ Parrot_SchedulerMessage * core_struct = PARROT_SCHEDULERMESSAGE(SELF);
+ return core_struct->id;
+ }
+
+/*
+
+=item C<void set_integer_native(INTVAL value)>
+
+Set the message ID.
+
+=cut
+
+*/
+
+ void set_integer_native(INTVAL value) {
+ Parrot_SchedulerMessage * core_struct = PARROT_SCHEDULERMESSAGE(SELF);
+ core_struct->id = value;
+ }
+
+
+/*
+
+=item C<STRING * get_string()>
+
+Retrieve the message type.
+
+=cut
+
+*/
+
+ STRING * get_string() {
+ Parrot_SchedulerMessage * core_struct = PARROT_SCHEDULERMESSAGE(SELF);
+ return core_struct->type;
+ }
+
+/*
+
+=item C<void set_string_native(STRING *value)>
+
+Set the message type.
+
+=cut
+
+*/
+
+ void set_string_native(STRING *value) {
+ Parrot_SchedulerMessage * core_struct = PARROT_SCHEDULERMESSAGE(SELF);
+ core_struct->type = value;
+ }
+
+
+/*
+
+=item C<PMC *share_ro()>
+
+Set this PMC as shared.
+
+=cut
+
+*/
+
+ PMC *share_ro() {
+ PMC *shared_self;
+ Parrot_SchedulerMessage *shared_struct;
+
+ if (PObj_is_PMC_shared_TEST(SELF))
+ return SELF;
+
+ shared_self = pt_shared_fixup(INTERP, SELF);
+ shared_struct = PARROT_SCHEDULERMESSAGE(shared_self);
+
+ shared_struct->data = pt_shared_fixup(INTERP, shared_struct->data);
+
+ return shared_self;
+ }
+
+/*
+
+=item C<void destroy()>
+
+Free the scheduler message's underlying struct.
+
+=cut
+
+*/
+ void destroy() {
+ mem_sys_free(PMC_data(SELF));
+ }
+
+/*
+
+=item C<void mark()>
+
+Mark any referenced strings and PMCs.
+
+=cut
+
+*/
+ void mark() {
+ if (PARROT_SCHEDULERMESSAGE(SELF)) {
+ Parrot_SchedulerMessage * const core_struct = PARROT_SCHEDULERMESSAGE(SELF);
+
+ if (core_struct->data)
+ pobject_lives(interp, (PObj*)core_struct->data);
+ }
+ }
+
+/*
+
+=item C<void visit(visit_info *info)>
+
+This is used by freeze/thaw to visit the contents of the scheduler message.
+
+C<*info> is the visit info, (see F<include/parrot/pmc_freeze.h>).
+
+=cut
+
+*/
+
+ void visit(visit_info *info) {
+ Parrot_SchedulerMessage * const core_struct = PARROT_SCHEDULERMESSAGE(SELF);
+ PMC **pos;
+
+ /* 1) visit message data */
+ pos = &core_struct->data;
+ info->thaw_ptr = pos;
+ (info->visit_pmc_now)(INTERP, *pos, info);
+
+ }
+
+/*
+
+=item C<void freeze(visit_info *info)>
+
+Used to archive the scheduler message.
+
+=cut
+
+*/
+
+ void freeze(visit_info *info) {
+ IMAGE_IO *io = info->image_io;
+ Parrot_SchedulerMessage * const core_struct = PARROT_SCHEDULERMESSAGE(SELF);
+
+ /* 1) freeze message id */
+ VTABLE_push_integer(INTERP, io, core_struct->id);
+
+ /* 2) freeze message type */
+ VTABLE_push_string(INTERP, io, core_struct->type);
+ }
+
+/*
+
+=item C<void thaw(visit_info *info)>
+
+Used to unarchive the scheduler message.
+
+=cut
+
+*/
+
+ void thaw(visit_info *info) {
+ IMAGE_IO * const io = info->image_io;
+
+ /* 1. thaw message id */
+ const INTVAL id = VTABLE_shift_integer(INTERP, io);
+
+ /* 2. thaw message type */
+ STRING * const type = VTABLE_shift_string(INTERP, io);
+
+ /* Allocate the message's core data struct and set custom flags. */
+ SELF.init();
+
+ /* Set the message's id to the frozen id */
+ PARROT_SCHEDULERMESSAGE(SELF)->id = id;
+
+ /* Set the message's type to the frozen type */
+ PARROT_SCHEDULERMESSAGE(SELF)->type = type;
+ }
+
+}
+
+/*
+
+=back
+
+=head1 SEE ALSO
+
+F<docs/pdds/pdd25_concurrency.pod>.
+
+=cut
+
+*/
+
+/*
+ * Local variables:
+ * c-file-style: "parrot"
+ * End:
+ * vim: expandtab shiftwidth=4:
+ */
330 src/scheduler.c
View
@@ -27,10 +27,16 @@ exceptions, async I/O, and concurrent tasks (threads).
/* HEADERIZER BEGIN: static */
-PARROT_WARN_UNUSED_RESULT
-PARROT_CAN_RETURN_NULL
-static void* scheduler_runloop(ARGMOD(PMC *scheduler))
+static void scheduler_process_messages(PARROT_INTERP,
+ ARGMOD(PMC *scheduler))
+ __attribute__nonnull__(1)
+ __attribute__nonnull__(2)
+ FUNC_MODIFIES(*scheduler);
+
+static void scheduler_process_wait_list(PARROT_INTERP,
+ ARGMOD(PMC *scheduler))
__attribute__nonnull__(1)
+ __attribute__nonnull__(2)
FUNC_MODIFIES(*scheduler);
/* HEADERIZER END: static */
@@ -39,7 +45,7 @@ static void* scheduler_runloop(ARGMOD(PMC *scheduler))
=head2 Scheduler Interface Functions
-Functions that are used to interface with the concurrency scheduler.
+Functions to interface with the concurrency scheduler.
=over 4
@@ -51,14 +57,9 @@ Initalize the concurrency scheduler for the interpreter.
*/
-typedef void *(pt_start_routine_f)(void *);
-
void
Parrot_cx_init_scheduler(PARROT_INTERP)
{
-#if CX_DEBUG
- fprintf(stderr, "call to Parrot_cx_init_scheduler\n");
-#endif
if (!interp->parent_interpreter) {
PMC *scheduler;
@@ -106,7 +107,7 @@ void
Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
{
SCHEDULER_wake_requested_CLEAR(scheduler);
- Parrot_cx_refresh_task_list(interp);
+ Parrot_cx_refresh_task_list(interp, scheduler);
while (VTABLE_get_integer(interp, scheduler) > 0) {
PMC * const task = VTABLE_pop_pmc(interp, scheduler);
@@ -114,11 +115,7 @@ Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
PMC *type_pmc = VTABLE_get_attr_str(interp, task, CONST_STRING(interp, "type"));
STRING *type = VTABLE_get_string(interp, type_pmc);
- if (string_equal(interp, type, CONST_STRING(interp, "suspend_for_gc")) == 0) {
- Parrot_Task *task_struct = PARROT_TASK(task);
- pt_suspend_self_for_gc(task_struct->interp);
- }
- else if (string_equal(interp, type, CONST_STRING(interp, "callback")) == 0) {
+ if (string_equal(interp, type, CONST_STRING(interp, "callback")) == 0) {
Parrot_cx_invoke_callback(interp, task);
}
else if (string_equal(interp, type, CONST_STRING(interp, "timer")) == 0) {
@@ -143,7 +140,7 @@ Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
/* If the scheduler was flagged to terminate, make sure you process all
* tasks. */
if (SCHEDULER_terminate_requested_TEST(scheduler))
- Parrot_cx_refresh_task_list(interp);
+ Parrot_cx_refresh_task_list(interp, scheduler);
} /* end of pending tasks */
@@ -153,50 +150,24 @@ Parrot_cx_handle_tasks(PARROT_INTERP, ARGMOD(PMC *scheduler))
=item C<void Parrot_cx_refresh_task_list>
-Tell the scheduler to perform maintenance on the priority task list.
+Tell the scheduler to perform maintenance on its list of active tasks, checking
+for completed timers or sleep events, sorting for priority, checking for
+messages, etc.
=cut
*/
void
-Parrot_cx_refresh_task_list(PARROT_INTERP)
+Parrot_cx_refresh_task_list(PARROT_INTERP, ARGMOD(PMC *scheduler))
{
- if (interp->scheduler)
- Parrot_PCCINVOKE(interp, interp->scheduler,
- CONST_STRING(interp, "refresh_task_list"), "->");
- else
- real_exception(interp, NULL, INVALID_OPERATION,
- "Scheduler was not initialized for this interpreter.\n");
- return;
-}
+ scheduler_process_wait_list(interp, scheduler);
+ scheduler_process_messages(interp, scheduler);
-/*
-
-=item C<void Parrot_cx_runloop_sleep>
-
-Pause the scheduler runloop. Called when there are no more pending tasks in the
-scheduler's task list, to freeze the runloop until there are tasks to handle.
-
-Sleep is skipped if a wake signal was received since the last sleep, indicating
-more tasks to process. Sleep is also skipped if the scheduler is in the process
-of terminating, instead processing any remaining tasks as quickly as possible
-before finalization.
-
-=cut
-
-*/
+ /* TODO: Sort the task list index */
-void
-Parrot_cx_runloop_sleep(ARGMOD(PMC *scheduler))
-{
- Parrot_Scheduler * const sched_struct = PARROT_SCHEDULER(scheduler);
- if (SCHEDULER_terminate_requested_TEST(scheduler))
- return;
-
- if (!SCHEDULER_wake_requested_TEST(scheduler))
- COND_WAIT(sched_struct->condition, sched_struct->lock);
- SCHEDULER_wake_requested_CLEAR(scheduler);
+ SCHEDULER_cache_valid_SET(scheduler);
+ return;
}
/*
@@ -213,10 +184,8 @@ the scheduler's task list).
void
Parrot_cx_runloop_wake(PARROT_INTERP, ARGMOD(PMC *scheduler))
{
- Parrot_Scheduler * const sched_struct = PARROT_SCHEDULER(scheduler);
enable_event_checking(interp);
SCHEDULER_wake_requested_SET(scheduler);
- COND_SIGNAL(sched_struct->condition);
}
@@ -234,31 +203,16 @@ PARROT_API
void
Parrot_cx_runloop_end(PARROT_INTERP)
{
-#if CX_DEBUG
- fprintf(stderr, "call to Parrot_cx_runloop_end\n");
-#endif
- if (!interp->parent_interpreter) {
- Parrot_Scheduler * const sched_struct = PARROT_SCHEDULER(interp->scheduler);
- void *raw_retval = NULL;
-
- SCHEDULER_terminate_requested_SET(interp->scheduler);
- Parrot_cx_handle_tasks(interp, interp->scheduler);
-
-/* LOCK(sched_struct->lock);
- * SCHEDULER_terminate_requested_SET(interp->scheduler);
- * Parrot_cx_runloop_wake(interp, interp->scheduler);
- * UNLOCK(sched_struct->lock);
- *
- * JOIN(sched_struct->runloop_handle, raw_retval);
- */
- }
+ SCHEDULER_terminate_requested_SET(interp->scheduler);
+ Parrot_cx_handle_tasks(interp, interp->scheduler);
}
/*
=item C<void Parrot_cx_schedule_task>
-Add a task to scheduler's task list.
+Add a task to scheduler's task list. Cannot be called across
+interpreters/threads, must be called from within the interpreter's runloop.
=cut
@@ -367,10 +321,9 @@ Parrot_cx_schedule_callback(PARROT_INTERP,
/*
-=item C<void Parrot_schedule_suspend_for_gc>
+=item C<void Parrot_cx_request_suspend_for_gc>
-Create a new timer event due at C<diff> from now, repeated at C<interval>
-and running the passed C<sub>.
+Tell the scheduler to suspend for GC at the next safe pause.
=cut
@@ -378,13 +331,12 @@ and running the passed C<sub>.
PARROT_API
void
-Parrot_cx_schedule_suspend_for_gc(PARROT_INTERP)
+Parrot_cx_request_suspend_for_gc(PARROT_INTERP)
{
- PMC *event = pmc_new(interp, enum_class_Task);
-
- VTABLE_set_string_native(interp, event, CONST_STRING(interp, "suspend_for_gc"));
-
- Parrot_cx_schedule_task(interp, event);
+#if CX_DEBUG
+ fprintf(stderr, "requesting gc suspend [interp=%p]\n", interp);
+#endif
+ Parrot_cx_send_message(interp, CONST_STRING(interp, "suspend_for_gc"), PMCNULL);
}
/*
@@ -415,8 +367,8 @@ Parrot_cx_delete_task(PARROT_INTERP, ARGIN(PMC *task))
=item C<PMC * Parrot_cx_delete_suspend_for_gc>
-Remove a task that would suspend GC from the task list. (Provided for backward
-compatibility in the threads implementation.)
+Remove a message that would suspend GC from the message queue. (Provided for
+backward compatibility in the threads implementation.)
=cut
@@ -431,23 +383,31 @@ Parrot_cx_delete_suspend_for_gc(PARROT_INTERP)
Parrot_Scheduler * sched_struct = PARROT_SCHEDULER(interp->scheduler);
INTVAL num_tasks, index;
+#if CX_DEBUG
+ fprintf(stderr, "called delete_suspend_for_gc\n");
+#endif
+
+#if CX_DEBUG
+ fprintf(stderr, "locking msg_lock (delete) [interp=%p]\n", interp);
+#endif
+ LOCK(sched_struct->msg_lock);
/* Search the task index for GC suspend tasks */
- num_tasks = VTABLE_elements(interp, sched_struct->task_index);
+ num_tasks = VTABLE_elements(interp, sched_struct->messages);
for (index = 0; index < num_tasks; index++) {
- INTVAL tid = VTABLE_get_integer_keyed_int(interp, sched_struct->task_index, index);
- if (tid > 0) {
- PMC *task = VTABLE_get_pmc_keyed_int(interp, sched_struct->task_list, tid);
- if (!PMC_IS_NULL(task)) {
- PMC *type = VTABLE_get_attr_str(interp, task,
- CONST_STRING(interp, "type"));
- if (string_equal(interp, VTABLE_get_string(interp, type),
- CONST_STRING(interp, "suspend_for_gc")) == 0) {
- Parrot_cx_delete_task(interp, task);
- return task;
- }
- }
+ PMC *message = VTABLE_get_pmc_keyed_int(interp, sched_struct->messages, index);
+ if (!PMC_IS_NULL(message)
+ && string_equal(interp, VTABLE_get_string(interp, message),
+ CONST_STRING(interp, "suspend_for_gc")) == 0) {
+ VTABLE_delete_keyed_int(interp, sched_struct->messages, index);
+ UNLOCK(sched_struct->msg_lock);
+ return message;
}
}
+#if CX_DEBUG
+ fprintf(stderr, "unlocking msg_lock (delete) [interp=%p]\n", interp);
+#endif
+ UNLOCK(sched_struct->msg_lock);
+
}
else
real_exception(interp, NULL, INVALID_OPERATION,
@@ -483,6 +443,81 @@ Parrot_cx_add_handler(PARROT_INTERP, ARGIN(PMC *handler))
=back
+=head2 Scheduler Message Interface Functions
+
+Functions that are used to interface with the message queue in the concurrency
+scheduler.
+
+=over 4
+
+=item C<void Parrot_cx_send_message>
+
+Send a message to a scheduler in a different interpreter/thread.
+
+=cut
+
+*/
+
+PARROT_API
+void
+Parrot_cx_send_message(PARROT_INTERP, ARGIN(STRING *messagetype), ARGIN_NULLOK(PMC *payload))
+{
+ if(interp->scheduler) {
+ Parrot_Scheduler * sched_struct = PARROT_SCHEDULER(interp->scheduler);
+ PMC *message = pmc_new(interp, enum_class_SchedulerMessage);
+ VTABLE_set_string_native(interp, message, messagetype);
+ message = VTABLE_share_ro(interp, message);
+
+#if CX_DEBUG
+ fprintf(stderr, "sending message[interp=%p]\n", interp);
+#endif
+
+#if CX_DEBUG
+ fprintf(stderr, "locking msg_lock (send) [interp=%p]\n", interp);
+#endif
+ LOCK(sched_struct->msg_lock);
+ VTABLE_push_pmc(interp, sched_struct->messages, message);
+#if CX_DEBUG
+ fprintf(stderr, "unlocking msg_lock (send) [interp=%p]\n", interp);
+#endif
+ UNLOCK(sched_struct->msg_lock);
+ Parrot_cx_runloop_wake(interp, interp->scheduler);
+
+ }
+
+}
+
+/*
+
+=item C<void Parrot_cx_broadcast_message>
+
+Send a message to the schedulers in all interpreters/threads linked to this
+one.
+
+=cut
+
+*/
+
+PARROT_API
+void
+Parrot_cx_broadcast_message(PARROT_INTERP, ARGIN(STRING *messagetype), ARGIN_NULLOK(PMC *data))
+{
+ UINTVAL i;
+ LOCK(interpreter_array_mutex);
+ for (i = 0; i < n_interpreters; ++i) {
+ Parrot_Interp other_interp = interpreter_array[i];
+ if (interp == other_interp)
+ continue;
+ Parrot_cx_send_message(other_interp, messagetype, data);
+ }
+ UNLOCK(interpreter_array_mutex);
+
+}
+
+/*
+
+=back
+
=head2 Task Interface Functions
Functions that are used to interface with a specific task in the concurrency scheduler.
@@ -631,62 +666,91 @@ Functions that are only used within the scheduler.
=over 4
-=item C<static void* scheduler_runloop>
-
-The scheduler runloop is started by the interpreter. It manages the flow of
-concurrent scheduling for the parent interpreter, and for lightweight
-concurrent tasks running within that interpreter. More complex concurrent tasks
-have their own runloop.
+=item C<static void scheduler_process_wait_list>
-Currently the runloop is implented as a mutex/lock thread.
+Scheduler maintenance, scan the list of waiting tasks to see if any are ready
+to become active tasks.
=cut
*/
-PARROT_WARN_UNUSED_RESULT
-PARROT_CAN_RETURN_NULL
-static void*
-scheduler_runloop(ARGMOD(PMC *scheduler))
+static void
+scheduler_process_wait_list(PARROT_INTERP, ARGMOD(PMC *scheduler))
{
- Parrot_Scheduler * const sched_struct = PARROT_SCHEDULER(scheduler);
- int running = 1;
+ Parrot_Scheduler * sched_struct = PARROT_SCHEDULER(scheduler);
+ INTVAL num_tasks, index;
+
+ /* Sweep the wait list for completed timers */
+ num_tasks = VTABLE_elements(interp, sched_struct->wait_index);
+ for (index = 0; index < num_tasks; index++) {
+ INTVAL tid = VTABLE_get_integer_keyed_int(interp, sched_struct->wait_index, index);
+ if (tid > 0) {
+ PMC *task = VTABLE_get_pmc_keyed_int(interp, sched_struct->task_list, tid);
+ if (PMC_IS_NULL(task)) {
+ /* Cleanup expired tasks. */
+ VTABLE_set_integer_keyed_int(interp, sched_struct->wait_index, index, 0);
+ }
+ else {
+ /* Move the timer to the active task list if the timer has
+ * completed. */
+ FLOATVAL timer_end_time = VTABLE_get_number_keyed_int(interp,
+ task, PARROT_TIMER_NSEC);
+ if (timer_end_time <= Parrot_floatval_time()) {
+ VTABLE_push_integer(interp, sched_struct->task_index, tid);
+ VTABLE_set_integer_keyed_int(interp, sched_struct->wait_index, index, 0);
+ Parrot_cx_schedule_repeat(interp, task);
+ SCHEDULER_cache_valid_CLEAR(scheduler);
+ }
+ }
+ }
+ }
+}
-#if CX_DEBUG
- fprintf(stderr, "started scheduler runloop\n");
-#endif
- LOCK(sched_struct->lock);
+/*
+
+=item C<static void scheduler_process_messages>
+
+Scheduler maintenance, scan the list of messages sent from other schedulers and
+take appropriate action on any received.
+
+=cut
+
+*/
+
+static void
+scheduler_process_messages(PARROT_INTERP, ARGMOD(PMC *scheduler))
+{
+ Parrot_Scheduler * sched_struct = PARROT_SCHEDULER(scheduler);
+ INTVAL num_messages, index;
+ PMC *message;
- while (running) {
#if CX_DEBUG
- fprintf(stderr, "Before sleep\n");
+ fprintf(stderr, "processing messages [interp=%p]\n", interp);
#endif
- /* Sleep until a task is pending */
- Parrot_cx_runloop_sleep(scheduler);
+ while (VTABLE_elements(interp, sched_struct->messages) > 0) {
#if CX_DEBUG
- fprintf(stderr, "After sleep, before handling tasks\n");
+ fprintf(stderr, "locking msg_lock (process) [interp=%p]\n", interp);
#endif
- /* Process pending tasks, if there are any */
-/* running = Parrot_cx_handle_tasks(sched_struct->interp, scheduler);*/
+ LOCK(sched_struct->msg_lock);
+ message = VTABLE_pop_pmc(interp, sched_struct->messages);
#if CX_DEBUG
- fprintf(stderr, "After handling tasks\n");
+ fprintf(stderr, "unlocking msg_lock (process) [interp=%p]\n", interp);
#endif
-
- } /* end runloop */
-
+ UNLOCK(sched_struct->msg_lock);
+ if (!PMC_IS_NULL(message)
+ && string_equal(interp, VTABLE_get_string(interp, message),
+ CONST_STRING(interp, "suspend_for_gc")) == 0) {
#if CX_DEBUG
- fprintf(stderr, "ended scheduler runloop\n");
+ fprintf(stderr, "found a suspend, suspending [interp=%p]\n", interp);
#endif
+ pt_suspend_self_for_gc(interp);
+ }
+ }
- UNLOCK(sched_struct->lock);
-
- /*
- COND_DESTROY(sched_struct->condition);
- MUTEX_DESTROY(sched_struct->lock);
- */
-
- return NULL;
}
/*
65 src/thread.c
View
@@ -453,7 +453,7 @@ thread_func(ARGIN_NULLOK(void *arg))
* thread is finito
*/
LOCK(interpreter_array_mutex);
- DEBUG_ONLY(fprintf(stderr, "marking an thread as finished"));
+ DEBUG_ONLY(fprintf(stderr, "marking an thread as finished\n"));
interp->thread_data->state |= THREAD_STATE_FINISHED;
tid = interp->thread_data->tid;
@@ -464,7 +464,7 @@ thread_func(ARGIN_NULLOK(void *arg))
}
if (interp->thread_data->state & THREAD_STATE_DETACHED) {
interpreter_array[tid] = NULL;
- DEBUG_ONLY(fprintf(stderr, "really destroying an interpreter [exit while detached]"));
+ DEBUG_ONLY(fprintf(stderr, "really destroying an interpreter [exit while detached]\n"));
Parrot_really_destroy(interp, 0, NULL);
}
else if (interp->thread_data->state & THREAD_STATE_JOINED) {
@@ -902,7 +902,7 @@ remove_queued_suspend_gc(PARROT_INTERP)
mem_sys_free(ev);
mem_sys_free(cur);
cur = NULL;
- DEBUG_ONLY(fprintf(stderr, "%p: remove_queued_suspend_gc: got one", interp));
+ DEBUG_ONLY(fprintf(stderr, "%p: remove_queued_suspend_gc: got one\n", interp));
}
queue_unlock(queue);
@@ -935,7 +935,7 @@ pt_gc_count_threads(PARROT_INTERP)
continue;
++count;
}
- DEBUG_ONLY(fprintf(stderr, "found %d threads", count));
+ DEBUG_ONLY(fprintf(stderr, "found %d threads\n", count));
return count;
}
@@ -956,12 +956,12 @@ pt_gc_wait_for_stage(PARROT_INTERP, thread_gc_stage_enum from_stage,
Shared_gc_info *info = shared_gc_info;
int thread_count;
- DEBUG_ONLY(fprintf(stderr, "%p: gc_wait_for_stage: %d->%d", interp, from_stage, to_stage));
+ DEBUG_ONLY(fprintf(stderr, "%p: gc_wait_for_stage: %d->%d\n", interp, from_stage, to_stage));
/* XXX well-timed thread death can mess this up */
LOCK(interpreter_array_mutex);
- DEBUG_ONLY(fprintf(stderr, "%p: got lock", interp));
+ DEBUG_ONLY(fprintf(stderr, "%p: got lock\n", interp));
thread_count = pt_gc_count_threads(interp);
PARROT_ASSERT(info->gc_stage == from_stage);
@@ -975,7 +975,7 @@ pt_gc_wait_for_stage(PARROT_INTERP, thread_gc_stage_enum from_stage,
++info->num_reached;
- DEBUG_ONLY(fprintf(stderr, "%p: gc_wait_for_stage: got %d", interp, info->num_reached));
+ DEBUG_ONLY(fprintf(stderr, "%p: gc_wait_for_stage: got %d\n", interp, info->num_reached));
if (info->num_reached == thread_count) {
info->gc_stage = to_stage;
@@ -1033,21 +1033,21 @@ assumed held.
static void
pt_suspend_one_for_gc(PARROT_INTERP)
{
- DEBUG_ONLY(fprintf(stderr, "suspend one: %p", interp));
+ DEBUG_ONLY(fprintf(stderr, "suspend one: %p\n", interp));
if (is_suspended_for_gc(interp)) {
- DEBUG_ONLY(fprintf(stderr, "ignoring already suspended"));
+ DEBUG_ONLY(fprintf(stderr, "ignoring already suspended\n"));
return;
}
if (interp->thread_data->state & THREAD_STATE_GC_WAKEUP) {
- DEBUG_ONLY(fprintf(stderr, "just waking it up"));
+ DEBUG_ONLY(fprintf(stderr, "just waking it up\n"));
interp->thread_data->state |= THREAD_STATE_SUSPENDED_GC;
COND_SIGNAL(interp->thread_data->interp_cond);
}
else {
- DEBUG_ONLY(fprintf(stderr, "queuing event"));
+ DEBUG_ONLY(fprintf(stderr, "queuing event\n"));
interp->thread_data->state |= THREAD_STATE_SUSPEND_GC_REQUESTED;
- Parrot_cx_schedule_suspend_for_gc(interp);
+ Parrot_cx_request_suspend_for_gc(interp);
}
}
@@ -1066,7 +1066,7 @@ pt_suspend_all_for_gc(PARROT_INTERP)
{
UINTVAL i;
- DEBUG_ONLY(fprintf(stderr, "suspend_all_for_gc [interp=%p]", interp));
+ DEBUG_ONLY(fprintf(stderr, "suspend_all_for_gc [interp=%p]\n", interp));
LOCK(interpreter_array_mutex);
interp->thread_data->state |= THREAD_STATE_SUSPENDED_GC;
@@ -1095,7 +1095,7 @@ pt_suspend_all_for_gc(PARROT_INTERP)
* so we have a suspend event in our queue to ignore
*/
/* XXX still reachable? */
- DEBUG_ONLY(fprintf(stderr, "apparently someone else is doing it [%p]", other_interp));
+ DEBUG_ONLY(fprintf(stderr, "apparently someone else is doing it [%p]\n", other_interp));
fprintf(stderr, "??? found later (%p)\n", other_interp);
successp = Parrot_cx_delete_suspend_for_gc(interp);
PARROT_ASSERT(successp);
@@ -1138,19 +1138,20 @@ pt_suspend_self_for_gc(PARROT_INTERP)
{
PARROT_ASSERT(interp);
PARROT_ASSERT(!interp->arena_base->DOD_block_level);
- DEBUG_ONLY(fprintf(stderr, "%p: suspend_self_for_gc", interp));
+ DEBUG_ONLY(fprintf(stderr, "%p: suspend_self_for_gc\n", interp));
/* since we are modifying our own state, we need to lock
* the interpreter_array_mutex.
*/
LOCK(interpreter_array_mutex);
- DEBUG_ONLY(fprintf(stderr, "%p: got lock", interp));
+ DEBUG_ONLY(fprintf(stderr, "%p: got lock\n", interp));
PARROT_ASSERT(interp->thread_data->state &
(THREAD_STATE_SUSPEND_GC_REQUESTED | THREAD_STATE_SUSPENDED_GC));
if (interp->thread_data->state & THREAD_STATE_SUSPEND_GC_REQUESTED) {
- DEBUG_ONLY(fprintf(stderr, "remove queued request"));
+ DEBUG_ONLY(fprintf(stderr, "remove queued request\n"));
while (!PMC_IS_NULL(Parrot_cx_delete_suspend_for_gc(interp)));
+ DEBUG_ONLY(fprintf(stderr, "removed all queued requests\n"));
interp->thread_data->state &= ~THREAD_STATE_SUSPEND_GC_REQUESTED;
}
if (!(interp->thread_data->state & THREAD_STATE_SUSPENDED_GC)) {
@@ -1252,7 +1253,7 @@ pt_thread_join(NOTNULL(Parrot_Interp parent), UINTVAL tid)
interpreter_array[tid] = NULL;
running_threads--;
- DEBUG_ONLY(fprintf(stderr, "destroying an interpreter [join]"));
+ DEBUG_ONLY(fprintf(stderr, "destroying an interpreter [join]\n"));
if (Interp_debug_TEST(parent, PARROT_THREAD_DEBUG_FLAG))
fprintf(stderr, "running threads %d\n", running_threads);
@@ -1366,7 +1367,7 @@ detach(UINTVAL tid)
if (interp->thread_data->state & THREAD_STATE_FINISHED) {
interpreter_array[tid] = NULL;
- DEBUG_ONLY(fprintf(stderr, "destroying an interpreter [detach]"));
+ DEBUG_ONLY(fprintf(stderr, "destroying an interpreter [detach]\n"));
Parrot_really_destroy(interp, 0, NULL);
interp = NULL;
}
@@ -1432,7 +1433,7 @@ void
pt_add_to_interpreters(PARROT_INTERP, Parrot_Interp new_interp)
{
size_t i;
- DEBUG_ONLY(fprintf(stderr, "interp = %p", interp));
+ DEBUG_ONLY(fprintf(stderr, "interp = %p\n", interp));
if (!new_interp) {
/*
@@ -1499,7 +1500,7 @@ pt_add_to_interpreters(PARROT_INTERP, Parrot_Interp new_interp)
=item C<void pt_DOD_start_mark>
-DOD is gonna start the mark phase. In the presence of shared PMCs, we can only
+DOD is going to start the mark phase. In the presence of shared PMCs, we can only
run one DOD run at a time because C<< PMC->next_for_GC >> may be changed.
C<flags> are the DOD flags. We check if we need to collect shared objects or
@@ -1520,7 +1521,7 @@ pt_DOD_start_mark(PARROT_INTERP)
Shared_gc_info *info;
int block_level;
- DEBUG_ONLY(fprintf(stderr, "%p: pt_DOD_start_mark", interp));
+ DEBUG_ONLY(fprintf(stderr, "%p: pt_DOD_start_mark\n", interp));
/* if no other threads are running, we are safe */
if (!running_threads)
return;
@@ -1528,7 +1529,7 @@ pt_DOD_start_mark(PARROT_INTERP)
info = get_pool(interp);
PARROT_ATOMIC_INT_GET(block_level, info->gc_block_level);
- DEBUG_ONLY(fprintf(stderr, "start threaded mark"));
+ DEBUG_ONLY(fprintf(stderr, "start threaded mark\n"));
/*
* TODO now check, if we are the owner of a shared memory pool
* if yes:
@@ -1541,12 +1542,12 @@ pt_DOD_start_mark(PARROT_INTERP)
if (interp->thread_data->state & THREAD_STATE_SUSPENDED_GC) {
PARROT_ASSERT(!(interp->thread_data->state &
THREAD_STATE_SUSPEND_GC_REQUESTED));
- DEBUG_ONLY(fprintf(stderr, "already suspended..."));
+ DEBUG_ONLY(fprintf(stderr, "already suspended...\n"));
UNLOCK(interpreter_array_mutex);
}
else if (block_level) {
/* unthreaded collection */
- DEBUG_ONLY(fprintf(stderr, "... but blocked"));
+ DEBUG_ONLY(fprintf(stderr, "... but blocked\n"));
/* holding the lock */
return;
@@ -1558,27 +1559,27 @@ pt_DOD_start_mark(PARROT_INTERP)
interp->thread_data->state &= ~THREAD_STATE_SUSPEND_GC_REQUESTED;
interp->thread_data->state |= THREAD_STATE_SUSPENDED_GC;
- DEBUG_ONLY(fprintf(stderr, "%p: detected request", interp));
+ DEBUG_ONLY(fprintf(stderr, "%p: detected request\n", interp));
UNLOCK(interpreter_array_mutex);
}
else {
/* we need to stop the world */
- DEBUG_ONLY(fprintf(stderr, "stop the world"));
+ DEBUG_ONLY(fprintf(stderr, "stop the world\n"));
UNLOCK(interpreter_array_mutex);
pt_suspend_all_for_gc(interp);
}
- DEBUG_ONLY(fprintf(stderr, "%p: wait for stage", interp));
+ DEBUG_ONLY(fprintf(stderr, "%p: wait for stage\n", interp));
pt_gc_wait_for_stage(interp, THREAD_GC_STAGE_NONE, THREAD_GC_STAGE_MARK);
- DEBUG_ONLY(fprintf(stderr, "actually mark"));
+ DEBUG_ONLY(fprintf(stderr, "actually mark\n"));
/*
* we can't allow parallel running DODs both would mess with shared PMCs
* next_for_GC pointers
*/
LOCK(interpreter_array_mutex);
- DEBUG_ONLY(fprintf(stderr, "got marking lock"));
+ DEBUG_ONLY(fprintf(stderr, "got marking lock\n"));
}
/*
@@ -1639,9 +1640,9 @@ pt_DOD_stop_mark(PARROT_INTERP)
fprintf(stderr, "%p: extraneous suspend_gc event\n", (void *)interp);
}
- DEBUG_ONLY(fprintf(stderr, "%p: unlock", interp));
+ DEBUG_ONLY(fprintf(stderr, "%p: unlock\n", interp));
UNLOCK(interpreter_array_mutex);
- DEBUG_ONLY(fprintf(stderr, "wait to sweep"));
+ DEBUG_ONLY(fprintf(stderr, "wait to sweep\n"));
pt_gc_wait_for_stage(interp, THREAD_GC_STAGE_MARK, THREAD_GC_STAGE_SWEEP);
}
Please sign in to comment.
Something went wrong with that request. Please try again.