Fix possible access to fromspace in various async I/O functions
This error got copy&pasted 7 times: a locally type-cast variable was used but
not rooted. Since MVM_repr_box_str allocates and may therefore trigger a GC
run, the local variable could have become outdated, leaving it pointing into
fromspace. The pattern is sketched after the file summary below.
niner committed Apr 24, 2020
1 parent fd27422 commit 8333393
Showing 4 changed files with 16 additions and 24 deletions.
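For illustration, here is a simplified composite of the buggy pattern and its fix as applied in the hunks below. It is a sketch, not the exact code of any single call site, and it assumes msg_str already holds the error message string (in the real sites it comes from MVM_string_ascii_decode_nt just above). The cast local t is a plain copy of the async_task pointer, so when the allocating call inside the inner MVMROOT triggers a collection, async_task is updated but t keeps pointing at the old location.

    /* Buggy pattern: 't' is an unrooted copy of the 'async_task' pointer. */
    MVMROOT(tc, async_task, {
        MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
        MVMAsyncTask *t = (MVMAsyncTask *)async_task;   /* plain copy of the current address */
        MVM_repr_push_o(tc, arr, t->body.schedulee);
        MVMROOT(tc, arr, {
            /* Allocates and may trigger a GC run that moves async_task; 't' is not updated. */
            MVMObject *msg_box = MVM_repr_box_str(tc,
                tc->instance->boot_types.BOOTStr, msg_str);
            MVM_repr_push_o(tc, arr, msg_box);
        });
        MVM_repr_push_o(tc, t->body.queue, arr);        /* may dereference fromspace */
    });

    /* Fixed pattern: drop 't' and re-derive the pointer from the rooted
     * 'async_task' at every use, so a moved object is picked up: */
    MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
    MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);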
20 changes: 8 additions & 12 deletions src/io/asyncsocket.c
@@ -126,8 +126,7 @@ static void read_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_t
/* Error; need to notify. */
MVMROOT(tc, async_task, {
MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
-MVMAsyncTask *t = (MVMAsyncTask *)async_task;
-MVM_repr_push_o(tc, arr, t->body.schedulee);
+MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr);
MVMROOT(tc, arr, {
@@ -137,7 +136,7 @@ static void read_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_t
tc->instance->boot_types.BOOTStr, msg_str);
MVM_repr_push_o(tc, arr, msg_box);
});
-MVM_repr_push_o(tc, t->body.queue, arr);
+MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
});
MVM_io_eventloop_remove_active_work(tc, &(ri->work_idx));
}
@@ -273,8 +272,7 @@ static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_
if (!handle_data->handle || uv_is_closing((uv_handle_t *)handle_data->handle)) {
MVMROOT(tc, async_task, {
MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
-MVMAsyncTask *t = (MVMAsyncTask *)async_task;
-MVM_repr_push_o(tc, arr, t->body.schedulee);
+MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
MVMROOT(tc, arr, {
MVMString *msg_str = MVM_string_ascii_decode_nt(tc,
@@ -283,7 +281,7 @@ static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_
tc->instance->boot_types.BOOTStr, msg_str);
MVM_repr_push_o(tc, arr, msg_box);
});
-MVM_repr_push_o(tc, t->body.queue, arr);
+MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
});
return;
}
@@ -306,8 +304,7 @@ static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_
/* Error; need to notify. */
MVMROOT(tc, async_task, {
MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
-MVMAsyncTask *t = (MVMAsyncTask *)async_task;
-MVM_repr_push_o(tc, arr, t->body.schedulee);
+MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
MVMROOT(tc, arr, {
MVMString *msg_str = MVM_string_ascii_decode_nt(tc,
@@ -316,7 +313,7 @@ static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_
tc->instance->boot_types.BOOTStr, msg_str);
MVM_repr_push_o(tc, arr, msg_box);
});
-MVM_repr_push_o(tc, t->body.queue, arr);
+MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
});

/* Cleanup handle. */
@@ -591,8 +588,7 @@ static void connect_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *asyn
/* Error; need to notify. */
MVMROOT(tc, async_task, {
MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
-MVMAsyncTask *t = (MVMAsyncTask *)async_task;
-MVM_repr_push_o(tc, arr, t->body.schedulee);
+MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTIO);
MVMROOT(tc, arr, {
MVMString *msg_str = MVM_string_ascii_decode_nt(tc,
@@ -605,7 +601,7 @@ static void connect_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *asyn
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
});
-MVM_repr_push_o(tc, t->body.queue, arr);
+MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
});

/* Cleanup handles. */
10 changes: 4 additions & 6 deletions src/io/asyncsocketudp.c
@@ -167,8 +167,7 @@ static void read_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_t
/* Error; need to notify. */
MVMROOT(tc, async_task, {
MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
-MVMAsyncTask *t = (MVMAsyncTask *)async_task;
-MVM_repr_push_o(tc, arr, t->body.schedulee);
+MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr);
MVMROOT(tc, arr, {
@@ -178,7 +177,7 @@ static void read_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_t
tc->instance->boot_types.BOOTStr, msg_str);
MVM_repr_push_o(tc, arr, msg_box);
});
-MVM_repr_push_o(tc, t->body.queue, arr);
+MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
});
}
}
@@ -319,8 +318,7 @@ static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_
/* Error; need to notify. */
MVMROOT(tc, async_task, {
MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
-MVMAsyncTask *t = (MVMAsyncTask *)async_task;
-MVM_repr_push_o(tc, arr, t->body.schedulee);
+MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
MVMROOT(tc, arr, {
MVMString *msg_str = MVM_string_ascii_decode_nt(tc,
@@ -329,7 +327,7 @@ static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_
tc->instance->boot_types.BOOTStr, msg_str);
MVM_repr_push_o(tc, arr, msg_box);
});
-MVM_repr_push_o(tc, t->body.queue, arr);
+MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
});

/* Cleanup handle. */
5 changes: 2 additions & 3 deletions src/io/filewatchers.c
@@ -53,8 +53,7 @@ static void setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task,
/* Error; need to notify. */
MVMROOT(tc, async_task, {
MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
-MVMAsyncTask *t = (MVMAsyncTask *)async_task;
-MVM_repr_push_o(tc, arr, t->body.schedulee);
+MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
MVMROOT(tc, arr, {
@@ -64,7 +63,7 @@ static void setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task,
tc->instance->boot_types.BOOTStr, msg_str);
MVM_repr_push_o(tc, arr, msg_box);
});
-MVM_repr_push_o(tc, t->body.queue, arr);
+MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
});
}
}
5 changes: 2 additions & 3 deletions src/io/procops.c
@@ -269,8 +269,7 @@ static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_
/* Error; need to notify. */
MVMROOT(tc, async_task, {
MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
-MVMAsyncTask *t = (MVMAsyncTask *)async_task;
-MVM_repr_push_o(tc, arr, t->body.schedulee);
+MVM_repr_push_o(tc, arr, ((MVMAsyncTask *)async_task)->body.schedulee);
MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
MVMROOT(tc, arr, {
MVMString *msg_str = MVM_string_ascii_decode_nt(tc,
@@ -285,7 +284,7 @@ static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_
tc->instance->boot_types.BOOTStr, msg_str);
MVM_repr_push_o(tc, arr, msg_box);
});
-MVM_repr_push_o(tc, t->body.queue, arr);
+MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
});

/* Cleanup handle. */
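Why re-deriving the cast is enough: as far as I can tell from MoarVM's rooting machinery, MVMROOT registers the address of the named variable as a temporary GC root, so the collector rewrites async_task in place if the object moves, while a cast stored in a separate local like the removed t is just a value copy the collector never sees. Conceptually the macro behaves roughly like the following (simplified; not the literal definition from src/gc/roots.h):

    #define MVMROOT(tc, var, block) do { \
        /* The collector knows about &var and updates it if the object moves. */ \
        MVM_gc_root_temp_push(tc, (MVMCollectable **)&(var)); \
        block \
        MVM_gc_root_temp_pop(tc); \
    } while (0)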
