Use the FSA for ConcBlockingQueue

This makes sending things into a Channel much quicker.

MasterDuke17 committed Apr 30, 2020
1 parent c0fe97a commit c595142
Showing 1 changed file with 9 additions and 9 deletions.
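For context, the change swaps the general-purpose MVM_calloc/MVM_free pair for MoarVM's fixed size allocator (FSA), reached through tc->instance->fsa, when allocating the queue body and node structs. The sketch below illustrates the before/after allocation pattern, using only the call signatures visible in the diff; the alloc_node/free_node helpers are hypothetical and not part of the commit.

#include "moar.h"  /* MoarVM umbrella header; declares the FSA functions used below */

/* Hypothetical helper: allocate a zeroed queue node.
 * Before this commit: MVM_calloc(1, sizeof(MVMConcBlockingQueueNode));
 * After: a zeroed block from the per-instance fixed size allocator, which
 * recycles blocks of the same size class instead of going through
 * malloc/free on every queue operation. */
static MVMConcBlockingQueueNode * alloc_node(MVMThreadContext *tc) {
    return MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa,
        sizeof(MVMConcBlockingQueueNode));
}

/* Hypothetical helper: release a node back to the FSA. The block size is
 * passed again on free so the allocator can return the block to the bin
 * for that size class. */
static void free_node(MVMThreadContext *tc, MVMConcBlockingQueueNode *node) {
    MVM_fixed_size_free(tc, tc->instance->fsa,
        sizeof(MVMConcBlockingQueueNode), node);
}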
18 changes: 9 additions & 9 deletions src/6model/reprs/ConcBlockingQueue.c
@@ -20,7 +20,7 @@ static MVMObject * type_object_for(MVMThreadContext *tc, MVMObject *HOW) {
 /* Initializes a new instance. */
 static void initialize(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, void *data) {
     MVMConcBlockingQueue *cbq = (MVMConcBlockingQueue*)root;
-    MVMConcBlockingQueueBody *body = MVM_calloc(1, sizeof(MVMConcBlockingQueueBody));
+    MVMConcBlockingQueueBody *body = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueBody));
     /* Initialize locks. */
     int init_stat;
 
@@ -35,7 +35,7 @@ static void initialize(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, voi
             uv_strerror(init_stat));
 
     /* Head and tail point to a null node. */
-    body->tail = body->head = MVM_calloc(1, sizeof(MVMConcBlockingQueueNode));
+    body->tail = body->head = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode));
     cbq->body = body;
 }
 
@@ -71,7 +71,7 @@ static void gc_free(MVMThreadContext *tc, MVMObject *obj) {
     MVMConcBlockingQueueNode *cur = body->head;
     while (cur) {
         MVMConcBlockingQueueNode *next = cur->next;
-        MVM_free(cur);
+        MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode), cur);
         cur = next;
     }
     body->head = body->tail = NULL;
@@ -82,7 +82,7 @@ static void gc_free(MVMThreadContext *tc, MVMObject *obj) {
     uv_cond_destroy(&body->head_cond);
 
     /* Clean up body */
-    MVM_free(body);
+    MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueBody), body);
 }
 
 static const MVMStorageSpec storage_spec = {
@@ -152,7 +152,7 @@ static void push(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, void *dat
         MVM_exception_throw_adhoc(tc,
             "Cannot store a null value in a concurrent blocking queue");
 
-    add = MVM_calloc(1, sizeof(MVMConcBlockingQueueNode));
+    add = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode));
 
     interval_id = MVM_telemetry_interval_start(tc, "ConcBlockingQueue.push");
     MVMROOT2(tc, root, to_add, {
@@ -196,7 +196,7 @@ static void unshift(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, void *
 
     interval_id = MVM_telemetry_interval_start(tc, "ConcBlockingQueue.unshift");
 
-    add = MVM_calloc(1, sizeof(MVMConcBlockingQueueNode));
+    add = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode));
 
     /* We'll need to hold both the head and the tail lock, in case head == tail
      * and push would update tail->next - without the tail lock, this could
@@ -248,7 +248,7 @@ static void shift(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, void *da
     });
 
     taken = body->head->next;
-    MVM_free(body->head);
+    MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode), body->head);
     body->head = taken;
     MVM_barrier();
     value->o = taken->value;
@@ -333,7 +333,7 @@ MVMObject * MVM_concblockingqueue_jit_poll(MVMThreadContext *tc, MVMObject *queu
 MVMObject * MVM_concblockingqueue_poll(MVMThreadContext *tc, MVMConcBlockingQueue *queue) {
     MVMConcBlockingQueue *cbq = (MVMConcBlockingQueue *)queue;
     MVMConcBlockingQueueBody *body = cbq->body;
-    MVMConcBlockingQueueNode *taken;
+    MVMConcBlockingQueueNode *taken;
     MVMObject *result = tc->instance->VMNull;
     unsigned int interval_id;
 
@@ -346,7 +346,7 @@ MVMObject * MVM_concblockingqueue_poll(MVMThreadContext *tc, MVMConcBlockingQueu
 
     if (MVM_load(&body->elems) > 0) {
         taken = body->head->next;
-        MVM_free(body->head);
+        MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode), body->head);
         body->head = taken;
         MVM_barrier();
         result = taken->value;