Skip to content

Commit

Permalink
score: Fix _Thread_queue_Flush_critical()
Browse files Browse the repository at this point in the history
The thread queue extract operations performed by the
_Thread_queue_Flush_critical() may result in a priority change of the
thread queue owner. Carry out the scheduler priority update operation.
This is especially important in SMP configurations.

Closes #3236.
  • Loading branch information
sebhub committed Nov 16, 2017
1 parent 79a998d commit 9c30c31
Show file tree
Hide file tree
Showing 2 changed files with 84 additions and 37 deletions.
18 changes: 14 additions & 4 deletions cpukit/score/src/threadqflush.c
Expand Up @@ -66,13 +66,15 @@ size_t _Thread_queue_Flush_critical(
Thread_queue_Context *queue_context
)
{
size_t flushed;
Chain_Control unblock;
Chain_Node *node;
Chain_Node *tail;
size_t flushed;
Chain_Control unblock;
Thread_Control *owner;
Chain_Node *node;
Chain_Node *tail;

flushed = 0;
_Chain_Initialize_empty( &unblock );
owner = queue->owner;

while ( true ) {
Thread_queue_Heads *heads;
Expand Down Expand Up @@ -131,6 +133,14 @@ size_t _Thread_queue_Flush_critical(
node = next;
} while ( node != tail );

if ( owner != NULL ) {
ISR_lock_Context lock_context;

_Thread_State_acquire( owner, &lock_context );
_Scheduler_Update_priority( owner );
_Thread_State_release( owner, &lock_context );
}

_Thread_Dispatch_enable( cpu_self );
} else {
_Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
Expand Down
103 changes: 70 additions & 33 deletions testsuites/smptests/smpmutex01/init.c
Expand Up @@ -38,15 +38,16 @@ typedef enum {
REQ_WAKE_UP_HELPER = RTEMS_EVENT_1,
REQ_MTX_OBTAIN = RTEMS_EVENT_2,
REQ_MTX_OBTAIN_TIMEOUT = RTEMS_EVENT_3,
REQ_MTX_RELEASE = RTEMS_EVENT_4,
REQ_MTX_2_OBTAIN = RTEMS_EVENT_5,
REQ_MTX_2_RELEASE = RTEMS_EVENT_6,
REQ_SEM_OBTAIN_RELEASE = RTEMS_EVENT_7,
REQ_SEM_RELEASE = RTEMS_EVENT_8,
REQ_SET_DONE = RTEMS_EVENT_9,
REQ_WAIT_FOR_DONE = RTEMS_EVENT_10,
REQ_SEND_EVENT_2 = RTEMS_EVENT_11,
REQ_SEND_EVENT_3 = RTEMS_EVENT_12
REQ_MTX_OBTAIN_UNSATISFIED = RTEMS_EVENT_4,
REQ_MTX_RELEASE = RTEMS_EVENT_5,
REQ_MTX_2_OBTAIN = RTEMS_EVENT_6,
REQ_MTX_2_RELEASE = RTEMS_EVENT_7,
REQ_SEM_OBTAIN_RELEASE = RTEMS_EVENT_8,
REQ_SEM_RELEASE = RTEMS_EVENT_9,
REQ_SET_DONE = RTEMS_EVENT_10,
REQ_WAIT_FOR_DONE = RTEMS_EVENT_11,
REQ_SEND_EVENT_2 = RTEMS_EVENT_12,
REQ_SEND_EVENT_3 = RTEMS_EVENT_13
} request_id;

typedef enum {
Expand Down Expand Up @@ -248,6 +249,14 @@ static void obtain_timeout(test_context *ctx)
rtems_test_assert(sc == RTEMS_TIMEOUT);
}

/*
 * Block on the test mutex and expect the wait to be terminated by a
 * flush of the semaphore, which yields RTEMS_UNSATISFIED instead of
 * ownership of the mutex.
 */
static void obtain_unsatisfied(test_context *ctx)
{
  rtems_status_code status;

  status = rtems_semaphore_obtain(ctx->mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(status == RTEMS_UNSATISFIED);
}

static void release(test_context *ctx)
{
rtems_status_code sc;
Expand All @@ -256,6 +265,14 @@ static void release(test_context *ctx)
rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}

/*
 * Flush the test mutex, unblocking every thread currently enqueued on
 * it; each waiter observes RTEMS_UNSATISFIED from its obtain call.
 */
static void flush(test_context *ctx)
{
  rtems_status_code status;

  status = rtems_semaphore_flush(ctx->mtx);
  rtems_test_assert(status == RTEMS_SUCCESSFUL);
}

static void obtain_2(test_context *ctx)
{
rtems_status_code sc;
Expand Down Expand Up @@ -416,6 +433,11 @@ static void worker(rtems_task_argument arg)
++ctx->generation[id];
}

if ((events & REQ_MTX_OBTAIN_UNSATISFIED) != 0) {
obtain_unsatisfied(ctx);
++ctx->generation[id];
}

if ((events & REQ_MTX_RELEASE) != 0) {
release(ctx);
++ctx->generation[id];
Expand Down Expand Up @@ -463,11 +485,14 @@ static void test_init(test_context *ctx)
start_task(ctx, A_1, worker, 1, SCHED_A);
start_task(ctx, A_2_0, worker, 2, SCHED_A);
start_task(ctx, A_2_1, worker, 2, SCHED_A);
start_task(ctx, B_4, worker, 4, SCHED_B);
start_task(ctx, B_5_0, worker, 5, SCHED_B);
start_task(ctx, B_5_1, worker, 5, SCHED_B);
start_task(ctx, H_A, helper, 3, SCHED_A);
start_task(ctx, H_B, helper, 6, SCHED_B);

if (rtems_get_processor_count() >= PART_COUNT) {
start_task(ctx, B_4, worker, 4, SCHED_B);
start_task(ctx, B_5_0, worker, 5, SCHED_B);
start_task(ctx, B_5_1, worker, 5, SCHED_B);
start_task(ctx, H_B, helper, 6, SCHED_B);
}

sc = rtems_semaphore_create(
rtems_build_name('M', 'T', 'X', '1'),
Expand Down Expand Up @@ -510,6 +535,19 @@ static void test_simple_inheritance(test_context *ctx)
check_generations(ctx, A_1, NONE);
}

/*
 * Regression test for #3236: a flush of a priority-inheritance mutex
 * must update the scheduler priority of the mutex owner, since the
 * extract operations in _Thread_queue_Flush_critical() may lower it.
 */
static void test_flush_inheritance(test_context *ctx)
{
/* Main task starts at its base priority */
assert_prio(ctx, M, 3);
obtain(ctx);
/* A_1 (priority 1) blocks on the mutex; owner inherits priority 1 */
send_event(ctx, A_1, REQ_MTX_OBTAIN_UNSATISFIED);
check_generations(ctx, NONE, NONE);
assert_prio(ctx, M, 1);
/* Flush ejects A_1 with RTEMS_UNSATISFIED ... */
flush(ctx);
check_generations(ctx, A_1, NONE);
/* ... and the owner must drop back to its base priority */
assert_prio(ctx, M, 3);
release(ctx);
}

static void test_dequeue_order_one_scheduler_instance(test_context *ctx)
{
obtain(ctx);
Expand Down Expand Up @@ -909,33 +947,32 @@ static void test_omip_yield(test_context *ctx)
check_generations(ctx, B_5_0, NONE);
}

static void test(void)
static void test(test_context *ctx)
{
test_context *ctx = &test_instance;

test_init(ctx);
test_task_get_priority_not_defined(ctx);
test_simple_inheritance(ctx);
test_dequeue_order_one_scheduler_instance(ctx);
test_mixed_queue_two_scheduler_instances(ctx);
test_mixed_queue_two_scheduler_instances_sem_only(ctx);
test_simple_inheritance_two_scheduler_instances(ctx);
test_nested_inheritance_two_scheduler_instances(ctx);
test_dequeue_order_two_scheduler_instances(ctx);
test_omip_pre_emption(ctx);
test_omip_rescue(ctx);
test_omip_timeout(ctx);
test_omip_yield(ctx);

if (rtems_get_processor_count() >= PART_COUNT) {
test_task_get_priority_not_defined(ctx);
test_simple_inheritance(ctx);
test_dequeue_order_one_scheduler_instance(ctx);
test_mixed_queue_two_scheduler_instances(ctx);
test_mixed_queue_two_scheduler_instances_sem_only(ctx);
test_simple_inheritance_two_scheduler_instances(ctx);
test_nested_inheritance_two_scheduler_instances(ctx);
test_dequeue_order_two_scheduler_instances(ctx);
test_omip_pre_emption(ctx);
test_omip_rescue(ctx);
test_omip_timeout(ctx);
test_omip_yield(ctx);
}

test_flush_inheritance(ctx);
}

static void Init(rtems_task_argument arg)
{
TEST_BEGIN();

if (rtems_get_processor_count() >= PART_COUNT) {
test();
}

test(&test_instance);
TEST_END();
rtems_test_exit(0);
}
Expand Down

0 comments on commit 9c30c31

Please sign in to comment.