linux-gen: sched: fix SP scheduler hang in process mode
The SP scheduler hangs in the process mode performance test
because its global data structure was not created in a shared
memory region.

Signed-off-by: Yi He <yi.he@linaro.org>
Reviewed-and-tested-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
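
For context, a minimal sketch of the pattern this fix applies (not part of the commit; the names my_state_t, my_init_global, my_term_global and the "my_state" shm name are illustrative, while odp_shm_reserve(), odp_shm_addr(), odp_shm_free() and ODP_CACHE_LINE_SIZE are the real ODP APIs used in the patch below). A plain static global is private to each forked worker process, so state written by one process is invisible to the others; reserving the state from shared memory during global init gives every process the same block.

#include <string.h>
#include <odp_api.h>

/* Illustrative state type; the scheduler's real type is sched_global_t. */
typedef struct {
	odp_shm_t shm;      /* keep the handle so the term stage can free it */
	int       counter;  /* some shared state */
} my_state_t;

/* Before the fix: a private copy in every forked process. */
/* static my_state_t my_state; */

/* After the fix: a pointer into a shared memory reservation. */
static my_state_t *my_state;

static int my_init_global(void)
{
	odp_shm_t shm = odp_shm_reserve("my_state", sizeof(my_state_t),
					ODP_CACHE_LINE_SIZE, 0);

	my_state = odp_shm_addr(shm);
	if (my_state == NULL)
		return -1;

	memset(my_state, 0, sizeof(my_state_t));
	my_state->shm = shm;

	return 0;
}

static int my_term_global(void)
{
	/* Copy the handle out before the mapping is released. */
	odp_shm_t shm = my_state->shm;

	return odp_shm_free(shm) < 0 ? -1 : 0;
}

The reservation has to happen before worker processes are forked, i.e. in the global init stage, which is exactly where init_global() runs in the diff below.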
Yi He authored and muvarov committed Feb 9, 2017
1 parent 3ffa326 commit f21b2d2
Showing 1 changed file with 60 additions and 40 deletions.
100 changes: 60 additions & 40 deletions platform/linux-generic/odp_schedule_sp.c
@@ -9,6 +9,7 @@
#include <odp/api/thread.h>
#include <odp/api/time.h>
#include <odp/api/schedule.h>
#include <odp/api/shared_memory.h>
#include <odp_schedule_if.h>
#include <odp_debug_internal.h>
#include <odp_align_internal.h>
@@ -107,6 +108,7 @@ typedef struct {
sched_cmd_t pktio_cmd[NUM_PKTIO];
prio_queue_t prio_queue[NUM_GROUP][NUM_PRIO];
sched_group_t sched_group;
odp_shm_t shm;
} sched_global_t;

typedef struct {
@@ -118,7 +120,7 @@ typedef struct {
int group[NUM_GROUP];
} sched_local_t;

static sched_global_t sched_global;
static sched_global_t *sched_global;
static __thread sched_local_t sched_local;

static inline uint32_t index_to_ring_idx(int pktio, uint32_t index)
@@ -144,30 +146,44 @@ static inline uint32_t index_from_ring_idx(uint32_t *index, uint32_t ring_idx)
static int init_global(void)
{
int i, j;
sched_group_t *sched_group = &sched_global.sched_group;
odp_shm_t shm;
sched_group_t *sched_group = NULL;

ODP_DBG("Using SP scheduler\n");

memset(&sched_global, 0, sizeof(sched_global_t));
shm = odp_shm_reserve("sp_scheduler",
sizeof(sched_global_t),
ODP_CACHE_LINE_SIZE, 0);

sched_global = odp_shm_addr(shm);

if (sched_global == NULL) {
ODP_ERR("Schedule init: Shm reserve failed.\n");
return -1;
}

memset(sched_global, 0, sizeof(sched_global_t));
sched_global->shm = shm;

for (i = 0; i < NUM_QUEUE; i++) {
sched_global.queue_cmd[i].s.type = CMD_QUEUE;
sched_global.queue_cmd[i].s.index = i;
sched_global.queue_cmd[i].s.ring_idx = index_to_ring_idx(0, i);
sched_global->queue_cmd[i].s.type = CMD_QUEUE;
sched_global->queue_cmd[i].s.index = i;
sched_global->queue_cmd[i].s.ring_idx = index_to_ring_idx(0, i);
}

for (i = 0; i < NUM_PKTIO; i++) {
sched_global.pktio_cmd[i].s.type = CMD_PKTIO;
sched_global.pktio_cmd[i].s.index = i;
sched_global.pktio_cmd[i].s.ring_idx = index_to_ring_idx(1, i);
sched_global.pktio_cmd[i].s.prio = PKTIN_PRIO;
sched_global.pktio_cmd[i].s.group = GROUP_PKTIN;
sched_global->pktio_cmd[i].s.type = CMD_PKTIO;
sched_global->pktio_cmd[i].s.index = i;
sched_global->pktio_cmd[i].s.ring_idx = index_to_ring_idx(1, i);
sched_global->pktio_cmd[i].s.prio = PKTIN_PRIO;
sched_global->pktio_cmd[i].s.group = GROUP_PKTIN;
}

for (i = 0; i < NUM_GROUP; i++)
for (j = 0; j < NUM_PRIO; j++)
ring_init(&sched_global.prio_queue[i][j].ring);
ring_init(&sched_global->prio_queue[i][j].ring);

sched_group = &sched_global->sched_group;
odp_ticketlock_init(&sched_group->s.lock);

for (i = 0; i < NUM_THREAD; i++)
@@ -201,16 +217,22 @@ static int init_local(void)

static int term_global(void)
{
int qi;
int qi, ret = 0;

for (qi = 0; qi < NUM_QUEUE; qi++) {
if (sched_global.queue_cmd[qi].s.init) {
if (sched_global->queue_cmd[qi].s.init) {
/* todo: dequeue until empty ? */
sched_cb_queue_destroy_finalize(qi);
}
}

return 0;
ret = odp_shm_free(sched_global->shm);
if (ret < 0) {
ODP_ERR("Shm free failed for sp_scheduler");
ret = -1;
}

return ret;
}

static int term_local(void)
@@ -266,7 +288,7 @@ static void remove_group(sched_group_t *sched_group, int thr, int group)

static int thr_add(odp_schedule_group_t group, int thr)
{
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;

if (group < 0 || group >= NUM_GROUP)
return -1;
@@ -291,7 +313,7 @@ static int thr_add(odp_schedule_group_t group, int thr)

static int thr_rem(odp_schedule_group_t group, int thr)
{
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;

if (group < 0 || group >= NUM_GROUP)
return -1;
@@ -319,7 +341,7 @@ static int num_grps(void)

static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
{
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;
odp_schedule_group_t group = sched_param->group;
int prio = 0;

@@ -332,18 +354,18 @@ static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
if (sched_param->prio > 0)
prio = LOWEST_QUEUE_PRIO;

sched_global.queue_cmd[qi].s.prio = prio;
sched_global.queue_cmd[qi].s.group = group;
sched_global.queue_cmd[qi].s.init = 1;
sched_global->queue_cmd[qi].s.prio = prio;
sched_global->queue_cmd[qi].s.group = group;
sched_global->queue_cmd[qi].s.init = 1;

return 0;
}

static void destroy_queue(uint32_t qi)
{
sched_global.queue_cmd[qi].s.prio = 0;
sched_global.queue_cmd[qi].s.group = 0;
sched_global.queue_cmd[qi].s.init = 0;
sched_global->queue_cmd[qi].s.prio = 0;
sched_global->queue_cmd[qi].s.group = 0;
sched_global->queue_cmd[qi].s.init = 0;
}

static inline void add_tail(sched_cmd_t *cmd)
@@ -353,8 +375,7 @@ static inline void add_tail(sched_cmd_t *cmd)
int prio = cmd->s.prio;
uint32_t idx = cmd->s.ring_idx;

prio_queue = &sched_global.prio_queue[group][prio];

prio_queue = &sched_global->prio_queue[group][prio];
ring_enq(&prio_queue->ring, RING_MASK, idx);
}

@@ -364,8 +385,7 @@ static inline sched_cmd_t *rem_head(int group, int prio)
uint32_t ring_idx, index;
int pktio;

prio_queue = &sched_global.prio_queue[group][prio];

prio_queue = &sched_global->prio_queue[group][prio];
ring_idx = ring_deq(&prio_queue->ring, RING_MASK);

if (ring_idx == RING_EMPTY)
@@ -374,16 +394,16 @@ static inline sched_cmd_t *rem_head(int group, int prio)
pktio = index_from_ring_idx(&index, ring_idx);

if (pktio)
return &sched_global.pktio_cmd[index];
return &sched_global->pktio_cmd[index];

return &sched_global.queue_cmd[index];
return &sched_global->queue_cmd[index];
}

static int sched_queue(uint32_t qi)
{
sched_cmd_t *cmd;

cmd = &sched_global.queue_cmd[qi];
cmd = &sched_global->queue_cmd[qi];
add_tail(cmd);

return 0;
@@ -414,7 +434,7 @@ static void pktio_start(int pktio_index, int num, int pktin_idx[])
ODP_DBG("pktio index: %i, %i pktin queues %i\n",
pktio_index, num, pktin_idx[0]);

cmd = &sched_global.pktio_cmd[pktio_index];
cmd = &sched_global->pktio_cmd[pktio_index];

if (num > NUM_PKTIN)
ODP_ABORT("Supports only %i pktin queues per interface\n",
@@ -432,7 +452,7 @@ static inline sched_cmd_t *sched_cmd(void)
{
int prio, i;
int thr = sched_local.thr_id;
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;
thr_group_t *thr_group = &sched_group->s.thr[thr];
uint32_t gen_cnt;

Expand Down Expand Up @@ -606,7 +626,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
const odp_thrmask_t *thrmask)
{
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;
int i;

odp_ticketlock_lock(&sched_group->s.lock);
@@ -637,7 +657,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,

static int schedule_group_destroy(odp_schedule_group_t group)
{
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;

if (group < NUM_STATIC_GROUP || group >= NUM_GROUP)
return -1;
@@ -660,7 +680,7 @@ static int schedule_group_destroy(odp_schedule_group_t group)
static odp_schedule_group_t schedule_group_lookup(const char *name)
{
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;
int i;

odp_ticketlock_lock(&sched_group->s.lock);
@@ -681,7 +701,7 @@ static int schedule_group_join(odp_schedule_group_t group,
const odp_thrmask_t *thrmask)
{
int thr;
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;

if (group < 0 || group >= NUM_GROUP)
return -1;
@@ -713,7 +733,7 @@ static int schedule_group_leave(odp_schedule_group_t group,
const odp_thrmask_t *thrmask)
{
int thr;
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;
odp_thrmask_t *all = &sched_group->s.group[GROUP_ALL].mask;
odp_thrmask_t not;

@@ -747,7 +767,7 @@ static int schedule_group_leave(odp_schedule_group_t group,
static int schedule_group_thrmask(odp_schedule_group_t group,
odp_thrmask_t *thrmask)
{
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;

if (group < 0 || group >= NUM_GROUP)
return -1;
@@ -769,7 +789,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
static int schedule_group_info(odp_schedule_group_t group,
odp_schedule_group_info_t *info)
{
sched_group_t *sched_group = &sched_global.sched_group;
sched_group_t *sched_group = &sched_global->sched_group;

if (group < 0 || group >= NUM_GROUP)
return -1;