xen/cpupool: switch cpupool list to normal list interface

Instead of open-coding something like a linked list, just use the
available functionality from list.h.
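
For reference, list.h provides Linux-style intrusive lists: a struct
list_head is embedded in the payload structure, and the iteration macros
map each node back to its containing object. A minimal sketch of the
pattern this patch adopts (struct item and example() are made-up names for
illustration, not part of the patch):

    #include <xen/list.h>

    struct item {
        unsigned int id;
        struct list_head list;               /* embedded list node */
    };

    static LIST_HEAD(item_list);             /* statically initialised, empty */

    static void example(struct item *i)
    {
        struct item *cur;

        list_add_tail(&i->list, &item_list);       /* append at the end */

        list_for_each_entry(cur, &item_list, list) /* walk all entries */
            if ( cur->id == i->id )
                break;

        list_del(&i->list);                        /* unlink again */
    }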

The allocation of a new cpupool id is not aware of a possible wrap. Fix
that. We don't, however, consider the case of extremely many pools (beyond
4 billion) as something which needs explicit handling right now: first and
foremost, there would need to be systems with 4 billion CPUs to make this
many pools a sensible thing to have.
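
Concretely, the wrap handling relies on the list being kept sorted by
poolid: if the highest possible id is already in use, the first gap in the
id sequence yields the lowest free id, and a gap must exist as long as
fewer than CPUPOOLID_NONE pools exist. A sketch of the search, mirroring
the shape of the new cpupool_create() code below (variable names as in the
patch):

    /* Start from the pool with the highest id (list is sorted by poolid). */
    q = list_last_entry(&cpupool_list, struct cpupool, list);
    if ( q->cpupool_id == CPUPOOLID_NONE - 1 )
        /* Highest id taken: scan for the first gap in the id sequence. */
        list_for_each_entry(q, &cpupool_list, list)
            if ( q->cpupool_id + 1 != list_next_entry(q, list)->cpupool_id )
                break;
    c->cpupool_id = q->cpupool_id + 1;        /* lowest free id */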

While adding the required new include to private.h, sort the includes.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
jgross1 authored and jbeulich committed Dec 7, 2020
1 parent 30d430b commit d5ce1f6
Showing 2 changed files with 57 additions and 47 deletions.

xen/common/sched/cpupool.c (54 additions, 46 deletions)
@@ -16,20 +16,18 @@
 #include <xen/init.h>
 #include <xen/keyhandler.h>
 #include <xen/lib.h>
+#include <xen/list.h>
 #include <xen/param.h>
 #include <xen/percpu.h>
 #include <xen/sched.h>
 #include <xen/warning.h>
 
 #include "private.h"
 
-#define for_each_cpupool(ptr)    \
-    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
-
 struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
 cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */
 
-static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
+static LIST_HEAD(cpupool_list);          /* linked list, sorted by poolid */
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
@@ -189,15 +187,15 @@ static struct cpupool *alloc_cpupool_struct(void)
  */
 static struct cpupool *__cpupool_find_by_id(unsigned int id, bool exact)
 {
-    struct cpupool **q;
+    struct cpupool *q;
 
     ASSERT(spin_is_locked(&cpupool_lock));
 
-    for_each_cpupool(q)
-        if ( (*q)->cpupool_id >= id )
-            break;
+    list_for_each_entry(q, &cpupool_list, list)
+        if ( q->cpupool_id == id || (!exact && q->cpupool_id > id) )
+            return q;
 
-    return (!exact || (*q == NULL) || ((*q)->cpupool_id == id)) ? *q : NULL;
+    return NULL;
 }
 
 static struct cpupool *cpupool_find_by_id(unsigned int poolid)
@@ -246,8 +244,7 @@ static struct cpupool *cpupool_create(
     unsigned int poolid, unsigned int sched_id, int *perr)
 {
     struct cpupool *c;
-    struct cpupool **q;
-    unsigned int last = 0;
+    struct cpupool *q;
 
     *perr = -ENOMEM;
     if ( (c = alloc_cpupool_struct()) == NULL )
@@ -260,23 +257,42 @@ static struct cpupool *cpupool_create(
 
     spin_lock(&cpupool_lock);
 
-    for_each_cpupool(q)
+    if ( poolid != CPUPOOLID_NONE )
     {
-        last = (*q)->cpupool_id;
-        if ( (poolid != CPUPOOLID_NONE) && (last >= poolid) )
-            break;
+        q = __cpupool_find_by_id(poolid, false);
+        if ( !q )
+            list_add_tail(&c->list, &cpupool_list);
+        else
+        {
+            list_add_tail(&c->list, &q->list);
+            if ( q->cpupool_id == poolid )
+            {
+                *perr = -EEXIST;
+                goto err;
+            }
+        }
+
+        c->cpupool_id = poolid;
     }
-    if ( *q != NULL )
+    else
     {
-        if ( (*q)->cpupool_id == poolid )
-        {
-            *perr = -EEXIST;
-            goto err;
-        }
-        c->next = *q;
+        /* Cpupool 0 is created with specified id at boot and never removed. */
+        ASSERT(!list_empty(&cpupool_list));
+
+        q = list_last_entry(&cpupool_list, struct cpupool, list);
+        /* In case of wrap search for first free id. */
+        if ( q->cpupool_id == CPUPOOLID_NONE - 1 )
+        {
+            list_for_each_entry(q, &cpupool_list, list)
+                if ( q->cpupool_id + 1 != list_next_entry(q, list)->cpupool_id )
+                    break;
+        }
+
+        list_add(&c->list, &q->list);
+
+        c->cpupool_id = q->cpupool_id + 1;
     }
 
-    c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
     if ( poolid == 0 )
     {
         c->sched = scheduler_get_default();
@@ -291,8 +307,6 @@ static struct cpupool *cpupool_create(
     c->gran = opt_sched_granularity;
     c->sched_gran = sched_granularity;
 
-    *q = c;
-
     spin_unlock(&cpupool_lock);
 
     debugtrace_printk("Created cpupool %u with scheduler %s (%s)\n",
@@ -302,6 +316,8 @@ static struct cpupool *cpupool_create(
     return c;
 
  err:
+    list_del(&c->list);
+
     spin_unlock(&cpupool_lock);
     free_cpupool_struct(c);
     return NULL;
@@ -312,27 +328,19 @@ static struct cpupool *cpupool_create(
  * possible failures:
  * - pool still in use
  * - cpus still assigned to pool
- * - pool not in list
  */
 static int cpupool_destroy(struct cpupool *c)
 {
-    struct cpupool **q;
-
     spin_lock(&cpupool_lock);
-    for_each_cpupool(q)
-        if ( *q == c )
-            break;
-    if ( *q != c )
-    {
-        spin_unlock(&cpupool_lock);
-        return -ENOENT;
-    }
+
     if ( (c->n_dom != 0) || cpumask_weight(c->cpu_valid) )
     {
         spin_unlock(&cpupool_lock);
         return -EBUSY;
     }
-    *q = c->next;
+
+    list_del(&c->list);
+
     spin_unlock(&cpupool_lock);
 
     cpupool_put(c);
@@ -732,17 +740,17 @@ static int cpupool_cpu_remove_prologue(unsigned int cpu)
  */
 static void cpupool_cpu_remove_forced(unsigned int cpu)
 {
-    struct cpupool **c;
+    struct cpupool *c;
    int ret;
    unsigned int master_cpu = sched_get_resource_cpu(cpu);
 
-    for_each_cpupool ( c )
+    list_for_each_entry(c, &cpupool_list, list)
     {
-        if ( cpumask_test_cpu(master_cpu, (*c)->cpu_valid) )
+        if ( cpumask_test_cpu(master_cpu, c->cpu_valid) )
         {
-            ret = cpupool_unassign_cpu_start(*c, master_cpu);
+            ret = cpupool_unassign_cpu_start(c, master_cpu);
             BUG_ON(ret);
-            ret = cpupool_unassign_cpu_finish(*c);
+            ret = cpupool_unassign_cpu_finish(c);
             BUG_ON(ret);
         }
     }
@@ -929,7 +937,7 @@ const cpumask_t *cpupool_valid_cpus(const struct cpupool *pool)
 void dump_runq(unsigned char key)
 {
     s_time_t now = NOW();
-    struct cpupool **c;
+    struct cpupool *c;
 
     spin_lock(&cpupool_lock);
 
@@ -944,12 +952,12 @@ void dump_runq(unsigned char key)
         schedule_dump(NULL);
     }
 
-    for_each_cpupool(c)
+    list_for_each_entry(c, &cpupool_list, list)
     {
-        printk("Cpupool %u:\n", (*c)->cpupool_id);
-        printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid));
-        sched_gran_print((*c)->gran, cpupool_get_granularity(*c));
-        schedule_dump(*c);
+        printk("Cpupool %u:\n", c->cpupool_id);
+        printk("Cpus: %*pbl\n", CPUMASK_PR(c->cpu_valid));
+        sched_gran_print(c->gran, cpupool_get_granularity(c));
+        schedule_dump(c);
     }
 
     spin_unlock(&cpupool_lock);
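
Note how the reworked __cpupool_find_by_id() doubles as an insertion
helper: with exact == false it returns the first pool whose id is greater
than or equal to the requested one, so linking the new pool in right before
the returned entry keeps the list sorted. A usage sketch (caller context
assumed, mirroring the cpupool_create() hunk above):

    /* Insert new pool c with a caller-specified poolid, keeping order. */
    q = __cpupool_find_by_id(poolid, false);  /* first pool with id >= poolid */
    if ( !q )
        list_add_tail(&c->list, &cpupool_list); /* none found: append */
    else
        list_add_tail(&c->list, &q->list);      /* link in just before q */

list_add_tail(new, pos) inserts new in front of pos, so passing an entry's
own node rather than the list head gives "insert before that entry".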

xen/common/sched/private.h (3 additions, 1 deletion)
@@ -8,8 +8,9 @@
 #ifndef __XEN_SCHED_IF_H__
 #define __XEN_SCHED_IF_H__
 
-#include <xen/percpu.h>
 #include <xen/err.h>
+#include <xen/list.h>
+#include <xen/percpu.h>
 #include <xen/rcupdate.h>
 
 /* cpus currently in no cpupool */
@@ -510,6 +511,7 @@ struct cpupool
     unsigned int n_dom;
     cpumask_var_t cpu_valid;      /* all cpus assigned to pool */
     cpumask_var_t res_valid;      /* all scheduling resources of pool */
+    struct list_head list;
     struct cpupool *next;
     struct scheduler *sched;
     atomic_t refcnt;
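
The added struct list_head member is what makes the list "intrusive":
list_for_each_entry() recovers the struct cpupool from each node via the
member's offset within the structure, essentially container_of(). Roughly
(a simplified illustration; the real macro in list.h differs in detail):

    /* Approximate expansion of list_for_each_entry(q, &cpupool_list, list): */
    struct list_head *pos;

    for ( pos = cpupool_list.next; pos != &cpupool_list; pos = pos->next )
    {
        struct cpupool *q = container_of(pos, struct cpupool, list);
        /* ... loop body operating on q ... */
    }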
