Commit
blkcg: make root blkcg allocation use %GFP_KERNEL
Currently, blkcg_activate_policy() depends on %GFP_ATOMIC allocation
from __blkg_lookup_create() for root blkcg creation.  This could make
policy activation fail unnecessarily.

Make blkg_alloc() take @gfp_mask, __blkg_lookup_create() take an
optional @new_blkg for preallocated blkg, and blkcg_activate_policy()
preload radix tree and preallocate blkg with %GFP_KERNEL before trying
to create the root blkg.

v2: __blkg_lookup_create() was returning %NULL on blkg alloc failure
   instead of ERR_PTR() value.  Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
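
Editorial note: the activation path described above relies on a standard kernel pairing: radix_tree_preload() tops up a per-CPU pool of tree nodes with a sleeping %GFP_KERNEL allocation so that a later radix_tree_insert() under a spinlock does not need to allocate. Below is a minimal kernel-context sketch of that pairing in isolation, mirroring the best-effort form used in the diff further down; example_tree, example_lock and example_insert() are hypothetical names and not part of this commit.

#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical tree and lock; GFP_ATOMIC is the tree's own fallback mask. */
static RADIX_TREE(example_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(example_lock);

static int example_insert(unsigned long index, void *item)
{
	bool preloaded;
	int ret;

	/*
	 * Best-effort preload: returns 0 on success and leaves preemption
	 * disabled until radix_tree_preload_end(), so nothing in between may
	 * sleep.  On failure we simply carry on; the insert below then has to
	 * allocate atomically itself and may fail with -ENOMEM.
	 */
	preloaded = !radix_tree_preload(GFP_KERNEL);	/* may sleep */

	spin_lock(&example_lock);
	ret = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_lock);

	if (preloaded)
		radix_tree_preload_end();
	return ret;
}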
htejun authored and axboe committed Jun 25, 2012
1 parent 1358986 commit 1597499
Showing 1 changed file with 43 additions and 16 deletions.
block/blk-cgroup.c (43 additions, 16 deletions)

@@ -91,16 +91,18 @@ static void blkg_free(struct blkcg_gq *blkg)
  * blkg_alloc - allocate a blkg
  * @blkcg: block cgroup the new blkg is associated with
  * @q: request_queue the new blkg is associated with
+ * @gfp_mask: allocation mask to use
  *
  * Allocate a new blkg assocating @blkcg and @q.
  */
-static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+				   gfp_t gfp_mask)
 {
 	struct blkcg_gq *blkg;
 	int i;
 
 	/* alloc and init base part */
-	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 	if (!blkg)
 		return NULL;
 
@@ -117,7 +119,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
-		pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
+		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
 		if (!pd) {
 			blkg_free(blkg);
 			return NULL;
@@ -175,8 +177,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
+/*
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
+ */
 static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-					     struct request_queue *q)
+					     struct request_queue *q,
+					     struct blkcg_gq *new_blkg)
 {
 	struct blkcg_gq *blkg;
 	int ret;
@@ -188,18 +195,24 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 	blkg = __blkg_lookup(blkcg, q);
 	if (blkg) {
 		rcu_assign_pointer(blkcg->blkg_hint, blkg);
-		return blkg;
+		goto out_free;
 	}
 
 	/* blkg holds a reference to blkcg */
-	if (!css_tryget(&blkcg->css))
-		return ERR_PTR(-EINVAL);
+	if (!css_tryget(&blkcg->css)) {
+		blkg = ERR_PTR(-EINVAL);
+		goto out_free;
+	}
 
 	/* allocate */
-	ret = -ENOMEM;
-	blkg = blkg_alloc(blkcg, q);
-	if (unlikely(!blkg))
-		goto err_put;
+	if (!new_blkg) {
+		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+		if (unlikely(!new_blkg)) {
+			blkg = ERR_PTR(-ENOMEM);
+			goto out_put;
+		}
+	}
+	blkg = new_blkg;
 
 	/* insert */
 	spin_lock(&blkcg->lock);
@@ -212,10 +225,13 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 
 	if (!ret)
 		return blkg;
-err_put:
+
+	blkg = ERR_PTR(ret);
+out_put:
 	css_put(&blkcg->css);
-	blkg_free(blkg);
-	return ERR_PTR(ret);
+out_free:
+	blkg_free(new_blkg);
+	return blkg;
 }
 
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
@@ -227,7 +243,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	 */
 	if (unlikely(blk_queue_bypass(q)))
 		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-	return __blkg_lookup_create(blkcg, q);
+	return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
@@ -726,19 +742,30 @@ int blkcg_activate_policy(struct request_queue *q,
 	struct blkcg_gq *blkg;
 	struct blkg_policy_data *pd, *n;
 	int cnt = 0, ret;
+	bool preloaded;
 
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
+	/* preallocations for root blkg */
+	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+	if (!blkg)
+		return -ENOMEM;
+
+	preloaded = !radix_tree_preload(GFP_KERNEL);
+
 	blk_queue_bypass_start(q);
 
 	/* make sure the root blkg exists and count the existing blkgs */
 	spin_lock_irq(q->queue_lock);
 
 	rcu_read_lock();
-	blkg = __blkg_lookup_create(&blkcg_root, q);
+	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
 	rcu_read_unlock();
 
+	if (preloaded)
+		radix_tree_preload_end();
+
 	if (IS_ERR(blkg)) {
 		ret = PTR_ERR(blkg);
 		goto out_unlock;
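
Editorial note: the other half of the change is the ownership rule documented on __blkg_lookup_create(): the caller may hand in a preallocated @new_blkg, and the function always consumes it, installing it or freeing it, whether the lookup finds an existing blkg, fails, or succeeds. The userspace C sketch below mirrors that convention with a toy lookup table; struct node, lookup_create(), table_lock and the rest are illustrative names only, not kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
	char key[32];
	struct node *next;
};

static struct node *table;			/* single-bucket "tree" */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Look up @key; create an entry if missing.  @spare is always consumed. */
static struct node *lookup_create(const char *key, struct node *spare)
{
	struct node *n;

	pthread_mutex_lock(&table_lock);
	for (n = table; n; n = n->next)
		if (!strcmp(n->key, key))
			goto out;		/* found: unused spare freed below */

	if (!spare) {
		/* No preallocation handed in: allocate here, the "GFP_ATOMIC-like" path. */
		spare = calloc(1, sizeof(*spare));
		if (!spare)
			goto out;		/* n is NULL: report failure */
	}

	snprintf(spare->key, sizeof(spare->key), "%s", key);
	spare->next = table;
	table = spare;
	n = spare;
	spare = NULL;				/* ownership transferred to the table */
out:
	pthread_mutex_unlock(&table_lock);
	free(spare);				/* unused spare is dropped; free(NULL) is a no-op */
	return n;
}

int main(void)
{
	/* Preallocate outside the lock, where a blocking allocation is fine. */
	struct node *spare = calloc(1, sizeof(*spare));
	struct node *n = lookup_create("root", spare);

	printf("root entry: %s\n", n ? n->key : "(allocation failed)");
	return 0;
}

The point of the convention is that blkcg_activate_policy() can allocate with %GFP_KERNEL before taking q->queue_lock and then stop tracking the spare: whatever path __blkg_lookup_create() takes, the preallocated blkg is never leaked.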
