Commit aba19ee

Ming Lei authored and axboe committed
blk-mq: Move flush queue allocation into blk_mq_init_hctx()
Move flush queue allocation into blk_mq_init_hctx() and its release into blk_mq_exit_hctx(), and prepare for replacing tags->lock with SRCU for draining inflight request walking. blk_mq_exit_hctx() is the last chance for us to get a valid `tag_set` reference, and we need to add one SRCU to `tag_set` for freeing the flush request via call_srcu().

It is safe to move flush queue & request release into blk_mq_exit_hctx(), because blk_mq_clear_flush_rq_mapping() clears the flush request reference in the driver tags inflight request table, and meanwhile inflight request walking is drained.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
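As context for the direction this prepares for, here is a minimal, hypothetical sketch of the call_srcu()-based free the message refers to. The srcu_struct placement, field names, and helper names below are assumptions for illustration only; this commit itself still frees the flush queue synchronously in blk_mq_exit_hctx().

/*
 * Illustrative sketch only: the srcu_struct placement and the example_*
 * names are assumptions about the follow-up SRCU conversion this patch
 * prepares for; they are not part of this commit.
 */
#include <linux/srcu.h>
#include <linux/slab.h>

struct example_flush_queue {
	struct rcu_head rcu_head;
	/* flush machinery elided */
};

/* Runs once every SRCU read-side section that may still be walking
 * inflight requests has completed, so the flush request can be freed
 * without holding tags->lock. */
static void example_free_flush_queue_cb(struct rcu_head *head)
{
	struct example_flush_queue *fq =
		container_of(head, struct example_flush_queue, rcu_head);

	kfree(fq);
}

/* Would be invoked from blk_mq_exit_hctx(), the last point where the
 * tag_set (and hence its srcu_struct) is still reachable. */
static void example_exit_hctx_free_fq(struct srcu_struct *tags_srcu,
				      struct example_flush_queue *fq)
{
	call_srcu(tags_srcu, &fq->rcu_head, example_free_flush_queue_cb);
}

With allocation and release now paired in blk_mq_init_hctx()/blk_mq_exit_hctx(), swapping the synchronous blk_free_flush_queue() call for a deferred free along these lines becomes a local change.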
1 parent ba28afb commit aba19ee

File tree

2 files changed: +13 −8 lines


block/blk-mq-sysfs.c

Lines changed: 0 additions & 1 deletion
@@ -34,7 +34,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
 	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
 						  kobj);
 
-	blk_free_flush_queue(hctx->fq);
 	sbitmap_free(&hctx->ctx_map);
 	free_cpumask_var(hctx->cpumask);
 	kfree(hctx->ctxs);

block/blk-mq.c

Lines changed: 13 additions & 7 deletions
@@ -3939,6 +3939,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
+	blk_free_flush_queue(hctx->fq);
+	hctx->fq = NULL;
+
 	xa_erase(&q->hctx_table, hctx_idx);
 
 	spin_lock(&q->unused_hctx_lock);
@@ -3964,13 +3967,19 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
+	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
+
+	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
+	if (!hctx->fq)
+		goto fail;
+
 	hctx->queue_num = hctx_idx;
 
 	hctx->tags = set->tags[hctx_idx];
 
 	if (set->ops->init_hctx &&
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
-		goto fail;
+		goto fail_free_fq;
 
 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
 				hctx->numa_node))
@@ -3987,6 +3996,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
+ fail_free_fq:
+	blk_free_flush_queue(hctx->fq);
+	hctx->fq = NULL;
  fail:
 	return -1;
 }
@@ -4038,16 +4050,10 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
 	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
 
-	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
-	if (!hctx->fq)
-		goto free_bitmap;
-
 	blk_mq_hctx_kobj_init(hctx);
 
 	return hctx;
 
- free_bitmap:
-	sbitmap_free(&hctx->ctx_map);
  free_ctxs:
 	kfree(hctx->ctxs);
  free_cpumask:
