@@ -71,7 +71,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
 {
-	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
+	return xa_load(&q->hctx_table,
+			(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
 }

 static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
@@ -573,7 +574,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
-	data.hctx = q->queue_hw_ctx[hctx_idx];
+	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
@@ -3437,6 +3438,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,

	blk_mq_remove_cpuhp(hctx);

+	xa_erase(&q->hctx_table, hctx_idx);
+
	spin_lock(&q->unused_hctx_lock);
	list_add(&hctx->hctx_list, &q->unused_hctx_list);
	spin_unlock(&q->unused_hctx_lock);
@@ -3476,8 +3479,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
				hctx->numa_node))
		goto exit_hctx;
+
+	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
+		goto exit_flush_rq;
+
	return 0;

+exit_flush_rq:
+	if (set->ops->exit_request)
+		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
@@ -3856,7 +3866,7 @@ void blk_mq_release(struct request_queue *q)
		kobject_put(&hctx->kobj);
	}

-	kfree(q->queue_hw_ctx);
+	xa_destroy(&q->hctx_table);

	/*
	 * release .mq_kobj and sw queue's kobject now because
@@ -3945,46 +3955,28 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
						struct request_queue *q)
 {
-	int i, j, end;
-	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
-
-	if (q->nr_hw_queues < set->nr_hw_queues) {
-		struct blk_mq_hw_ctx **new_hctxs;
-
-		new_hctxs = kcalloc_node(set->nr_hw_queues,
-					 sizeof(*new_hctxs), GFP_KERNEL,
-					 set->numa_node);
-		if (!new_hctxs)
-			return;
-		if (hctxs)
-			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
-			       sizeof(*hctxs));
-		q->queue_hw_ctx = new_hctxs;
-		kfree(hctxs);
-		hctxs = new_hctxs;
-	}
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long i, j;

	/* protect against switching io scheduler */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int old_node;
		int node = blk_mq_get_hctx_node(set, i);
-		struct blk_mq_hw_ctx *old_hctx = hctxs[i];
+		struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);

		if (old_hctx) {
			old_node = old_hctx->numa_node;
			blk_mq_exit_hctx(q, set, old_hctx, i);
		}

-		hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node);
-		if (!hctxs[i]) {
+		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
			if (!old_hctx)
				break;
			pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
					node, old_node);
-			hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i,
-					old_node);
-			WARN_ON_ONCE(!hctxs[i]);
+			hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
+			WARN_ON_ONCE(!hctx);
		}
	}
	/*
@@ -3993,21 +3985,13 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
	 */
	if (i != set->nr_hw_queues) {
		j = q->nr_hw_queues;
-		end = i;
	} else {
		j = i;
-		end = q->nr_hw_queues;
		q->nr_hw_queues = set->nr_hw_queues;
	}

-	for (; j < end; j++) {
-		struct blk_mq_hw_ctx *hctx = hctxs[j];
-
-		if (hctx) {
-			blk_mq_exit_hctx(q, set, hctx, j);
-			hctxs[j] = NULL;
-		}
-	}
+	xa_for_each_start(&q->hctx_table, j, hctx, j)
+		blk_mq_exit_hctx(q, set, hctx, j);
	mutex_unlock(&q->sysfs_lock);
 }
40133997
@@ -4046,6 +4030,8 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
	INIT_LIST_HEAD(&q->unused_hctx_list);
	spin_lock_init(&q->unused_hctx_lock);

+	xa_init(&q->hctx_table);
+
	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;
@@ -4075,7 +4061,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
	return 0;

 err_hctxs:
-	kfree(q->queue_hw_ctx);
+	xa_destroy(&q->hctx_table);
	q->nr_hw_queues = 0;
	blk_mq_sysfs_deinit(q);
 err_poll:
0 commit comments