@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
 void blk_mq_wake_waiters(struct request_queue *q)
 {
         struct blk_mq_hw_ctx *hctx;
-        unsigned int i;
+        unsigned long i;
 
         queue_for_each_hw_ctx(q, hctx, i)
                 if (blk_mq_hw_queue_mapped(hctx))
@@ -1442,7 +1442,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
                 container_of(work, struct request_queue, timeout_work);
         unsigned long next = 0;
         struct blk_mq_hw_ctx *hctx;
-        int i;
+        unsigned long i;
 
         /* A deadlock might occur if a request is stuck requiring a
          * timeout at the same time a queue freeze is waiting
@@ -2143,7 +2143,7 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
         struct blk_mq_hw_ctx *hctx, *sq_hctx;
-        int i;
+        unsigned long i;
 
         sq_hctx = NULL;
         if (blk_mq_has_sqsched(q))
@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 {
         struct blk_mq_hw_ctx *hctx, *sq_hctx;
-        int i;
+        unsigned long i;
 
         sq_hctx = NULL;
         if (blk_mq_has_sqsched(q))
@@ -2209,7 +2209,7 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
 bool blk_mq_queue_stopped(struct request_queue *q)
 {
         struct blk_mq_hw_ctx *hctx;
-        int i;
+        unsigned long i;
 
         queue_for_each_hw_ctx(q, hctx, i)
                 if (blk_mq_hctx_stopped(hctx))
@@ -2248,7 +2248,7 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queue);
 void blk_mq_stop_hw_queues(struct request_queue *q)
 {
         struct blk_mq_hw_ctx *hctx;
-        int i;
+        unsigned long i;
 
         queue_for_each_hw_ctx(q, hctx, i)
                 blk_mq_stop_hw_queue(hctx);
@@ -2266,7 +2266,7 @@ EXPORT_SYMBOL(blk_mq_start_hw_queue);
 void blk_mq_start_hw_queues(struct request_queue *q)
 {
         struct blk_mq_hw_ctx *hctx;
-        int i;
+        unsigned long i;
 
         queue_for_each_hw_ctx(q, hctx, i)
                 blk_mq_start_hw_queue(hctx);
@@ -2286,7 +2286,7 @@ EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 {
         struct blk_mq_hw_ctx *hctx;
-        int i;
+        unsigned long i;
 
         queue_for_each_hw_ctx(q, hctx, i)
                 blk_mq_start_stopped_hw_queue(hctx, async);
@@ -3446,7 +3446,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
                 struct blk_mq_tag_set *set, int nr_queue)
 {
         struct blk_mq_hw_ctx *hctx;
-        unsigned int i;
+        unsigned long i;
 
         queue_for_each_hw_ctx(q, hctx, i) {
                 if (i == nr_queue)
@@ -3637,7 +3637,8 @@ static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-        unsigned int i, j, hctx_idx;
+        unsigned int j, hctx_idx;
+        unsigned long i;
         struct blk_mq_hw_ctx *hctx;
         struct blk_mq_ctx *ctx;
         struct blk_mq_tag_set *set = q->tag_set;
@@ -3744,7 +3745,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
         struct blk_mq_hw_ctx *hctx;
-        int i;
+        unsigned long i;
 
         queue_for_each_hw_ctx(q, hctx, i) {
                 if (shared) {
@@ -3844,7 +3845,7 @@ static int blk_mq_alloc_ctxs(struct request_queue *q)
 void blk_mq_release(struct request_queue *q)
 {
         struct blk_mq_hw_ctx *hctx, *next;
-        int i;
+        unsigned long i;
 
         queue_for_each_hw_ctx(q, hctx, i)
                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
@@ -4362,7 +4363,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 {
         struct blk_mq_tag_set *set = q->tag_set;
         struct blk_mq_hw_ctx *hctx;
-        int i, ret;
+        int ret;
+        unsigned long i;
 
         if (!set)
                 return -EINVAL;
@@ -4738,7 +4740,7 @@ void blk_mq_cancel_work_sync(struct request_queue *q)
 {
         if (queue_is_mq(q)) {
                 struct blk_mq_hw_ctx *hctx;
-                int i;
+                unsigned long i;
 
                 cancel_delayed_work_sync(&q->requeue_work);
 
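For context: every loop counter above moves to unsigned long because queue_for_each_hw_ctx() is being prepared to walk an xarray-backed hw-queue table, and xa_for_each() hands its index back by reference as an unsigned long, so an int iterator would no longer type-check. The following is a minimal sketch of that intended shape; the hctx_table field name comes from the follow-up conversion rather than this diff, and example_stop_all() is a made-up caller for illustration.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/xarray.h>

/*
 * Sketch only: roughly how queue_for_each_hw_ctx() could look once the
 * hw-queue map lives in an xarray. xa_for_each() passes the index by
 * reference as an unsigned long, which is what motivates the iterator
 * type changes in this patch. The hctx_table name is an assumption.
 */
#define queue_for_each_hw_ctx(q, hctx, i)                               \
        xa_for_each(&(q)->hctx_table, (i), (hctx))

/* Hypothetical caller: the loop body is unchanged, only the index type differs. */
static void example_stop_all(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;        /* "int i" would trip an incompatible-pointer
                                 * warning, since xa_for_each() takes &i as
                                 * an unsigned long pointer. */

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_stop_hw_queue(hctx);
}

Switching the iterators ahead of the data-structure change keeps this patch purely mechanical: no behavior changes here, and the later xarray conversion then only has to touch the map itself.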