block: Use the new blk_opf_t type
Use the new blk_opf_t type for arguments and variables that represent
request flags or a bitwise combination of a request operation and
request flags. Rename the function arguments and also a structure member
that hold a request operation and flags from 'rw' to 'opf'.

This patch does not change any functionality.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
bvanassche committed Jun 22, 2022
1 parent 389f5e4 commit 7852822
Showing 19 changed files with 67 additions and 65 deletions.
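A brief illustrative aside, not part of the patch itself: blk_opf_t is defined alongside this series (in include/linux/blk_types.h) as a sparse __bitwise u32, and a single value carries the request operation in the low REQ_OP_BITS together with the REQ_* modifier flags above them. A minimal sketch of building such a value, mirroring the dio_bio_write_op() hunk in block/fops.c below; example_write_opf() is a hypothetical helper, not from this commit:

#include <linux/blk_types.h>

/* Sketch only: the operation occupies the low REQ_OP_BITS and the
 * modifier flags are OR'ed in above them, exactly as dio_bio_write_op()
 * does in the block/fops.c hunk further down. */
static blk_opf_t example_write_opf(void)
{
	return REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
}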
16 changes: 8 additions & 8 deletions block/bfq-cgroup.c
@@ -220,26 +220,26 @@ void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
-unsigned int op)
+blk_opf_t op)
{
blkg_rwstat_add(&bfqg->stats.queued, op, 1);
bfqg_stats_end_empty_time(&bfqg->stats);
if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

-void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
+void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t op)
{
blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

-void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
+void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t op)
{
blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
-u64 io_start_time_ns, unsigned int op)
+u64 io_start_time_ns, blk_opf_t op)
{
struct bfqg_stats *stats = &bfqg->stats;
u64 now = ktime_get_ns();
@@ -255,11 +255,11 @@ void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
-unsigned int op) { }
-void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
-void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
+blk_opf_t op) { }
+void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t op) { }
+void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
-u64 io_start_time_ns, unsigned int op) { }
+u64 io_start_time_ns, blk_opf_t op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
8 changes: 4 additions & 4 deletions block/bfq-iosched.c
@@ -668,7 +668,7 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
* significantly affect service guarantees coming from the BFQ scheduling
* algorithm.
*/
-static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+static void bfq_limit_depth(blk_opf_t op, struct blk_mq_alloc_data *data)
{
struct bfq_data *bfqd = data->q->elevator->elevator_data;
struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
@@ -6104,7 +6104,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
static void bfq_update_insert_stats(struct request_queue *q,
struct bfq_queue *bfqq,
bool idle_timer_disabled,
-unsigned int cmd_flags)
+blk_opf_t cmd_flags)
{
if (!bfqq)
return;
@@ -6129,7 +6129,7 @@ static void bfq_update_insert_stats(struct request_queue *q,
static inline void bfq_update_insert_stats(struct request_queue *q,
struct bfq_queue *bfqq,
bool idle_timer_disabled,
-unsigned int cmd_flags) {}
+blk_opf_t cmd_flags) {}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

static struct bfq_queue *bfq_init_rq(struct request *rq);
@@ -6141,7 +6141,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_queue *bfqq;
bool idle_timer_disabled = false;
-unsigned int cmd_flags;
+blk_opf_t cmd_flags;
LIST_HEAD(free);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
8 changes: 4 additions & 4 deletions block/bfq-iosched.h
@@ -994,11 +994,11 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
-unsigned int op);
-void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
-void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
+blk_opf_t op);
+void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t op);
+void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t op);
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
-u64 io_start_time_ns, unsigned int op);
+u64 io_start_time_ns, blk_opf_t op);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
10 changes: 5 additions & 5 deletions block/bio.c
@@ -239,7 +239,7 @@ static void bio_free(struct bio *bio)
* when IO has completed, or when the bio is released.
*/
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
-unsigned short max_vecs, unsigned int opf)
+unsigned short max_vecs, blk_opf_t opf)
{
bio->bi_next = NULL;
bio->bi_bdev = bdev;
@@ -292,7 +292,7 @@ EXPORT_SYMBOL(bio_init);
* preserved are the ones that are initialized by bio_alloc_bioset(). See
* comment in struct bio.
*/
-void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
+void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
{
bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES);
@@ -341,7 +341,7 @@ void bio_chain(struct bio *bio, struct bio *parent)
EXPORT_SYMBOL(bio_chain);

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
-unsigned int nr_pages, unsigned int opf, gfp_t gfp)
+unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
{
struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);

@@ -409,7 +409,7 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
}

static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
-unsigned short nr_vecs, unsigned int opf, gfp_t gfp,
+unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
struct bio_set *bs)
{
struct bio_alloc_cache *cache;
@@ -468,7 +468,7 @@ static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
* Returns: Pointer to new bio on success, NULL on failure.
*/
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
-unsigned int opf, gfp_t gfp_mask,
+blk_opf_t opf, gfp_t gfp_mask,
struct bio_set *bs)
{
gfp_t saved_gfp = gfp_mask;
2 changes: 1 addition & 1 deletion block/blk-cgroup-rwstat.h
@@ -59,7 +59,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
* caller is responsible for synchronizing calls to this function.
*/
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
-unsigned int op, uint64_t val)
+blk_opf_t op, uint64_t val)
{
struct percpu_counter *cnt;

2 changes: 1 addition & 1 deletion block/blk-core.c
@@ -1260,7 +1260,7 @@ EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
-BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
+BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
sizeof_field(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
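The (__force u32) casts here and in the blk-mq-debugfs.c and blk-mq.c hunks below are needed because blk_opf_t is a sparse __bitwise type: mixing it implicitly with plain integers makes a sparse run (make C=1) emit restricted-type warnings, and the explicit cast documents the intentional conversion. A minimal sketch of the pattern using a hypothetical example_opf_t type (not from this patch; blk_opf_t follows the same scheme):

#include <linux/types.h>

/* Hypothetical __bitwise type, for illustration only. */
typedef unsigned int __bitwise example_opf_t;

#define EXAMPLE_REQ_SYNC	((__force example_opf_t)(1U << 3))

static inline unsigned int example_opf_to_int(example_opf_t opf)
{
	/* The explicit __force cast keeps sparse quiet, just like the
	 * BUILD_BUG_ON() change above. */
	return (__force unsigned int)opf;
}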
6 changes: 3 additions & 3 deletions block/blk-flush.c
@@ -94,7 +94,7 @@ enum {
};

static void blk_kick_flush(struct request_queue *q,
-struct blk_flush_queue *fq, unsigned int flags);
+struct blk_flush_queue *fq, blk_opf_t flags);

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
@@ -173,7 +173,7 @@ static void blk_flush_complete_seq(struct request *rq,
{
struct request_queue *q = rq->q;
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
-unsigned int cmd_flags;
+blk_opf_t cmd_flags;

BUG_ON(rq->flush.seq & seq);
rq->flush.seq |= seq;
@@ -290,7 +290,7 @@ bool is_flush_rq(struct request *rq)
*
*/
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
-unsigned int flags)
+blk_opf_t flags)
{
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
struct request *first_rq =
6 changes: 3 additions & 3 deletions block/blk-merge.c
@@ -712,7 +712,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
*/
void blk_rq_set_mixed_merge(struct request *rq)
{
-unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
struct bio *bio;

if (rq->rq_flags & RQF_MIXED_MERGE)
@@ -928,7 +928,7 @@ enum bio_merge_status {
static enum bio_merge_status bio_attempt_back_merge(struct request *req,
struct bio *bio, unsigned int nr_segs)
{
-const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
+const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;

if (!ll_back_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
@@ -952,7 +952,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
static enum bio_merge_status bio_attempt_front_merge(struct request *req,
struct bio *bio, unsigned int nr_segs)
{
-const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
+const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;

if (!ll_front_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
4 changes: 2 additions & 2 deletions block/blk-mq-debugfs.c
@@ -315,8 +315,8 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
else
seq_printf(m, "%s", op_str);
seq_puts(m, ", .cmd_flags=");
-blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
-ARRAY_SIZE(cmd_flag_name));
+blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
+cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
seq_puts(m, ", .rq_flags=");
blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
ARRAY_SIZE(rqf_name));
11 changes: 6 additions & 5 deletions block/blk-mq.c
@@ -507,13 +507,13 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
alloc_time_ns);
}

-struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags)
{
struct blk_mq_alloc_data data = {
.q = q,
.flags = flags,
-.cmd_flags = op,
+.cmd_flags = opf,
.nr_tags = 1,
};
struct request *rq;
@@ -654,7 +654,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
{
printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
rq->q->disk ? rq->q->disk->disk_name : "?",
-(unsigned long long) rq->cmd_flags);
+(__force unsigned long long) rq->cmd_flags);

printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
(unsigned long long)blk_rq_pos(rq),
@@ -707,8 +707,9 @@ static void blk_print_req_error(struct request *req, blk_status_t status)
"phys_seg %u prio class %u\n",
blk_status_to_str(status),
req->q->disk ? req->q->disk->disk_name : "?",
-blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
-req->cmd_flags & ~REQ_OP_MASK,
+blk_rq_pos(req), (__force u32)req_op(req),
+blk_op_str(req_op(req)),
+(__force u32)(req->cmd_flags & ~REQ_OP_MASK),
req->nr_phys_segments,
IOPRIO_PRIO_CLASS(req->ioprio));
}
6 changes: 3 additions & 3 deletions block/blk-mq.h
@@ -86,7 +86,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

-static inline enum hctx_type blk_mq_get_hctx_type(unsigned int opf)
+static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
enum hctx_type type = HCTX_TYPE_DEFAULT;

@@ -107,7 +107,7 @@ static inline enum hctx_type blk_mq_get_hctx_type(unsigned int opf)
* @ctx: software queue cpu ctx
*/
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-unsigned int opf,
+blk_opf_t opf,
struct blk_mq_ctx *ctx)
{
return ctx->hctxs[blk_mq_get_hctx_type(opf)];
@@ -151,7 +151,7 @@ struct blk_mq_alloc_data {
struct request_queue *q;
blk_mq_req_flags_t flags;
unsigned int shallow_depth;
-unsigned int cmd_flags;
+blk_opf_t cmd_flags;
req_flags_t rq_flags;

/* allocate multiple requests/tags in one go */
16 changes: 8 additions & 8 deletions block/blk-wbt.c
@@ -451,7 +451,7 @@ static bool close_io(struct rq_wb *rwb)

#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)

-static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
+static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)
{
unsigned int limit;

@@ -462,7 +462,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
if (!rwb_enabled(rwb))
return UINT_MAX;

-if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
+if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
return rwb->wb_background;

/*
@@ -473,9 +473,9 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
* the idle limit, or go to normal if we haven't had competing
* IO for a bit.
*/
-if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
+if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
limit = rwb->rq_depth.max_depth;
-else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
+else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {
/*
* If less than 100ms since we completed unrelated IO,
* limit us to half the depth for background writeback.
@@ -490,13 +490,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
struct wbt_wait_data {
struct rq_wb *rwb;
enum wbt_flags wb_acct;
-unsigned long rw;
+blk_opf_t opf;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
struct wbt_wait_data *data = private_data;
-return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
+return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
@@ -510,13 +510,13 @@ static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
* the timer to kick off queuing again.
*/
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
-unsigned long rw)
+blk_opf_t opf)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
struct wbt_wait_data data = {
.rwb = rwb,
.wb_acct = wb_acct,
-.rw = rw,
+.opf = opf,
};

rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
2 changes: 1 addition & 1 deletion block/elevator.h
@@ -34,7 +34,7 @@ struct elevator_mq_ops {
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
void (*requests_merged)(struct request_queue *, struct request *, struct request *);
-void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
+void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *);
void (*prepare_request)(struct request *);
void (*finish_request)(struct request *);
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
8 changes: 4 additions & 4 deletions block/fops.c
@@ -32,9 +32,9 @@ static int blkdev_get_block(struct inode *inode, sector_t iblock,
return 0;
}

-static unsigned int dio_bio_write_op(struct kiocb *iocb)
+static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
-unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+blk_opf_t op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

/* avoid the need for a I/O completion work item */
if (iocb->ki_flags & IOCB_DSYNC)
@@ -175,7 +175,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
struct blkdev_dio *dio;
struct bio *bio;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
-unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
+blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
loff_t pos = iocb->ki_pos;
int ret = 0;

@@ -297,7 +297,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
{
struct block_device *bdev = iocb->ki_filp->private_data;
bool is_read = iov_iter_rw(iter) == READ;
-unsigned int opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
+blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
struct blkdev_dio *dio;
struct bio *bio;
loff_t pos = iocb->ki_pos;
