block: move a few merge helpers out of <linux/blkdev.h>
These are block-layer internal helpers, so move them to block/blk.h and
block/blk-merge.c.  Also update a comment a bit to use better grammar.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20210920123328.1399408-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig authored and axboe committed Oct 18, 2021
1 parent b81e0c2 commit badf7f6
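
For context, the practical effect is on visibility: after this commit a block-layer internal file picks these helpers up from block/blk.h, while drivers that only include <linux/blkdev.h> no longer see them. A minimal sketch of a hypothetical internal caller (illustration only, not part of this commit):

/* Hypothetical block/ source file -- illustration only, not in this commit. */
#include "blk.h"	/* now provides rq_mergeable() and blk_discard_mergable() */

static bool sketch_may_merge(struct request *rq)
{
	/* Passthrough, flush, write-zeroes, zone-append and NOMERGE requests never merge. */
	return rq_mergeable(rq);
}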
Showing 3 changed files with 62 additions and 64 deletions.
24 changes: 24 additions & 0 deletions block/blk-merge.c
@@ -558,6 +558,23 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
 	return queue_max_segments(rq->q);
 }
 
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+						  sector_t offset)
+{
+	struct request_queue *q = rq->q;
+
+	if (blk_rq_is_passthrough(rq))
+		return q->limits.max_hw_sectors;
+
+	if (!q->limits.chunk_sectors ||
+	    req_op(rq) == REQ_OP_DISCARD ||
+	    req_op(rq) == REQ_OP_SECURE_ERASE)
+		return blk_queue_get_max_sectors(q, req_op(rq));
+
+	return min(blk_max_size_offset(q, offset, 0),
+		   blk_queue_get_max_sectors(q, req_op(rq)));
+}
+
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 		unsigned int nr_phys_segs)
 {
@@ -718,6 +735,13 @@ static enum elv_merge blk_try_req_merge(struct request *req,
 	return ELEVATOR_NO_MERGE;
 }
 
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+	if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
+		return true;
+	return false;
+}
+
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
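
As a rough illustration of why blk_rq_get_max_sectors() now lives next to the merge code: the back/front merge checks compare the combined size of a request and a candidate bio against this limit. A condensed sketch of that check, assuming the usual shape of the merge path (not part of this diff):

/* Sketch only: roughly how the merge path applies the size limit. */
static bool sketch_back_merge_fits(struct request *req, struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return false;	/* merged request would exceed the queue limit */
	return true;
}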
38 changes: 38 additions & 0 deletions block/blk.h
@@ -96,6 +96,44 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
 	return __bvec_gap_to_prev(q, bprv, offset);
 }
 
+static inline bool rq_mergeable(struct request *rq)
+{
+	if (blk_rq_is_passthrough(rq))
+		return false;
+
+	if (req_op(rq) == REQ_OP_FLUSH)
+		return false;
+
+	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
+		return false;
+
+	if (req_op(rq) == REQ_OP_ZONE_APPEND)
+		return false;
+
+	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+		return false;
+	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
+		return false;
+
+	return true;
+}
+
+/*
+ * There are two different ways to handle DISCARD merges:
+ * 1) If max_discard_segments > 1, the driver treats every bio as a range and
+ *    send the bios to controller together. The ranges don't need to be
+ *    contiguous.
+ * 2) Otherwise, the request will be normal read/write requests. The ranges
+ *    need to be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(req->q) > 1)
+		return true;
+	return false;
+}
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 void blk_flush_integrity(void);
 bool __bio_integrity_endio(struct bio *);
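
The comment added above distinguishes the two DISCARD merge strategies: the merge path asks blk_discard_mergable() first and only falls back to the usual contiguity checks when the device cannot take multi-range discards. A condensed sketch of that decision order, assuming the shape of the in-tree merge logic (simplified, not part of this diff):

/* Sketch only: simplified decision order for merging a bio into a request. */
static enum elv_merge sketch_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;	/* ranges need not be contiguous */
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;	/* bio starts right after rq */
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;	/* bio ends right before rq */
	return ELEVATOR_NO_MERGE;
}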
64 changes: 0 additions & 64 deletions include/linux/blkdev.h
@@ -745,37 +745,6 @@ static inline bool rq_is_sync(struct request *rq)
 	return op_is_sync(rq->cmd_flags);
 }
 
-static inline bool rq_mergeable(struct request *rq)
-{
-	if (blk_rq_is_passthrough(rq))
-		return false;
-
-	if (req_op(rq) == REQ_OP_FLUSH)
-		return false;
-
-	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
-		return false;
-
-	if (req_op(rq) == REQ_OP_ZONE_APPEND)
-		return false;
-
-	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
-		return false;
-	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
-		return false;
-
-	return true;
-}
-
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
-	if (bio_page(a) == bio_page(b) &&
-	    bio_offset(a) == bio_offset(b))
-		return true;
-
-	return false;
-}
-
 static inline unsigned int blk_queue_depth(struct request_queue *q)
 {
 	if (q->queue_depth)
@@ -1030,23 +999,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
 	return min(q->limits.max_sectors, chunk_sectors);
 }
 
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
-						  sector_t offset)
-{
-	struct request_queue *q = rq->q;
-
-	if (blk_rq_is_passthrough(rq))
-		return q->limits.max_hw_sectors;
-
-	if (!q->limits.chunk_sectors ||
-	    req_op(rq) == REQ_OP_DISCARD ||
-	    req_op(rq) == REQ_OP_SECURE_ERASE)
-		return blk_queue_get_max_sectors(q, req_op(rq));
-
-	return min(blk_max_size_offset(q, offset, 0),
-		   blk_queue_get_max_sectors(q, req_op(rq)));
-}
-
 static inline unsigned int blk_rq_count_bios(struct request *rq)
 {
 	unsigned int nr_bios = 0;
@@ -1490,22 +1442,6 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 	return offset << SECTOR_SHIFT;
 }
 
-/*
- * Two cases of handling DISCARD merge:
- * If max_discard_segments > 1, the driver takes every bio
- * as a range and send them to controller together. The ranges
- * needn't to be contiguous.
- * Otherwise, the bios/requests will be handled as same as
- * others which should be contiguous.
- */
-static inline bool blk_discard_mergable(struct request *req)
-{
-	if (req_op(req) == REQ_OP_DISCARD &&
-	    queue_max_discard_segments(req->q) > 1)
-		return true;
-	return false;
-}
-
 static inline int bdev_discard_alignment(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
