block: add emulation for copy
For devices that do not support copy, copy emulation is added.
Copy emulation is implemented by reading from the source ranges into
memory and writing to the corresponding destination ranges synchronously.
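
As a rough sketch (not part of this patch), a caller could exercise the
copy path as below; the byte-based units of the range_entry fields and
the zero-initialized comp_len are assumptions drawn from the code in
this patch:

        /* Hypothetical caller; src_bdev and dest_bdev are assumed to be
         * already-opened block devices.
         */
        struct range_entry range = {
                .src = 0,               /* byte offset on the source device (assumed unit) */
                .dst = 0,               /* byte offset on the destination device (assumed unit) */
                .len = 1 << 20,         /* copy 1 MiB */
                .comp_len = 0,          /* bytes completed; updated as the copy progresses */
        };
        int ret = blkdev_issue_copy(src_bdev, 1, &range, dest_bdev, GFP_KERNEL);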

Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
Signed-off-by: Arnav Dawn <arnav.dawn@samsung.com>
nj-shetty authored and intel-lab-lkp committed Apr 26, 2022
1 parent 38c8413 commit c406c51
Showing 3 changed files with 130 additions and 2 deletions.
128 changes: 127 additions & 1 deletion block/blk-lib.c
@@ -273,6 +273,65 @@ int blk_copy_offload(struct block_device *src_bdev, int nr_srcs,
        return cio_await_completion(cio);
}

int blk_submit_rw_buf(struct block_device *bdev, void *buf, sector_t buf_len,
                sector_t sector, unsigned int op, gfp_t gfp_mask)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio, *parent = NULL;
        /* largest I/O, in bytes, allowed by the queue's hardware limits */
        sector_t max_hw_len = min_t(unsigned int, queue_max_hw_sectors(q),
                        queue_max_segments(q) << (PAGE_SHIFT - SECTOR_SHIFT)) << SECTOR_SHIFT;
        sector_t len, remaining;
        int ret;

        for (remaining = buf_len; remaining > 0; remaining -= len) {
                len = min_t(int, max_hw_len, remaining);
retry:
                bio = bio_map_kern(q, buf, len, gfp_mask);
                if (IS_ERR(bio)) {
                        /* mapping failed; halve the length and retry */
                        len >>= 1;
                        if (len)
                                goto retry;
                        return PTR_ERR(bio);
                }

                /* convert the byte offset to a 512B sector number */
                bio->bi_iter.bi_sector = sector >> SECTOR_SHIFT;
                bio->bi_opf = op;
                bio_set_dev(bio, bdev);
                bio->bi_end_io = NULL;
                bio->bi_private = NULL;

                /* chain and submit every bio except the last */
                if (parent) {
                        bio_chain(parent, bio);
                        submit_bio(parent);
                }
                parent = bio;
                sector += len;
                buf = (char *) buf + len;
        }
        /* waiting on the final bio completes the whole chain */
        ret = submit_bio_wait(bio);
        bio_put(bio);

        return ret;
}

static void *blk_alloc_buf(sector_t req_size, sector_t *alloc_size, gfp_t gfp_mask)
{
        int min_size = PAGE_SIZE;
        void *buf;

        while (req_size >= min_size) {
                buf = kvmalloc(req_size, gfp_mask);
                if (buf) {
                        *alloc_size = req_size;
                        return buf;
                }
                /* retry half the requested size */
                req_size >>= 1;
        }

        return NULL;
}

static inline int blk_copy_sanity_check(struct block_device *src_bdev,
                struct block_device *dst_bdev, struct range_entry *rlist, int nr)
{
@@ -298,6 +357,68 @@ static inline int blk_copy_sanity_check(struct block_device *src_bdev,
        return 0;
}

/* returns the total copy length that still needs to be copied */
static inline sector_t blk_copy_max_range(struct range_entry *rlist, int nr, sector_t *max_len)
{
        int i;
        sector_t len = 0;

        *max_len = 0;
        for (i = 0; i < nr; i++) {
                /* track the largest remaining range to size the bounce buffer */
                *max_len = max(*max_len, rlist[i].len - rlist[i].comp_len);
                len += (rlist[i].len - rlist[i].comp_len);
        }

        return len;
}

/*
 * If the native copy offload feature is absent, this function emulates the
 * copy by reading data from the source ranges into a temporary buffer and
 * writing that buffer out to the destination device.
 */
static int blk_copy_emulate(struct block_device *src_bdev, int nr,
                struct range_entry *rlist, struct block_device *dest_bdev, gfp_t gfp_mask)
{
        void *buf = NULL;
        int ret = 0, nr_i = 0;
        sector_t src, dst, copy_len, buf_len, read_len, copied_len,
                max_len = 0, remaining = 0, offset = 0;

        copy_len = blk_copy_max_range(rlist, nr, &max_len);
        buf = blk_alloc_buf(max_len, &buf_len, gfp_mask);
        if (!buf)
                return -ENOMEM;

        for (copied_len = 0; copied_len < copy_len; copied_len += read_len) {
                /* advance to the next range once the current one is consumed */
                if (!remaining) {
                        offset = rlist[nr_i].comp_len;
                        src = rlist[nr_i].src + offset;
                        dst = rlist[nr_i].dst + offset;
                        remaining = rlist[nr_i++].len - offset;
                }

                read_len = min_t(sector_t, remaining, buf_len);
                if (!read_len)
                        continue;
                /* read a chunk of the source range into the bounce buffer */
                ret = blk_submit_rw_buf(src_bdev, buf, read_len, src, REQ_OP_READ, gfp_mask);
                if (ret)
                        goto out;
                src += read_len;
                remaining -= read_len;
                /* write the same chunk out to the destination range */
                ret = blk_submit_rw_buf(dest_bdev, buf, read_len, dst, REQ_OP_WRITE,
                                gfp_mask);
                if (ret)
                        goto out;
                else
                        rlist[nr_i - 1].comp_len += read_len;
                dst += read_len;
        }
out:
        kvfree(buf);
        return ret;
}

static inline bool blk_check_copy_offload(struct request_queue *src_q,
                struct request_queue *dest_q)
{
@@ -325,6 +446,7 @@ int blkdev_issue_copy(struct block_device *src_bdev, int nr,
        struct request_queue *src_q = bdev_get_queue(src_bdev);
        struct request_queue *dest_q = bdev_get_queue(dest_bdev);
        int ret = -EINVAL;
        bool offload = false;

        if (!src_q || !dest_q)
                return -ENXIO;
@@ -342,9 +464,13 @@ int blkdev_issue_copy(struct block_device *src_bdev, int nr,
        if (ret)
                return ret;

-       if (blk_check_copy_offload(src_q, dest_q))
+       offload = blk_check_copy_offload(src_q, dest_q);
+       if (offload)
                ret = blk_copy_offload(src_bdev, nr, rlist, dest_bdev, gfp_mask);

+       if (ret || !offload)
+               ret = blk_copy_emulate(src_bdev, nr, rlist, dest_bdev, gfp_mask);
+
        return ret;
}
EXPORT_SYMBOL_GPL(blkdev_issue_copy);
2 changes: 1 addition & 1 deletion block/blk-map.c
@@ -340,7 +340,7 @@ static void bio_map_kern_endio(struct bio *bio)
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-static struct bio *bio_map_kern(struct request_queue *q, void *data,
+struct bio *bio_map_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
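
With bio_map_kern() now exported to the rest of the block layer, a caller
such as blk_submit_rw_buf() above can wrap a kernel buffer in a bio and
submit it synchronously. A minimal sketch of that pattern, assuming bdev,
buf, len, and a byte offset byte_off are already set up (these names are
illustrative, not from the patch):

        /* illustrative only; mirrors the usage in blk_submit_rw_buf() above */
        struct bio *bio = bio_map_kern(bdev_get_queue(bdev), buf, len, GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);
        bio->bi_iter.bi_sector = byte_off >> SECTOR_SHIFT;      /* bytes to 512B sectors */
        bio->bi_opf = REQ_OP_READ;
        bio_set_dev(bio, bdev);
        ret = submit_bio_wait(bio);     /* block until the I/O completes */
        bio_put(bio);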
2 changes: 2 additions & 0 deletions include/linux/blkdev.h
@@ -1121,6 +1121,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp);
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
                gfp_t gfp_mask);
int blkdev_issue_copy(struct block_device *src_bdev, int nr_srcs,
                struct range_entry *src_rlist, struct block_device *dest_bdev, gfp_t gfp_mask);

