dm: Add support for copy offload.
Before enabling copy offload for a dm target, check that both the dm
target and all underlying devices support copy. Avoid splits inside the
dm target: fail early if the request would need to be split, since
splitting a copy request is not yet supported.

Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
nj-shetty authored and intel-lab-lkp committed Apr 26, 2022
1 parent 6a9ea85 commit 913c8c5
Showing 3 changed files with 56 additions and 0 deletions.
45 changes: 45 additions & 0 deletions drivers/md/dm-table.c
@@ -1893,6 +1893,38 @@ static bool dm_table_supports_nowait(struct dm_table *t)
	return true;
}

static int device_not_copy_capable(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !blk_queue_copy(q);
}

static bool dm_table_supports_copy(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned int i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->copy_offload_supported)
			return false;

		/*
		 * The target advertises copy support by setting
		 * 'copy_offload_supported'; it additionally relies on
		 * _all_ of its data devices supporting copy.
		 */
		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_copy_capable, NULL))
			return false;
	}

	return true;
}

static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
@@ -1981,6 +2013,19 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
		q->limits.discard_misaligned = 0;
	}

	if (!dm_table_supports_copy(t)) {
		blk_queue_flag_clear(QUEUE_FLAG_COPY, q);
		/* Must also clear copy limits... */
		q->limits.max_copy_sectors = 0;
		q->limits.max_hw_copy_sectors = 0;
		q->limits.max_copy_range_sectors = 0;
		q->limits.max_hw_copy_range_sectors = 0;
		q->limits.max_copy_nr_ranges = 0;
		q->limits.max_hw_copy_nr_ranges = 0;
	} else {
		blk_queue_flag_set(QUEUE_FLAG_COPY, q);
	}

	if (!dm_table_supports_secure_erase(t))
		q->limits.max_secure_erase_sectors = 0;
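QUEUE_FLAG_COPY, blk_queue_copy() and the max_copy_* queue limits cleared
above come from the block-layer patches earlier in this series, not from
this commit. As a non-authoritative sketch, the predicate presumably
follows the same pattern as the other blk_queue_* queue-flag tests:

	/* Sketch only: assumed definition, mirroring predicates such as
	 * blk_queue_nonrot() in include/linux/blkdev.h.
	 */
	#define blk_queue_copy(q)	test_bit(QUEUE_FLAG_COPY, &(q)->queue_flags)
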
6 changes: 6 additions & 0 deletions drivers/md/dm.c
@@ -1595,6 +1595,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
	else if (unlikely(ci->is_abnormal_io))
		return __process_abnormal_io(ci, ti);

	if (unlikely(op_is_copy(ci->bio->bi_opf)) &&
	    max_io_len(ti, ci->sector) < ci->sector_count) {
		DMERR("%s: IO size (%u) is greater than maximum target size (%llu)",
		      __func__, ci->sector_count,
		      (unsigned long long)max_io_len(ti, ci->sector));
		return BLK_STS_IOERR;
	}

	/*
	 * Only support bio polling for normal IO, and the target io is
	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
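op_is_copy() likewise comes from the block-layer patches in this series
rather than this commit. A minimal sketch, assuming REQ_OP_COPY is the new
copy opcode and following the style of the existing op_is_*() helpers in
include/linux/blk_types.h:

	/* Sketch only: true if the bio carries a copy operation. */
	static inline bool op_is_copy(unsigned int op)
	{
		return (op & REQ_OP_MASK) == REQ_OP_COPY;
	}

With that, the hunk above rejects any copy bio larger than max_io_len(),
i.e. one that would have to be split across a target boundary.
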
5 changes: 5 additions & 0 deletions include/linux/device-mapper.h
@@ -362,6 +362,11 @@ struct dm_target {
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target supports copy offload.
	 */
	bool copy_offload_supported:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
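Nothing in this commit sets copy_offload_supported; individual targets are
expected to opt in separately (e.g. in follow-up patches of this series).
An illustrative sketch, with a hypothetical target name and constructor
body, of how a simple pass-through target would advertise the capability:

	/* Illustrative sketch: advertise copy offload from a target ctr;
	 * dm_table_supports_copy() then checks that every underlying
	 * data device supports copy as well.
	 */
	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* ... parse args, dm_get_device(), set num_*_bios ... */
		ti->copy_offload_supported = 1;
		return 0;
	}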
