Skip to content

Commit 1147f58

Browse files
GuoqingJiang-Linux authored and liu-song-6 committed
md/raid5: avoid redundant bio clone in raid5_read_one_chunk
After enable io accounting, chunk read bio could be cloned twice which is not good. To avoid such inefficiency, let's clone align_bio from io_acct_set too, then we need only call md_account_bio in make_request unconditionally. Signed-off-by: Guoqing Jiang <jiangguoqing@kylinos.cn> Signed-off-by: Song Liu <song@kernel.org>
1 parent c82aa1b commit 1147f58

File tree

1 file changed

+15
-14
lines changed

1 file changed

+15
-14
lines changed

drivers/md/raid5.c

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -5364,11 +5364,13 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf,
53645364
*/
53655365
static void raid5_align_endio(struct bio *bi)
53665366
{
5367-
struct bio* raid_bi = bi->bi_private;
5367+
struct md_io_acct *md_io_acct = bi->bi_private;
5368+
struct bio *raid_bi = md_io_acct->orig_bio;
53685369
struct mddev *mddev;
53695370
struct r5conf *conf;
53705371
struct md_rdev *rdev;
53715372
blk_status_t error = bi->bi_status;
5373+
unsigned long start_time = md_io_acct->start_time;
53725374

53735375
bio_put(bi);
53745376

@@ -5380,6 +5382,8 @@ static void raid5_align_endio(struct bio *bi)
53805382
rdev_dec_pending(rdev, conf->mddev);
53815383

53825384
if (!error) {
5385+
if (blk_queue_io_stat(raid_bi->bi_bdev->bd_disk->queue))
5386+
bio_end_io_acct(raid_bi, start_time);
53835387
bio_endio(raid_bi);
53845388
if (atomic_dec_and_test(&conf->active_aligned_reads))
53855389
wake_up(&conf->wait_for_quiescent);
@@ -5398,6 +5402,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
53985402
struct md_rdev *rdev;
53995403
sector_t sector, end_sector, first_bad;
54005404
int bad_sectors, dd_idx;
5405+
struct md_io_acct *md_io_acct;
54015406

54025407
if (!in_chunk_boundary(mddev, raid_bio)) {
54035408
pr_debug("%s: non aligned\n", __func__);
@@ -5434,14 +5439,18 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
54345439
return 0;
54355440
}
54365441

5437-
align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
5442+
align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
5443+
md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
5444+
raid_bio->bi_next = (void *)rdev;
5445+
if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue))
5446+
md_io_acct->start_time = bio_start_io_acct(raid_bio);
5447+
md_io_acct->orig_bio = raid_bio;
5448+
54385449
bio_set_dev(align_bio, rdev->bdev);
54395450
align_bio->bi_end_io = raid5_align_endio;
5440-
align_bio->bi_private = raid_bio;
5451+
align_bio->bi_private = md_io_acct;
54415452
align_bio->bi_iter.bi_sector = sector;
54425453

5443-
raid_bio->bi_next = (void *)rdev;
5444-
54455454
/* No reshape active, so we can trust rdev->data_offset */
54465455
align_bio->bi_iter.bi_sector += rdev->data_offset;
54475456

@@ -5468,7 +5477,6 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
54685477
sector_t sector = raid_bio->bi_iter.bi_sector;
54695478
unsigned chunk_sects = mddev->chunk_sectors;
54705479
unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
5471-
struct r5conf *conf = mddev->private;
54725480

54735481
if (sectors < bio_sectors(raid_bio)) {
54745482
struct r5conf *conf = mddev->private;
@@ -5478,9 +5486,6 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
54785486
raid_bio = split;
54795487
}
54805488

5481-
if (raid_bio->bi_pool != &conf->bio_split)
5482-
md_account_bio(mddev, &raid_bio);
5483-
54845489
if (!raid5_read_one_chunk(mddev, raid_bio))
54855490
return raid_bio;
54865491

@@ -5760,7 +5765,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
57605765
DEFINE_WAIT(w);
57615766
bool do_prepare;
57625767
bool do_flush = false;
5763-
bool do_clone = false;
57645768

57655769
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
57665770
int ret = log_handle_flush_request(conf, bi);
@@ -5789,7 +5793,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
57895793
if (rw == READ && mddev->degraded == 0 &&
57905794
mddev->reshape_position == MaxSector) {
57915795
bi = chunk_aligned_read(mddev, bi);
5792-
do_clone = true;
57935796
if (!bi)
57945797
return true;
57955798
}
@@ -5804,9 +5807,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
58045807
last_sector = bio_end_sector(bi);
58055808
bi->bi_next = NULL;
58065809

5807-
if (!do_clone)
5808-
md_account_bio(mddev, &bi);
5809-
5810+
md_account_bio(mddev, &bi);
58105811
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
58115812
for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
58125813
int previous;

0 commit comments

Comments (0)