block: Abstract out bvec iterator

Immutable biovecs are going to require an explicit iterator. To
implement immutable bvecs, a later patch is going to add a bi_bvec_done
member to the new struct bvec_iter; for now, this patch effectively
just renames things.
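
Concretely, the iterator added to include/linux/blk_types.h by this
patch looks roughly like the sketch below. This is an illustration
reconstructed from the blk_types.h and biodoc.txt hunks in this diff
(with a stand-in typedef so it compiles on its own), not the verbatim
patch:

    /* Sketch: the three position fields of struct bio move into an
     * embedded iterator that later patches can advance and extend. */
    typedef unsigned long long sector_t;  /* stand-in for the kernel typedef */

    struct bvec_iter {
            sector_t     bi_sector;   /* device address, in 512-byte sectors */
            unsigned int bi_size;     /* residual I/O count, in bytes */
            unsigned int bi_idx;      /* current index into bi_io_vec[] */
    };

    /* struct bio then carries "struct bvec_iter bi_iter;" in place of
     * its old bi_sector, bi_size and bi_idx members. */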

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
1 parent ed9c47b. Commit 4f024f3797c43cb4b73cd2c50cec728842d0e49e. Kent Overstreet committed Oct 11, 2013.
Showing with 700 additions and 638 deletions.
  1. +3 −4 Documentation/block/biodoc.txt
  2. +1 −1 arch/m68k/emu/nfblock.c
  3. +2 −1 arch/powerpc/sysdev/axonram.c
  4. +18 −18 block/blk-core.c
  5. +1 −1 block/blk-flush.c
  6. +6 −6 block/blk-lib.c
  7. +3 −3 block/blk-map.c
  8. +2 −2 block/blk-merge.c
  9. +1 −1 block/blk-mq.c
  10. +7 −7 block/blk-throttle.c
  11. +1 −1 block/elevator.c
  12. +3 −3 drivers/block/aoe/aoecmd.c
  13. +2 −2 drivers/block/brd.c
  14. +1 −1 drivers/block/drbd/drbd_actlog.c
  15. +1 −1 drivers/block/drbd/drbd_bitmap.c
  16. +3 −3 drivers/block/drbd/drbd_receiver.c
  17. +3 −3 drivers/block/drbd/drbd_req.c
  18. +1 −1 drivers/block/drbd/drbd_req.h
  19. +2 −2 drivers/block/floppy.c
  20. +2 −2 drivers/block/loop.c
  21. +4 −3 drivers/block/mtip32xx/mtip32xx.c
  22. +13 −12 drivers/block/nvme-core.c
  23. +29 −25 drivers/block/pktcdvd.c
  24. +1 −1 drivers/block/ps3disk.c
  25. +1 −1 drivers/block/ps3vram.c
  26. +11 −10 drivers/block/rbd.c
  27. +3 −3 drivers/block/rsxx/dev.c
  28. +2 −2 drivers/block/rsxx/dma.c
  29. +5 −4 drivers/block/umem.c
  30. +1 −1 drivers/block/xen-blkback/blkback.c
  31. +1 −1 drivers/block/xen-blkfront.c
  32. +2 −2 drivers/md/bcache/btree.c
  33. +1 −1 drivers/md/bcache/debug.c
  34. +13 −13 drivers/md/bcache/io.c
  35. +6 −6 drivers/md/bcache/journal.c
  36. +2 −2 drivers/md/bcache/movinggc.c
  37. +29 −29 drivers/md/bcache/request.c
  38. +8 −8 drivers/md/bcache/super.c
  39. +2 −2 drivers/md/bcache/util.c
  40. +3 −3 drivers/md/bcache/writeback.c
  41. +1 −1 drivers/md/bcache/writeback.h
  42. +6 −6 drivers/md/dm-bio-record.h
  43. +1 −1 drivers/md/dm-bufio.c
  44. +2 −2 drivers/md/dm-cache-policy-mq.c
  45. +12 −10 drivers/md/dm-cache-target.c
  46. +10 −9 drivers/md/dm-crypt.c
  47. +4 −3 drivers/md/dm-delay.c
  48. +4 −3 drivers/md/dm-flakey.c
  49. +3 −3 drivers/md/dm-io.c
  50. +2 −1 drivers/md/dm-linear.c
  51. +8 −8 drivers/md/dm-raid1.c
  52. +2 −1 drivers/md/dm-region-hash.c
  53. +9 −9 drivers/md/dm-snap.c
  54. +8 −5 drivers/md/dm-stripe.c
  55. +2 −2 drivers/md/dm-switch.c
  56. +12 −10 drivers/md/dm-thin.c
  57. +4 −4 drivers/md/dm-verity.c
  58. +13 −12 drivers/md/dm.c
  59. +12 −7 drivers/md/faulty.c
  60. +6 −6 drivers/md/linear.c
  61. +5 −5 drivers/md/md.c
  62. +7 −6 drivers/md/multipath.c
  63. +9 −7 drivers/md/raid0.c
  64. +39 −36 drivers/md/raid1.c
  65. +49 −42 drivers/md/raid10.c
  66. +37 −35 drivers/md/raid5.c
  67. +3 −2 drivers/s390/block/dcssblk.c
  68. +5 −4 drivers/s390/block/xpram.c
  69. +1 −1 drivers/scsi/osd/osd_initiator.c
  70. +7 −5 drivers/staging/lustre/lustre/llite/lloop.c
  71. +8 −6 drivers/staging/zram/zram_drv.c
  72. +1 −1 drivers/target/target_core_iblock.c
  73. +4 −4 fs/bio-integrity.c
  74. +29 −27 fs/bio.c
  75. +4 −4 fs/btrfs/check-integrity.c
  76. +9 −8 fs/btrfs/compression.c
  77. +7 −7 fs/btrfs/extent_io.c
  78. +10 −9 fs/btrfs/file-item.c
  79. +12 −10 fs/btrfs/inode.c
  80. +11 −11 fs/btrfs/raid56.c
  81. +6 −6 fs/btrfs/scrub.c
  82. +6 −6 fs/btrfs/volumes.c
  83. +6 −6 fs/buffer.c
  84. +2 −2 fs/direct-io.c
  85. +2 −2 fs/ext4/page-io.c
  86. +1 −1 fs/f2fs/data.c
  87. +1 −1 fs/f2fs/segment.c
  88. +1 −1 fs/gfs2/lops.c
  89. +1 −1 fs/gfs2/ops_fstype.c
  90. +1 −1 fs/hfsplus/wrapper.c
  91. +6 −6 fs/jfs/jfs_logmgr.c
  92. +5 −4 fs/jfs/jfs_metapage.c
  93. +10 −10 fs/logfs/dev_bdev.c
  94. +1 −1 fs/mpage.c
  95. +5 −4 fs/nfs/blocklayout/blocklayout.c
  96. +2 −1 fs/nilfs2/segbuf.c
  97. +1 −1 fs/ocfs2/cluster/heartbeat.c
  98. +1 −1 fs/xfs/xfs_aops.c
  99. +2 −2 fs/xfs/xfs_buf.c
  100. +8 −8 include/linux/bio.h
  101. +12 −7 include/linux/blk_types.h
  102. +13 −13 include/trace/events/bcache.h
  103. +13 −13 include/trace/events/block.h
  104. +2 −2 include/trace/events/f2fs.h
  105. +1 −1 kernel/power/block_io.c
  106. +8 −7 kernel/trace/blktrace.c
  107. +5 −5 mm/page_io.c
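
Nearly all of the 107 files below change in the same mechanical way:
every open-coded access to bio->bi_sector, bio->bi_size and bio->bi_idx
becomes an access through the embedded iterator. As an illustration
(bio_last_sector is a hypothetical helper, not a hunk from this patch):

    /* before this patch: a driver computing a bio's end sector by hand */
    static sector_t bio_last_sector(struct bio *bio)
    {
            return bio->bi_sector + (bio->bi_size >> 9);
    }

    /* after this patch: same logic, through the embedded iterator */
    static sector_t bio_last_sector(struct bio *bio)
    {
            return bio->bi_iter.bi_sector + (bio->bi_iter.bi_size >> 9);
    }
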
Documentation/block/biodoc.txt
@@ -447,14 +447,13 @@ struct bio_vec {
* main unit of I/O for the block layer and lower layers (ie drivers)
*/
struct bio {
- sector_t bi_sector;
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev; /* target device */
unsigned long bi_flags; /* status, command, etc */
unsigned long bi_rw; /* low bits: r/w, high: priority */
unsigned int bi_vcnt; /* how may bio_vec's */
- unsigned int bi_idx; /* current index into bio_vec array */
+ struct bvec_iter bi_iter; /* current index into bio_vec array */
unsigned int bi_size; /* total size in bytes */
unsigned short bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@ With this multipage bio design:
- Code that traverses the req list can find all the segments of a bio
by using rq_for_each_segment. This handles the fact that a request
has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
field to keep track of the next bio_vec entry to process.
(e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
[TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the
nr_sectors and current_nr_sectors fields (based on the corresponding
hard_xxx values and the number of bytes transferred) and updates it on
every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.
The buffer field is just a virtual address mapping of the current segment
of the i/o buffer in cases where the buffer resides in low-memory. For high
arch/m68k/emu/nfblock.c
@@ -64,7 +64,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
struct nfhd_device *dev = queue->queuedata;
struct bio_vec *bvec;
int i, dir, len, shift;
- sector_t sec = bio->bi_sector;
+ sector_t sec = bio->bi_iter.bi_sector;
dir = bio_data_dir(bio);
shift = dev->bshift;
arch/powerpc/sysdev/axonram.c
@@ -113,7 +113,8 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
unsigned int transfered;
unsigned short idx;
- phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+ phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+ AXON_RAM_SECTOR_SHIFT);
phys_end = bank->io_addr + bank->size;
transfered = 0;
bio_for_each_segment(vec, bio, idx) {
block/blk-core.c
@@ -130,7 +130,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
- if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+ if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
bio_endio(bio, error);
}
@@ -1326,7 +1326,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = len;
- bio->bi_size = len;
+ bio->bi_iter.bi_size = len;
bio->bi_vcnt = 1;
bio->bi_phys_segments = 1;
@@ -1351,7 +1351,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = bio;
req->biotail = bio;
- req->__data_len += bio->bi_size;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
@@ -1380,8 +1380,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
- req->__sector = bio->bi_sector;
- req->__data_len += bio->bi_size;
+ req->__sector = bio->bi_iter.bi_sector;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
@@ -1459,7 +1459,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
- req->__sector = bio->bi_sector;
+ req->__sector = bio->bi_iter.bi_sector;
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
@@ -1583,12 +1583,12 @@ static inline void blk_partition_remap(struct bio *bio)
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- bio->bi_sector += p->start_sect;
+ bio->bi_iter.bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
bdev->bd_dev,
- bio->bi_sector - p->start_sect);
+ bio->bi_iter.bi_sector - p->start_sect);
}
}
@@ -1654,7 +1654,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
/* Test device or partition size, when known. */
maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
if (maxsector) {
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
/*
@@ -1690,7 +1690,7 @@ generic_make_request_checks(struct bio *bio)
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b),
- (long long) bio->bi_sector);
+ (long long) bio->bi_iter.bi_sector);
goto end_io;
}
@@ -1704,9 +1704,9 @@ generic_make_request_checks(struct bio *bio)
}
part = bio->bi_bdev->bd_part;
- if (should_fail_request(part, bio->bi_size) ||
+ if (should_fail_request(part, bio->bi_iter.bi_size) ||
should_fail_request(&part_to_disk(part)->part0,
- bio->bi_size))
+ bio->bi_iter.bi_size))
goto end_io;
/*
@@ -1865,7 +1865,7 @@ void submit_bio(int rw, struct bio *bio)
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
- task_io_account_read(bio->bi_size);
+ task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
}
@@ -1874,7 +1874,7 @@ void submit_bio(int rw, struct bio *bio)
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
(rw & WRITE) ? "WRITE" : "READ",
- (unsigned long long)bio->bi_sector,
+ (unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b),
count);
}
@@ -2007,7 +2007,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
for (bio = rq->bio; bio; bio = bio->bi_next) {
if ((bio->bi_rw & ff) != ff)
break;
- bytes += bio->bi_size;
+ bytes += bio->bi_iter.bi_size;
}
/* this could lead to infinite loop */
@@ -2378,9 +2378,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
total_bytes = 0;
while (req->bio) {
struct bio *bio = req->bio;
- unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+ unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
- if (bio_bytes == bio->bi_size)
+ if (bio_bytes == bio->bi_iter.bi_size)
req->bio = bio->bi_next;
req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2728,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
- rq->__data_len = bio->bi_size;
+ rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
block/blk-flush.c
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
* copied from blk_rq_pos(rq).
*/
if (error_sector)
- *error_sector = bio->bi_sector;
+ *error_sector = bio->bi_iter.bi_sector;
bio_put(bio);
return ret;
block/blk-lib.c
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
req_sects = end_sect - sector;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
- bio->bi_size = req_sects << 9;
+ bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
sector = end_sect;
@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
break;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
if (nr_sects > max_write_same_sectors) {
- bio->bi_size = max_write_same_sectors << 9;
+ bio->bi_iter.bi_size = max_write_same_sectors << 9;
nr_sects -= max_write_same_sectors;
sector += max_write_same_sectors;
} else {
- bio->bi_size = nr_sects << 9;
+ bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
break;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = bio_batch_end_io;
bio->bi_private = &bb;
block/blk-map.c
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->__data_len += bio->bi_size;
+ rq->__data_len += bio->bi_iter.bi_size;
}
return 0;
}
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
- return bio->bi_size;
+ return bio->bi_iter.bi_size;
/* if it was boucned we must call the end io function */
bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (bio->bi_size != len) {
+ if (bio->bi_iter.bi_size != len) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
block/blk-merge.c
@@ -543,9 +543,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
int blk_try_merge(struct request *rq, struct bio *bio)
{
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
- else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+ else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
return ELEVATOR_FRONT_MERGE;
return ELEVATOR_NO_MERGE;
}
block/blk-mq.c
@@ -301,7 +301,7 @@ void blk_mq_complete_request(struct request *rq, int error)
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- bytes += bio->bi_size;
+ bytes += bio->bi_iter.bi_size;
blk_mq_bio_endio(rq, bio, error);
bio = next;
}
block/blk-throttle.c
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
do_div(tmp, HZ);
bytes_allowed = tmp;
- if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+ if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
if (wait)
*wait = 0;
return 1;
}
/* Calc approx time to dispatch */
- extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+ extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
if (!jiffy_wait)
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
bool rw = bio_data_dir(bio);
/* Charge the bio to the group */
- tg->bytes_disp[rw] += bio->bi_size;
+ tg->bytes_disp[rw] += bio->bi_iter.bi_size;
tg->io_disp[rw]++;
/*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
*/
if (!(bio->bi_rw & REQ_THROTTLED)) {
bio->bi_rw |= REQ_THROTTLED;
- throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
- bio->bi_rw);
+ throtl_update_dispatch_stats(tg_to_blkg(tg),
+ bio->bi_iter.bi_size, bio->bi_rw);
}
}
@@ -1508,7 +1508,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
if (tg) {
if (!tg->has_rules[rw]) {
throtl_update_dispatch_stats(tg_to_blkg(tg),
- bio->bi_size, bio->bi_rw);
+ bio->bi_iter.bi_size, bio->bi_rw);
goto out_unlock_rcu;
}
}
@@ -1564,7 +1564,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
/* out-of-limit, queue to @tg */
throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
rw == READ ? 'R' : 'W',
- tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+ tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
tg->io_disp[rw], tg->iops[rw],
sq->nr_queued[READ], sq->nr_queued[WRITE]);
block/elevator.c
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
/*
* See if our hash lookup can find a potential backmerge.
*/
- __rq = elv_rqhash_find(q, bio->bi_sector);
+ __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
drivers/block/aoe/aoecmd.c
@@ -929,8 +929,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
memset(buf, 0, sizeof(*buf));
buf->rq = rq;
buf->bio = bio;
- buf->resid = bio->bi_size;
- buf->sector = bio->bi_sector;
+ buf->resid = bio->bi_iter.bi_size;
+ buf->sector = bio->bi_iter.bi_sector;
bio_pageinc(bio);
buf->bv = bio_iovec(bio);
buf->bv_resid = buf->bv->bv_len;
@@ -1152,7 +1152,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
do {
bio = rq->bio;
bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
- } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+ } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
drivers/block/brd.c
@@ -333,13 +333,13 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
int i;
int err = -EIO;
- sector = bio->bi_sector;
+ sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto out;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
err = 0;
- discard_from_brd(brd, sector, bio->bi_size);
+ discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
}
drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, page, size, 0) != size)
goto out;
drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
} else
page = b->bm_pages[page_nr];
bio->bi_bdev = mdev->ldev->md_bdev;
- bio->bi_sector = on_disk_sector;
+ bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@ int drbd_submit_peer_request(struct drbd_conf *mdev,
goto fail;
}
/* > peer_req->i.sector, unless this is the first bio */
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
bio->bi_rw = rw;
bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ int drbd_submit_peer_request(struct drbd_conf *mdev,
dev_err(DEV,
"bio_add_page failed for len=%u, "
"bi_vcnt=0 (bi_sector=%llu)\n",
- len, (unsigned long long)bio->bi_sector);
+ len, (uint64_t)bio->bi_iter.bi_sector);
err = -ENOSPC;
goto fail;
}
@@ -1615,7 +1615,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
mdev->recv_cnt += data_size>>9;
bio = req->master_bio;
- D_ASSERT(sector == bio->bi_sector);
+ D_ASSERT(sector == bio->bi_iter.bi_sector);
bio_for_each_segment(bvec, bio, i) {
void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
req->epoch = 0;
drbd_clear_interval(&req->i);
- req->i.sector = bio_src->bi_sector;
- req->i.size = bio_src->bi_size;
+ req->i.sector = bio_src->bi_iter.bi_sector;
+ req->i.size = bio_src->bi_iter.bi_size;
req->i.local = true;
req->i.waiting = false;
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
/*
* what we "blindly" assume:
*/
- D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+ D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
inc_ap_bio(mdev);
__drbd_make_request(mdev, bio, start_time);
drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
/* Short lived temporary struct on the stack.
* We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
struct bio *bio;
int error;
drivers/block/floppy.c
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio_vec.bv_len = size;
bio_vec.bv_offset = 0;
bio.bi_vcnt = 1;
- bio.bi_size = size;
+ bio.bi_iter.bi_size = size;
bio.bi_bdev = bdev;
- bio.bi_sector = 0;
+ bio.bi_iter.bi_sector = 0;
bio.bi_flags = (1 << BIO_QUIET);
init_completion(&complete);
bio.bi_private = &complete;
drivers/block/loop.c
@@ -415,7 +415,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
loff_t pos;
int ret;
- pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+ pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
struct file *file = lo->lo_backing_file;
@@ -444,7 +444,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
goto out;
}
ret = file->f_op->fallocate(file, mode, pos,
- bio->bi_size);
+ bio->bi_iter.bi_size);
if (unlikely(ret && ret != -EINVAL &&
ret != -EOPNOTSUPP))
ret = -EIO;
drivers/block/mtip32xx/mtip32xx.c
@@ -3993,7 +3993,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+ bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
bio_sectors(bio)));
return;
}
@@ -4006,7 +4006,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
dd->unal_qdepth) {
- if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+ if (bio->bi_iter.bi_sector % 8 != 0)
+ /* Unaligned on 4k boundaries */
unaligned = 1;
else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
unaligned = 1;
@@ -4035,7 +4036,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
/* Issue the read/write. */
mtip_hw_submit_io(dd,
- bio->bi_sector,
+ bio->bi_iter.bi_sector,
bio_sectors(bio),
nents,
tag,
drivers/block/nvme-core.c
@@ -468,7 +468,7 @@ static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
{
struct nvme_bio_pair *bp;
- BUG_ON(len > bio->bi_size);
+ BUG_ON(len > bio->bi_iter.bi_size);
BUG_ON(idx > bio->bi_vcnt);
bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
@@ -479,11 +479,11 @@ static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
bp->b1 = *bio;
bp->b2 = *bio;
- bp->b1.bi_size = len;
- bp->b2.bi_size -= len;
+ bp->b1.bi_iter.bi_size = len;
+ bp->b2.bi_iter.bi_size -= len;
bp->b1.bi_vcnt = idx;
- bp->b2.bi_idx = idx;
- bp->b2.bi_sector += len >> 9;
+ bp->b2.bi_iter.bi_idx = idx;
+ bp->b2.bi_iter.bi_sector += len >> 9;
if (offset) {
bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
@@ -552,11 +552,12 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
{
struct bio_vec *bvec, *bvprv = NULL;
struct scatterlist *sg = NULL;
- int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+ int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
if (nvmeq->dev->stripe_size)
split_len = nvmeq->dev->stripe_size -
- ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+ ((bio->bi_iter.bi_sector << 9) &
+ (nvmeq->dev->stripe_size - 1));
sg_init_table(iod->sg, psegs);
bio_for_each_segment(bvec, bio, i) {
@@ -584,7 +585,7 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
return -ENOMEM;
- BUG_ON(length != bio->bi_size);
+ BUG_ON(length != bio->bi_iter.bi_size);
return length;
}
@@ -608,8 +609,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
iod->npages = 0;
range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+ range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +675,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
}
result = -ENOMEM;
- iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+ iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
if (!iod)
goto nomem;
iod->private = bio;
@@ -723,7 +724,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
GFP_ATOMIC);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
for (;;) {
tmp = rb_entry(n, struct pkt_rb_node, rb_node);
- if (s <= tmp->bio->bi_sector)
+ if (s <= tmp->bio->bi_iter.bi_sector)
next = n->rb_left;
else
next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
n = next;
}
- if (s > tmp->bio->bi_sector) {
+ if (s > tmp->bio->bi_iter.bi_sector) {
tmp = pkt_rbtree_next(tmp);
if (!tmp)
return NULL;
}
- BUG_ON(s > tmp->bio->bi_sector);
+ BUG_ON(s > tmp->bio->bi_iter.bi_sector);
return tmp;
}
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
{
struct rb_node **p = &pd->bio_queue.rb_node;
struct rb_node *parent = NULL;
- sector_t s = node->bio->bi_sector;
+ sector_t s = node->bio->bi_iter.bi_sector;
struct pkt_rb_node *tmp;
while (*p) {
parent = *p;
tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
- if (s < tmp->bio->bi_sector)
+ if (s < tmp->bio->bi_iter.bi_sector)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
spin_lock(&pd->iosched.lock);
bio = bio_list_peek(&pd->iosched.write_queue);
spin_unlock(&pd->iosched.lock);
- if (bio && (bio->bi_sector == pd->iosched.last_write))
+ if (bio && (bio->bi_iter.bi_sector ==
+ pd->iosched.last_write))
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
continue;
if (bio_data_dir(bio) == READ)
- pd->iosched.successive_reads += bio->bi_size >> 10;
+ pd->iosched.successive_reads +=
+ bio->bi_iter.bi_size >> 10;
else {
pd->iosched.successive_reads = 0;
pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
bio, (unsigned long long)pkt->sector,
- (unsigned long long)bio->bi_sector, err);
+ (unsigned long long)bio->bi_iter.bi_sector, err);
if (err)
atomic_inc(&pkt->io_errors);
@@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
memset(written, 0, sizeof(written));
spin_lock(&pkt->lock);
bio_list_for_each(bio, &pkt->orig_bios) {
- int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_size / CD_FRAMESIZE;
+ int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+ (CD_FRAMESIZE >> 9);
+ int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
BUG_ON(first_frame < 0);
BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f];
bio_reset(bio);
- bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+ bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_bdev = pd->bdev;
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev;
pkt->bio->bi_rw = REQ_WRITE;
- pkt->bio->bi_sector = new_sector;
- pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+ pkt->bio->bi_iter.bi_sector = new_sector;
+ pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt->bio->bi_vcnt = pkt->frames;
pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
node = first_node;
while (node) {
bio = node->bio;
- zone = get_zone(bio->bi_sector, pd);
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
if (p->sector == zone) {
bio = NULL;
@@ -1252,14 +1255,14 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
bio = node->bio;
- pkt_dbg(2, pd, "found zone=%llx\n",
- (unsigned long long)get_zone(bio->bi_sector, pd));
- if (get_zone(bio->bi_sector, pd) != zone)
+ pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+ get_zone(bio->bi_iter.bi_sector, pd));
+ if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
spin_unlock(&pkt->lock);
}
/* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
bio_reset(pkt->w_bio);
- pkt->w_bio->bi_sector = pkt->sector;
+ pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_bdev = pd->bdev;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2370,20 +2373,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
pkt_notice(pd, "WRITE for ro device (%llu)\n",
- (unsigned long long)bio->bi_sector);
+ (unsigned long long)bio->bi_iter.bi_sector);
goto end_io;
}
- if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
+ if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
pkt_err(pd, "wrong bio size\n");
goto end_io;
}
blk_queue_bounce(q, &bio);
- zone = get_zone(bio->bi_sector, pd);
+ zone = get_zone(bio->bi_iter.bi_sector, pd);
pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
- (unsigned long long)bio->bi_sector,
+ (unsigned long long)bio->bi_iter.bi_sector,
(unsigned long long)bio_end_sector(bio));
/* Check if we have to split the bio */
@@ -2395,7 +2398,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
last_zone = get_zone(bio_end_sector(bio) - 1, pd);
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
- first_sectors = last_zone - bio->bi_sector;
+ first_sectors = last_zone - bio->bi_iter.bi_sector;
bp = bio_split(bio, first_sectors);
BUG_ON(!bp);
pkt_make_request(q, &bp->bio1);
@@ -2417,7 +2420,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
if ((pkt->state == PACKET_WAITING_STATE) ||
(pkt->state == PACKET_READ_WAIT_STATE)) {
bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+ pkt->write_size +=
+ bio->bi_iter.bi_size / CD_FRAMESIZE;
if ((pkt->write_size >= pkt->frames) &&
(pkt->state == PACKET_WAITING_STATE)) {
atomic_inc(&pkt->run_sm);
drivers/block/ps3disk.c
@@ -104,7 +104,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core,
"%s:%u: bio %u: %u segs %u sectors from %lu\n",
__func__, __LINE__, i, bio_segments(iter.bio),
- bio_sectors(iter.bio), iter.bio->bi_sector);
+ bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);
size = bvec->bv_len;
buf = bvec_kmap_irq(bvec, &flags);
drivers/block/ps3vram.c
@@ -553,7 +553,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
int write = bio_data_dir(bio) == WRITE;
const char *op = write ? "write" : "read";
- loff_t offset = bio->bi_sector << 9;
+ loff_t offset = bio->bi_iter.bi_sector << 9;
int error = 0;
struct bio_vec *bvec;
unsigned int i;
drivers/block/rbd.c
@@ -1183,14 +1183,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
/* Handle the easy case for the caller */
- if (!offset && len == bio_src->bi_size)
+ if (!offset && len == bio_src->bi_iter.bi_size)
return bio_clone(bio_src, gfpmask);
if (WARN_ON_ONCE(!len))
return NULL;
- if (WARN_ON_ONCE(len > bio_src->bi_size))
+ if (WARN_ON_ONCE(len > bio_src->bi_iter.bi_size))
return NULL;
- if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
+ if (WARN_ON_ONCE(offset > bio_src->bi_iter.bi_size - len))
return NULL;
/* Find first affected segment... */
@@ -1220,7 +1220,8 @@ static struct bio *bio_clone_range(struct bio *bio_src,
return NULL; /* ENOMEM */
bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
+ bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector +
+ (offset >> SECTOR_SHIFT);
bio->bi_rw = bio_src->bi_rw;
bio->bi_flags |= 1 << BIO_CLONED;
@@ -1239,8 +1240,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
}
bio->bi_vcnt = vcnt;
- bio->bi_size = len;
- bio->bi_idx = 0;
+ bio->bi_iter.bi_size = len;
return bio;
}
@@ -1271,7 +1271,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
/* Build up a chain of clone bios up to the limit */
- if (!bi || off >= bi->bi_size || !len)
+ if (!bi || off >= bi->bi_iter.bi_size || !len)
return NULL; /* Nothing to clone */
end = &chain;
@@ -1283,7 +1283,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
rbd_warn(NULL, "bio_chain exhausted with %u left", len);
goto out_err; /* EINVAL; ran out of bio's */
}
- bi_size = min_t(unsigned int, bi->bi_size - off, len);
+ bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
bio = bio_clone_range(bi, off, bi_size, gfpmask);
if (!bio)
goto out_err; /* ENOMEM */
@@ -1292,7 +1292,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
end = &bio->bi_next;
off += bi_size;
- if (off == bi->bi_size) {
+ if (off == bi->bi_iter.bi_size) {
bi = bi->bi_next;
off = 0;
}
@@ -2186,7 +2186,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
if (type == OBJ_REQUEST_BIO) {
bio_list = data_desc;
- rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+ rbd_assert(img_offset ==
+ bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
} else {
rbd_assert(type == OBJ_REQUEST_PAGES);
pages = data_desc;
drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
if (!card)
goto req_err;
- if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+ if (bio_end_sector(bio) > get_capacity(card->gendisk))
goto req_err;
if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
goto req_err;
}
- if (bio->bi_size == 0) {
+ if (bio->bi_iter.bi_size == 0) {
dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
goto req_err;
}
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
bio_data_dir(bio) ? 'W' : 'R', bio_meta,
- (u64)bio->bi_sector << 9, bio->bi_size);
+ (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
bio_dma_done_cb, bio_meta);
drivers/block/rsxx/dma.c
@@ -696,7 +696,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
int st;
int i;
- addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+ addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
atomic_set(n_dmas, 0);
for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
}
if (bio->bi_rw & REQ_DISCARD) {
- bv_len = bio->bi_size;
+ bv_len = bio->bi_iter.bi_size;
while (bv_len > 0) {
tgt = rsxx_get_dma_tgt(card, addr8);
drivers/block/umem.c
@@ -352,8 +352,8 @@ static int add_bio(struct cardinfo *card)
bio = card->currentbio;
if (!bio && card->bio) {
card->currentbio = card->bio;
- card->current_idx = card->bio->bi_idx;
- card->current_sector = card->bio->bi_sector;
+ card->current_idx = card->bio->bi_iter.bi_idx;
+ card->current_sector = card->bio->bi_iter.bi_sector;
card->bio = card->bio->bi_next;
if (card->bio == NULL)
card->biotail = &card->bio;
@@ -451,7 +451,7 @@ static void process_page(unsigned long data)
if (page->idx >= bio->bi_vcnt) {
page->bio = bio->bi_next;
if (page->bio)
- page->idx = page->bio->bi_idx;
+ page->idx = page->bio->bi_iter.bi_idx;
}
pci_unmap_page(card->dev, desc->data_dma_handle,
@@ -532,7 +532,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
pr_debug("mm_make_request %llu %u\n",
- (unsigned long long)bio->bi_sector, bio->bi_size);
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
spin_lock_irq(&card->lock);
*card->biotail = bio;
drivers/block/xen-blkback/blkback.c
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
bio->bi_bdev = preq.bdev;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
- bio->bi_sector = preq.sector_number;
+ bio->bi_iter.bi_sector = preq.sector_number;
}
preq.sector_number += seg[i].nsec;
drivers/block/xen-blkfront.c
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
for (i = 0; i < pending; i++) {
offset = (i * segs * PAGE_SIZE) >> 9;
size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
- (unsigned int)(bio->bi_size >> 9) - offset);
+ (unsigned int)bio_sectors(bio) - offset);
cloned_bio = bio_clone(bio, GFP_NOIO);
BUG_ON(cloned_bio == NULL);
bio_trim(cloned_bio, offset, size);
drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
bio = bch_bbio_alloc(b->c);
bio->bi_rw = REQ_META|READ_SYNC;
- bio->bi_size = KEY_SIZE(&b->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
- b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+ b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
bch_bio_map(b->bio, i);
/*
drivers/md/bcache/debug.c
@@ -195,7 +195,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
dc->disk.c,
"verify failed at dev %s sector %llu",
bdevname(dc->bdev, name),
- (uint64_t) bio->bi_sector);
+ (uint64_t) bio->bi_iter.bi_sector);
kunmap_atomic(p1);
}
drivers/md/bcache/io.c
@@ -21,18 +21,18 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error)
static void bch_generic_make_request_hack(struct bio *bio)
{
- if (bio->bi_idx) {
+ if (bio->bi_iter.bi_idx) {
struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
memcpy(clone->bi_io_vec,
bio_iovec(bio),
bio_segments(bio) * sizeof(struct bio_vec));
- clone->bi_sector = bio->bi_sector;
+ clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
clone->bi_bdev = bio->bi_bdev;
clone->bi_rw = bio->bi_rw;
clone->bi_vcnt = bio_segments(bio);
- clone->bi_size = bio->bi_size;
+ clone->bi_iter.bi_size = bio->bi_iter.bi_size;
clone->bi_private = bio;
clone->bi_end_io = bch_bi_idx_hack_endio;
@@ -72,7 +72,7 @@ static void bch_generic_make_request_hack(struct bio *bio)
struct bio *bch_bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs)
{
- unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
+ unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9;
struct bio_vec *bv;
struct bio *ret = NULL;
@@ -90,7 +90,7 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
}
bio_for_each_segment(bv, bio, idx) {
- vcnt = idx - bio->bi_idx;
+ vcnt = idx - bio->bi_iter.bi_idx;
if (!nbytes) {
ret = bio_alloc_bioset(gfp, vcnt, bs);
@@ -119,15 +119,15 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
}
out:
ret->bi_bdev = bio->bi_bdev;
- ret->bi_sector = bio->bi_sector;
- ret->bi_size = sectors << 9;
+ ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
+ ret->bi_iter.bi_size = sectors << 9;
ret->bi_rw = bio->bi_rw;
ret->bi_vcnt = vcnt;
ret->bi_max_vecs = vcnt;
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
- bio->bi_idx = idx;
+ bio->bi_iter.bi_sector += sectors;
+ bio->bi_iter.bi_size -= sectors << 9;
+ bio->bi_iter.bi_idx = idx;
if (bio_integrity(bio)) {
if (bio_integrity_clone(ret, bio, gfp)) {
@@ -162,7 +162,7 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
bio_for_each_segment(bv, bio, i) {
struct bvec_merge_data bvm = {
.bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
+ .bi_sector = bio->bi_iter.bi_sector,
.bi_size = ret << 9,
.bi_rw = bio->bi_rw,
};
@@ -272,8 +272,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
struct bbio *b = container_of(bio, struct bbio, bio);
- bio->bi_sector = PTR_OFFSET(&b->key, 0);
- bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+ bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
+ bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
b->submit_time_us = local_clock_us();
closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned, left, PAGE_SECTORS * 8);
bio_reset(bio);
- bio->bi_sector = bucket + offset;
+ bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev;
bio->bi_rw = READ;
- bio->bi_size = len << 9;
+ bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
bio_init(bio);
- bio->bi_sector = bucket_to_sector(ca->set,
+ bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_DISCARD;
bio->bi_max_vecs = 1;
bio->bi_io_vec = bio->bi_inline_vecs;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written);
bio_reset(bio);
- bio->bi_sector = PTR_OFFSET(k, i);
+ bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
- bio->bi_size = sectors << 9;
+ bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
drivers/md/bcache/movinggc.c
@@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io)
bio_get(bio);
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&io->w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS);
bio->bi_private = &io->cl;
@@ -98,7 +98,7 @@ static void write_moving(struct closure *cl)
if (!op->error) {
moving_init(io);
- io->bio.bio.bi_sector = KEY_START(&io->w->key);
+ io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
op->write_prio = 1;
op->bio = &io->bio.bio;
drivers/md/bcache/request.c
@@ -261,7 +261,7 @@ static void bch_data_invalidate(struct closure *cl)
struct bio *bio = op->bio;
pr_debug("invalidating %i sectors from %llu",
- bio_sectors(bio), (uint64_t) bio->bi_sector);
+ bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
unsigned sectors = min(bio_sectors(bio),
@@ -270,11 +270,11 @@ static void bch_data_invalidate(struct closure *cl)
if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
goto out;
- bio->bi_sector += sectors;
- bio->bi_size -= sectors << 9;
+ bio->bi_iter.bi_sector += sectors;
+ bio->bi_iter.bi_size -= sectors << 9;
bch_keylist_add(&op->insert_keys,
- &KEY(op->inode, bio->bi_sector, sectors));
+ &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
}
op->insert_data_done = true;
@@ -364,7 +364,7 @@ static void bch_data_insert_start(struct closure *cl)
k = op->insert_keys.top;
bkey_init(k);
SET_KEY_INODE(k, op->inode);
- SET_KEY_OFFSET(k, bio->bi_sector);
+ SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
op->write_point, op->write_prio,
@@ -522,7 +522,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
(bio->bi_rw & REQ_WRITE)))
goto skip;
- if (bio->bi_sector & (c->sb.block_size - 1) ||
+ if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
pr_debug("skipping unaligned io");
goto skip;
@@ -546,8 +546,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
spin_lock(&dc->io_lock);
- hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
- if (i->last == bio->bi_sector &&
+ hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+ if (i->last == bio->bi_iter.bi_sector &&
time_before(jiffies, i->jiffies))
goto found;
@@ -556,8 +556,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
add_sequential(task);
i->sequential = 0;
found:
- if (i->sequential + bio->bi_size > i->sequential)
- i->sequential += bio->bi_size;
+ if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+ i->sequential += bio->bi_iter.bi_size;
i->last = bio_end_sector(bio);
i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -650,15 +650,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
struct bkey *bio_key;
unsigned ptr;
- if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
return MAP_CONTINUE;
if (KEY_INODE(k) != s->iop.inode ||
- KEY_START(k) > bio->bi_sector) {
+ KEY_START(k) > bio->bi_iter.bi_sector) {
unsigned bio_sectors = bio_sectors(bio);
unsigned sectors = KEY_INODE(k) == s->iop.inode
? min_t(uint64_t, INT_MAX,
- KEY_START(k) - bio->bi_sector)
+ KEY_START(k) - bio->bi_iter.bi_sector)
: INT_MAX;
int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -681,13 +681,13 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
s->read_dirty_data = true;
n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
- KEY_OFFSET(k) - bio->bi_sector),
+ KEY_OFFSET(k) - bio->bi_iter.bi_sector),
GFP_NOIO, s->d->bio_split);
bio_key = &container_of(n, struct bbio, bio)->key;
bch_bkey_copy_single_ptr(bio_key, k, ptr);
- bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+ bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
n->bi_end_io = bch_cache_read_endio;
@@ -714,7 +714,7 @@ static void cache_lookup(struct closure *cl)
struct bio *bio = &s->bio.bio;
int ret = bch_btree_map_keys(&s->op, s->iop.c,
- &KEY(s->iop.inode, bio->bi_sector, 0),
+ &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
cache_lookup_fn, MAP_END_KEY);
if (ret == -EAGAIN)
continue_at(cl, cache_lookup, bcache_wq);
@@ -872,9 +872,9 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) {
bio_reset(s->iop.bio);
- s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+ s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
- s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+ s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
@@ -937,7 +937,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
s->iop.replace_key = KEY(s->iop.inode,
- bio->bi_sector + s->insert_bio_sectors,
+ bio->bi_iter.bi_sector + s->insert_bio_sectors,
s->insert_bio_sectors);
ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -957,9 +957,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
if (!cache_bio)
goto out_submit;
- cache_bio->bi_sector = miss->bi_sector;
- cache_bio->bi_bdev = miss->bi_bdev;
- cache_bio->bi_size = s->insert_bio_sectors << 9;
+ cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
+ cache_bio->bi_bdev = miss->bi_bdev;
+ cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = request_endio;
cache_bio->bi_private = &s->cl;
@@ -1009,7 +1009,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
struct bio *bio = &s->bio.bio;
- struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+ struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1104,13 +1104,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
part_stat_unlock();
bio->bi_bdev = dc->bdev;
- bio->bi_sector += dc->sb.data_offset;
+ bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
s = search_alloc(bio, d);
trace_bcache_request_start(s->d, bio);
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
* generic_make_request
@@ -1197,9 +1197,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
sectors -= j;
}
- bio_advance(bio, min(sectors << 9, bio->bi_size));
+ bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
- if (!bio->bi_size)
+ if (!bio->bi_iter.bi_size)
return MAP_DONE;
return MAP_CONTINUE;
@@ -1233,7 +1233,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
trace_bcache_request_start(s->d, bio);
- if (!bio->bi_size) {
+ if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
* generic_make_request
@@ -1243,7 +1243,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
bcache_wq);
} else if (rw) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
- &KEY(d->id, bio->bi_sector, 0),
+ &KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
unsigned i;
- bio->bi_sector = SB_SECTOR;
- bio->bi_rw = REQ_SYNC|REQ_META;
- bio->bi_size = SB_SIZE;
+ bio->bi_iter.bi_sector = SB_SECTOR;
+ bio->bi_rw = REQ_SYNC|REQ_META;
+ bio->bi_iter.bi_size = SB_SIZE;
bch_bio_map(bio, NULL);
out->offset = cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
struct bio *bio = bch_bbio_alloc(c);
bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = KEY_SIZE(k) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
closure_init_stack(cl);
- bio->bi_sector = bucket * ca->sb.bucket_size;
- bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
- bio->bi_size = bucket_bytes(ca);
+ bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
+ bio->bi_bdev = ca->bdev;
+ bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
drivers/md/bcache/util.c
@@ -218,10 +218,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
void bch_bio_map(struct bio *bio, void *base)
{
- size_t size = bio->bi_size;
+ size_t size = bio->bi_iter.bi_size;
struct bio_vec *bv = bio->bi_io_vec;
- BUG_ON(!bio->bi_size);
+ BUG_ON(!bio->bi_iter.bi_size);
BUG_ON(bio->bi_vcnt);
bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
drivers/md/bcache/writeback.c
@@ -113,7 +113,7 @@ static void dirty_init(struct keybuf_key *w)
if (!io->dc->writeback_percent)
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
- bio->bi_size = KEY_SIZE(&w->key) << 9;
+ bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
bio->bi_private = w;
bio->bi_io_vec = bio->bi_inline_vecs;
@@ -186,7 +186,7 @@ static void write_dirty(struct closure *cl)
dirty_init(w);
io->bio.bi_rw = WRITE;
- io->bio.bi_sector = KEY_START(&w->key);
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
@@ -255,7 +255,7 @@ static void read_dirty(struct cached_dev *dc)
io->dc = dc;
dirty_init(w);
- io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
&w->key, 0)->bdev;
io->bio.bi_rw = READ;
drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
return false;
if (dc->partial_stripes_expensive &&
- bcache_dev_stripe_dirty(dc, bio->bi_sector,
+ bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
bio_sectors(bio)))
return true;
drivers/md/dm-bio-record.h
@@ -40,10 +40,10 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
unsigned i;
- bd->bi_sector = bio->bi_sector;
+ bd->bi_sector = bio->bi_iter.bi_sector;
bd->bi_bdev = bio->bi_bdev;
- bd->bi_size = bio->bi_size;
- bd->bi_idx = bio->bi_idx;
+ bd->bi_size = bio->bi_iter.bi_size;
+ bd->bi_idx = bio->bi_iter.bi_idx;
bd->bi_flags = bio->bi_flags;
for (i = 0; i < bio->bi_vcnt; i++) {
@@ -56,10 +56,10 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
unsigned i;
- bio->bi_sector = bd->bi_sector;
+ bio->bi_iter.bi_sector = bd->bi_sector;
bio->bi_bdev = bd->bi_bdev;
- bio->bi_size = bd->bi_size;
- bio->bi_idx = bd->bi_idx;
+ bio->bi_iter.bi_size = bd->bi_size;
+ bio->bi_iter.bi_idx = bd->bi_idx;
bio->bi_flags = bd->bi_flags;
for (i = 0; i < bio->bi_vcnt; i++) {
drivers/md/dm-bufio.c
@@ -538,7 +538,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
bio_init(&b->bio);
b->bio.bi_io_vec = b->bio_vec;
b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
- b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+ b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
b->bio.bi_bdev = b->c->bdev;
b->bio.bi_end_io = end_io;
drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
- if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+ if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
t->nr_seq_samples++;
else {
/*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
t->nr_rand_samples++;
}
- t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+ t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
drivers/md/dm-cache-target.c
@@ -664,15 +664,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
static void remap_to_cache(struct cache *cache, struct bio *bio,
dm_cblock_t cblock)
{
- sector_t bi_sector = bio->bi_sector;
+ sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = cache->cache_dev->bdev;
if (!block_size_is_power_of_two(cache))
- bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
- sector_div(bi_sector, cache->sectors_per_block);
+ bio->bi_iter.bi_sector =
+ (from_cblock(cblock) * cache->sectors_per_block) +
+ sector_div(bi_sector, cache->sectors_per_block);
else
- bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
- (bi_sector & (cache->sectors_per_block - 1));
+ bio->bi_iter.bi_sector =
+ (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (bi_sector & (cache->sectors_per_block - 1));
}
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +714,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
- sector_t block_nr = bio->bi_sector;
+ sector_t block_nr = bio->bi_iter.bi_sector;
if (!block_size_is_power_of_two(cache))
(void) sector_div(block_nr, cache->sec