
Commit 45354f1

Mikulas Patocka authored and snitm committed
dm bufio: don't embed a bio in the dm_buffer structure
The bio structure consumes a substantial part of dm_buffer, and it is only needed while doing I/O on the buffer, so there is no need to embed it there. Allocate the bio structure only when doing I/O.

We don't need to create a bio_set because, in case of allocation failure, dm-bufio falls back to using dm-io (which keeps its own bio_set).

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
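In outline, the patch replaces the embedded bio (and its 16-entry bio_vec array) in struct dm_buffer with a single end_io callback; both I/O paths, the on-demand bio and the dm-io fallback, complete through that callback. A condensed sketch of the resulting flow, using only names that appear in the diff below (not a drop-in excerpt):

        /* Condensed from the patch below. */
        struct dm_buffer {
                /* ... other fields unchanged ... */
                void (*end_io)(struct dm_buffer *, blk_status_t); /* replaces struct bio + bio_vec[16] */
        };

        /* dm-io fallback path: completion is routed to the stored callback. */
        static void dmio_complete(unsigned long error, void *context)
        {
                struct dm_buffer *b = context;

                b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
        }

        /* bio path: the bio was bio_kmalloc()'d per I/O, so drop it here. */
        static void bio_complete(struct bio *bio)
        {
                struct dm_buffer *b = bio->bi_private;
                blk_status_t status = bio->bi_status;

                bio_put(bio);
                b->end_io(b, status);
        }

submit_io() stores the caller-supplied endio (read_endio() and write_endio() below) in b->end_io before choosing between use_bio() and use_dmio(), so neither path needs to know which completion it is running.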
1 parent f51f2e0 commit 45354f1

1 file changed: 45 additions, 60 deletions

drivers/md/dm-bufio.c

@@ -50,12 +50,6 @@
  */
 #define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)
 
-/*
- * The number of bvec entries that are embedded directly in the buffer.
- * If the chunk size is larger, dm-io is used to do the io.
- */
-#define DM_BUFIO_INLINE_VECS	16
-
 /*
  * Align buffer writes to this boundary.
  * Tests show that SSDs have the highest IOPS when using 4k writes.
@@ -153,8 +147,7 @@ struct dm_buffer {
 	unsigned write_end;
 	struct dm_bufio_client *c;
 	struct list_head write_list;
-	struct bio bio;
-	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
+	void (*end_io)(struct dm_buffer *, blk_status_t);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 #define MAX_STACK 10
 	struct stack_trace stack_trace;
@@ -534,12 +527,11 @@ static void dmio_complete(unsigned long error, void *context)
 {
 	struct dm_buffer *b = context;
 
-	b->bio.bi_status = error ? BLK_STS_IOERR : 0;
-	b->bio.bi_end_io(&b->bio);
+	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
 }
 
 static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
-		     unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
+		     unsigned n_sectors, unsigned offset)
 {
 	int r;
 	struct dm_io_request io_req = {
@@ -563,71 +555,69 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 		io_req.mem.ptr.vma = (char *)b->data + offset;
 	}
 
-	b->bio.bi_end_io = end_io;
-
 	r = dm_io(&io_req, 1, &region, NULL);
-	if (r) {
-		b->bio.bi_status = errno_to_blk_status(r);
-		end_io(&b->bio);
-	}
+	if (unlikely(r))
+		b->end_io(b, errno_to_blk_status(r));
 }
 
-static void inline_endio(struct bio *bio)
+static void bio_complete(struct bio *bio)
 {
-	bio_end_io_t *end_fn = bio->bi_private;
+	struct dm_buffer *b = bio->bi_private;
 	blk_status_t status = bio->bi_status;
-
-	/*
-	 * Reset the bio to free any attached resources
-	 * (e.g. bio integrity profiles).
-	 */
-	bio_reset(bio);
-
-	bio->bi_status = status;
-	end_fn(bio);
+	bio_put(bio);
+	b->end_io(b, status);
 }
 
-static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
-			   unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
+static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
+		    unsigned n_sectors, unsigned offset)
 {
+	struct bio *bio;
 	char *ptr;
-	unsigned len;
+	unsigned vec_size, len;
 
-	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
-	b->bio.bi_iter.bi_sector = sector;
-	bio_set_dev(&b->bio, b->c->bdev);
-	b->bio.bi_end_io = inline_endio;
-	/*
-	 * Use of .bi_private isn't a problem here because
-	 * the dm_buffer's inline bio is local to bufio.
-	 */
-	b->bio.bi_private = end_io;
-	bio_set_op_attrs(&b->bio, rw, 0);
+	vec_size = b->c->block_size >> PAGE_SHIFT;
+	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
+		vec_size += 2;
+
+	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
+	if (!bio) {
+dmio:
+		use_dmio(b, rw, sector, n_sectors, offset);
+		return;
+	}
+
+	bio->bi_iter.bi_sector = sector;
+	bio_set_dev(bio, b->c->bdev);
+	bio_set_op_attrs(bio, rw, 0);
+	bio->bi_end_io = bio_complete;
+	bio->bi_private = b;
 
 	ptr = (char *)b->data + offset;
 	len = n_sectors << SECTOR_SHIFT;
 
 	do {
 		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
-		if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step,
+		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
 				  offset_in_page(ptr))) {
-			use_dmio(b, rw, sector, n_sectors, offset, end_io);
-			return;
+			bio_put(bio);
+			goto dmio;
 		}
 
 		len -= this_step;
 		ptr += this_step;
 	} while (len > 0);
 
-	submit_bio(&b->bio);
+	submit_bio(bio);
 }
 
-static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
+static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
 {
 	unsigned n_sectors;
 	sector_t sector;
 	unsigned offset, end;
 
+	b->end_io = end_io;
+
 	if (likely(b->c->sectors_per_block_bits >= 0))
 		sector = b->block << b->c->sectors_per_block_bits;
 	else
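A note on the vec_size computation in the hunk above: the on-demand bio is sized in pages, and when the block is smaller than a page, or its size is not a power of two (sectors_per_block_bits is negative), the test sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT holds and two extra vector entries are reserved, presumably because such a buffer need not be page aligned and may straddle a page boundary. A worked example, assuming 4 KiB pages (PAGE_SHIFT = 12, SECTOR_SHIFT = 9):

        /* block_size = 65536: sectors_per_block_bits = 7 >= 3, so vec_size = 65536 >> 12 = 16 */
        /* block_size =   512: sectors_per_block_bits = 0 <  3, so vec_size = (512 >> 12) + 2 = 2 */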
@@ -652,11 +642,10 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
 		n_sectors = (end - offset) >> SECTOR_SHIFT;
 	}
 
-	if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
-	    b->data_mode != DATA_MODE_VMALLOC)
-		use_inline_bio(b, rw, sector, n_sectors, offset, end_io);
+	if (b->data_mode != DATA_MODE_VMALLOC)
+		use_bio(b, rw, sector, n_sectors, offset);
 	else
-		use_dmio(b, rw, sector, n_sectors, offset, end_io);
+		use_dmio(b, rw, sector, n_sectors, offset);
 }
 
 /*----------------------------------------------------------------
@@ -669,16 +658,14 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
  * Set the error, clear B_WRITING bit and wake anyone who was waiting on
  * it.
  */
-static void write_endio(struct bio *bio)
+static void write_endio(struct dm_buffer *b, blk_status_t status)
 {
-	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
-
-	b->write_error = bio->bi_status;
-	if (unlikely(bio->bi_status)) {
+	b->write_error = status;
+	if (unlikely(status)) {
 		struct dm_bufio_client *c = b->c;
 
 		(void)cmpxchg(&c->async_write_error, 0,
-			blk_status_to_errno(bio->bi_status));
+			blk_status_to_errno(status));
 	}
 
 	BUG_ON(!test_bit(B_WRITING, &b->state));
@@ -1055,11 +1042,9 @@ static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
  * The endio routine for reading: set the error, clear the bit and wake up
  * anyone waiting on the buffer.
  */
-static void read_endio(struct bio *bio)
+static void read_endio(struct dm_buffer *b, blk_status_t status)
 {
-	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
-
-	b->read_error = bio->bi_status;
+	b->read_error = status;
 
 	BUG_ON(!test_bit(B_READING, &b->state));
 