Skip to content

Commit 5169b84

Browse files
neilbrown authored and akpm00 committed
mm: submit multipage reads for SWP_FS_OPS swap-space
swap_readpage() is given one page at a time, but may be called repeatedly in succession. For block-device swap-space, the blk_plug functionality allows the multiple pages to be combined together at lower layers. That cannot be used for SWP_FS_OPS as blk_plug may not exist - it is only active when CONFIG_BLOCK=y. Consequently all swap reads over NFS are single page reads. With this patch we pass in a pointer-to-pointer when swap_readpage can store state between calls - much like the effect of blk_plug. After calling swap_readpage() some number of times, the state will be passed to swap_read_unplug() which can submit the combined request. Link: https://lkml.kernel.org/r/164859778127.29473.14059420492644907783.stgit@noble.brown Signed-off-by: NeilBrown <neilb@suse.de> Reviewed-by: Christoph Hellwig <hch@lst.de> Tested-by: David Howells <dhowells@redhat.com> Tested-by: Geert Uytterhoeven <geert+renesas@glider.be> Cc: Hugh Dickins <hughd@google.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Trond Myklebust <trond.myklebust@hammerspace.com> Cc: Miaohe Lin <linmiaohe@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent cba738f commit 5169b84

File tree

5 files changed

+104
-47
lines changed

5 files changed

+104
-47
lines changed

mm/madvise.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
198198
pte_t *orig_pte;
199199
struct vm_area_struct *vma = walk->private;
200200
unsigned long index;
201+
struct swap_iocb *splug = NULL;
201202

202203
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
203204
return 0;
@@ -219,10 +220,11 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
219220
continue;
220221

221222
page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
222-
vma, index, false);
223+
vma, index, false, &splug);
223224
if (page)
224225
put_page(page);
225226
}
227+
swap_read_unplug(splug);
226228

227229
return 0;
228230
}
@@ -238,6 +240,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
238240
XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
239241
pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
240242
struct page *page;
243+
struct swap_iocb *splug = NULL;
241244

242245
rcu_read_lock();
243246
xas_for_each(&xas, page, end_index) {
@@ -250,13 +253,14 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
250253

251254
swap = radix_to_swp_entry(page);
252255
page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
253-
NULL, 0, false);
256+
NULL, 0, false, &splug);
254257
if (page)
255258
put_page(page);
256259

257260
rcu_read_lock();
258261
}
259262
rcu_read_unlock();
263+
swap_read_unplug(splug);
260264

261265
lru_add_drain(); /* Push any new pages onto the LRU now */
262266
}

mm/memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3633,7 +3633,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
36333633

36343634
/* To provide entry to swap_readpage() */
36353635
set_page_private(page, entry.val);
3636-
swap_readpage(page, true);
3636+
swap_readpage(page, true, NULL);
36373637
set_page_private(page, 0);
36383638
}
36393639
} else {

mm/page_io.c

Lines changed: 69 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -237,7 +237,8 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
237237

238238
struct swap_iocb {
239239
struct kiocb iocb;
240-
struct bio_vec bvec;
240+
struct bio_vec bvec[SWAP_CLUSTER_MAX];
241+
int pages;
241242
};
242243
static mempool_t *sio_pool;
243244

@@ -257,7 +258,7 @@ int sio_pool_init(void)
257258
static void sio_write_complete(struct kiocb *iocb, long ret)
258259
{
259260
struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
260-
struct page *page = sio->bvec.bv_page;
261+
struct page *page = sio->bvec[0].bv_page;
261262

262263
if (ret != PAGE_SIZE) {
263264
/*
@@ -295,10 +296,10 @@ static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
295296
init_sync_kiocb(&sio->iocb, swap_file);
296297
sio->iocb.ki_complete = sio_write_complete;
297298
sio->iocb.ki_pos = page_file_offset(page);
298-
sio->bvec.bv_page = page;
299-
sio->bvec.bv_len = PAGE_SIZE;
300-
sio->bvec.bv_offset = 0;
301-
iov_iter_bvec(&from, WRITE, &sio->bvec, 1, PAGE_SIZE);
299+
sio->bvec[0].bv_page = page;
300+
sio->bvec[0].bv_len = PAGE_SIZE;
301+
sio->bvec[0].bv_offset = 0;
302+
iov_iter_bvec(&from, WRITE, &sio->bvec[0], 1, PAGE_SIZE);
302303
ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
303304
if (ret != -EIOCBQUEUED)
304305
sio_write_complete(&sio->iocb, ret);
@@ -346,46 +347,66 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
346347
static void sio_read_complete(struct kiocb *iocb, long ret)
347348
{
348349
struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
349-
struct page *page = sio->bvec.bv_page;
350+
int p;
350351

351-
if (ret != 0 && ret != PAGE_SIZE) {
352-
SetPageError(page);
353-
ClearPageUptodate(page);
354-
pr_alert_ratelimited("Read-error on swap-device\n");
352+
if (ret == PAGE_SIZE * sio->pages) {
353+
for (p = 0; p < sio->pages; p++) {
354+
struct page *page = sio->bvec[p].bv_page;
355+
356+
SetPageUptodate(page);
357+
unlock_page(page);
358+
}
359+
count_vm_events(PSWPIN, sio->pages);
355360
} else {
356-
SetPageUptodate(page);
357-
count_vm_event(PSWPIN);
361+
for (p = 0; p < sio->pages; p++) {
362+
struct page *page = sio->bvec[p].bv_page;
363+
364+
SetPageError(page);
365+
ClearPageUptodate(page);
366+
unlock_page(page);
367+
}
368+
pr_alert_ratelimited("Read-error on swap-device\n");
358369
}
359-
unlock_page(page);
360370
mempool_free(sio, sio_pool);
361371
}
362372

363-
static int swap_readpage_fs(struct page *page)
373+
static void swap_readpage_fs(struct page *page,
374+
struct swap_iocb **plug)
364375
{
365376
struct swap_info_struct *sis = page_swap_info(page);
366-
struct file *swap_file = sis->swap_file;
367-
struct address_space *mapping = swap_file->f_mapping;
368-
struct iov_iter from;
369-
struct swap_iocb *sio;
377+
struct swap_iocb *sio = NULL;
370378
loff_t pos = page_file_offset(page);
371-
int ret;
372-
373-
sio = mempool_alloc(sio_pool, GFP_KERNEL);
374-
init_sync_kiocb(&sio->iocb, swap_file);
375-
sio->iocb.ki_pos = pos;
376-
sio->iocb.ki_complete = sio_read_complete;
377-
sio->bvec.bv_page = page;
378-
sio->bvec.bv_len = PAGE_SIZE;
379-
sio->bvec.bv_offset = 0;
380379

381-
iov_iter_bvec(&from, READ, &sio->bvec, 1, PAGE_SIZE);
382-
ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
383-
if (ret != -EIOCBQUEUED)
384-
sio_read_complete(&sio->iocb, ret);
385-
return ret;
380+
if (plug)
381+
sio = *plug;
382+
if (sio) {
383+
if (sio->iocb.ki_filp != sis->swap_file ||
384+
sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
385+
swap_read_unplug(sio);
386+
sio = NULL;
387+
}
388+
}
389+
if (!sio) {
390+
sio = mempool_alloc(sio_pool, GFP_KERNEL);
391+
init_sync_kiocb(&sio->iocb, sis->swap_file);
392+
sio->iocb.ki_pos = pos;
393+
sio->iocb.ki_complete = sio_read_complete;
394+
sio->pages = 0;
395+
}
396+
sio->bvec[sio->pages].bv_page = page;
397+
sio->bvec[sio->pages].bv_len = PAGE_SIZE;
398+
sio->bvec[sio->pages].bv_offset = 0;
399+
sio->pages += 1;
400+
if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
401+
swap_read_unplug(sio);
402+
sio = NULL;
403+
}
404+
if (plug)
405+
*plug = sio;
386406
}
387407

388-
int swap_readpage(struct page *page, bool synchronous)
408+
int swap_readpage(struct page *page, bool synchronous,
409+
struct swap_iocb **plug)
389410
{
390411
struct bio *bio;
391412
int ret = 0;
@@ -413,7 +434,7 @@ int swap_readpage(struct page *page, bool synchronous)
413434
}
414435

415436
if (data_race(sis->flags & SWP_FS_OPS)) {
416-
ret = swap_readpage_fs(page);
437+
swap_readpage_fs(page, plug);
417438
goto out;
418439
}
419440

@@ -459,3 +480,16 @@ int swap_readpage(struct page *page, bool synchronous)
459480
delayacct_swapin_end();
460481
return ret;
461482
}
483+
484+
void __swap_read_unplug(struct swap_iocb *sio)
485+
{
486+
struct iov_iter from;
487+
struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
488+
int ret;
489+
490+
iov_iter_bvec(&from, READ, sio->bvec, sio->pages,
491+
PAGE_SIZE * sio->pages);
492+
ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
493+
if (ret != -EIOCBQUEUED)
494+
sio_read_complete(&sio->iocb, ret);
495+
}

mm/swap.h

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,15 @@
77

88
/* linux/mm/page_io.c */
99
int sio_pool_init(void);
10-
int swap_readpage(struct page *page, bool do_poll);
10+
struct swap_iocb;
11+
int swap_readpage(struct page *page, bool do_poll,
12+
struct swap_iocb **plug);
13+
void __swap_read_unplug(struct swap_iocb *plug);
14+
static inline void swap_read_unplug(struct swap_iocb *plug)
15+
{
16+
if (unlikely(plug))
17+
__swap_read_unplug(plug);
18+
}
1119
int swap_writepage(struct page *page, struct writeback_control *wbc);
1220
void end_swap_bio_write(struct bio *bio);
1321
int __swap_writepage(struct page *page, struct writeback_control *wbc,
@@ -41,7 +49,8 @@ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
4149
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
4250
struct vm_area_struct *vma,
4351
unsigned long addr,
44-
bool do_poll);
52+
bool do_poll,
53+
struct swap_iocb **plug);
4554
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
4655
struct vm_area_struct *vma,
4756
unsigned long addr,
@@ -56,7 +65,9 @@ static inline unsigned int page_swap_flags(struct page *page)
5665
return page_swap_info(page)->flags;
5766
}
5867
#else /* CONFIG_SWAP */
59-
static inline int swap_readpage(struct page *page, bool do_poll)
68+
struct swap_iocb;
69+
static inline int swap_readpage(struct page *page, bool do_poll,
70+
struct swap_iocb **plug)
6071
{
6172
return 0;
6273
}

mm/swap_state.c

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -520,14 +520,16 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
520520
* the swap entry is no longer in use.
521521
*/
522522
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
523-
struct vm_area_struct *vma, unsigned long addr, bool do_poll)
523+
struct vm_area_struct *vma,
524+
unsigned long addr, bool do_poll,
525+
struct swap_iocb **plug)
524526
{
525527
bool page_was_allocated;
526528
struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
527529
vma, addr, &page_was_allocated);
528530

529531
if (page_was_allocated)
530-
swap_readpage(retpage, do_poll);
532+
swap_readpage(retpage, do_poll, plug);
531533

532534
return retpage;
533535
}
@@ -621,6 +623,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
621623
unsigned long mask;
622624
struct swap_info_struct *si = swp_swap_info(entry);
623625
struct blk_plug plug;
626+
struct swap_iocb *splug = NULL;
624627
bool do_poll = true, page_allocated;
625628
struct vm_area_struct *vma = vmf->vma;
626629
unsigned long addr = vmf->address;
@@ -647,7 +650,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
647650
if (!page)
648651
continue;
649652
if (page_allocated) {
650-
swap_readpage(page, false);
653+
swap_readpage(page, false, &splug);
651654
if (offset != entry_offset) {
652655
SetPageReadahead(page);
653656
count_vm_event(SWAP_RA);
@@ -656,10 +659,12 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
656659
put_page(page);
657660
}
658661
blk_finish_plug(&plug);
662+
swap_read_unplug(splug);
659663

660664
lru_add_drain(); /* Push any new pages onto the LRU now */
661665
skip:
662-
return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
666+
/* The page was likely read above, so no need for plugging here */
667+
return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
663668
}
664669

665670
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -790,6 +795,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
790795
struct vm_fault *vmf)
791796
{
792797
struct blk_plug plug;
798+
struct swap_iocb *splug = NULL;
793799
struct vm_area_struct *vma = vmf->vma;
794800
struct page *page;
795801
pte_t *pte, pentry;
@@ -820,7 +826,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
820826
if (!page)
821827
continue;
822828
if (page_allocated) {
823-
swap_readpage(page, false);
829+
swap_readpage(page, false, &splug);
824830
if (i != ra_info.offset) {
825831
SetPageReadahead(page);
826832
count_vm_event(SWAP_RA);
@@ -829,10 +835,12 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
829835
put_page(page);
830836
}
831837
blk_finish_plug(&plug);
838+
swap_read_unplug(splug);
832839
lru_add_drain();
833840
skip:
841+
/* The page was likely read above, so no need for plugging here */
834842
return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
835-
ra_info.win == 1);
843+
ra_info.win == 1, NULL);
836844
}
837845

838846
/**

0 commit comments

Comments (0)