
Commit 8238287

Matthew Wilcox (Oracle) authored and torvalds committed
mm/readahead: make do_page_cache_ra take a readahead_control
Rename __do_page_cache_readahead() to do_page_cache_ra() and call it directly from ondemand_readahead() instead of indirecting via ra_submit().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Link: https://lkml.kernel.org/r/20200903140844.14194-5-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 73bb49d · commit 8238287
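For context: the change leans on the readahead_control plumbing added earlier in this series. The sketch below paraphrases the relevant pieces of include/linux/pagemap.h as they looked around this point; field layout and macro body are simplified, not quoted verbatim.

/* Descriptor for one readahead request; lives on the caller's stack. */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	/* private: use the readahead_* accessors instead */
	pgoff_t _index;			/* first page to read */
	unsigned int _nr_pages;		/* pages added so far */
	unsigned int _batch_count;
};

/* Declare and initialise a readahead_control on the stack. */
#define DEFINE_READAHEAD(rac, f, m, i)			\
	struct readahead_control rac = {		\
		.file = f,				\
		.mapping = m,				\
		._index = i,				\
	}

/* Accessor do_page_cache_ra() uses to recover the start index. */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

With that in place, do_page_cache_ra() needs only the readahead_control plus the read and lookahead sizes; the mapping, file and start index all travel inside the one structure.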

File tree: 2 files changed (+20, -19 lines)


mm/internal.h

Lines changed: 5 additions & 6 deletions
@@ -51,18 +51,17 @@ void unmap_page_range(struct mmu_gather *tlb,
 
 void force_page_cache_readahead(struct address_space *, struct file *,
 		pgoff_t index, unsigned long nr_to_read);
-void __do_page_cache_readahead(struct address_space *, struct file *,
-		pgoff_t index, unsigned long nr_to_read,
-		unsigned long lookahead_size);
+void do_page_cache_ra(struct readahead_control *,
+		unsigned long nr_to_read, unsigned long lookahead_size);
 
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
 static inline void ra_submit(struct file_ra_state *ra,
-		struct address_space *mapping, struct file *filp)
+		struct address_space *mapping, struct file *file)
 {
-	__do_page_cache_readahead(mapping, filp,
-			ra->start, ra->size, ra->async_size);
+	DEFINE_READAHEAD(ractl, file, mapping, ra->start);
+	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
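After this patch ra_submit() is only a thin wrapper: it packs (file, mapping, ra->start) into a stack readahead_control and forwards the window sizes from file_ra_state. A hedged sketch of what a call site effectively reduces to (the caller name here is illustrative, not taken from the tree):

static void example_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *file)
{
	/* equivalent of ra_submit(ra, mapping, file) after this change */
	DEFINE_READAHEAD(ractl, file, mapping, ra->start);
	do_page_cache_ra(&ractl, ra->size, ra->async_size);
}

ondemand_readahead() no longer goes through this wrapper at all, since it now builds a readahead_control of its own (see the mm/readahead.c hunks below).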

mm/readahead.c

Lines changed: 15 additions & 13 deletions
@@ -241,17 +241,16 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk. It allocates
+ * do_page_cache_ra() actually reads a chunk of disk. It allocates
  * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  */
-void __do_page_cache_readahead(struct address_space *mapping,
-		struct file *file, pgoff_t index, unsigned long nr_to_read,
-		unsigned long lookahead_size)
+void do_page_cache_ra(struct readahead_control *ractl,
+		unsigned long nr_to_read, unsigned long lookahead_size)
 {
-	DEFINE_READAHEAD(ractl, file, mapping, index);
-	struct inode *inode = mapping->host;
+	struct inode *inode = ractl->mapping->host;
+	unsigned long index = readahead_index(ractl);
 	loff_t isize = i_size_read(inode);
 	pgoff_t end_index;	/* The last page we want to read */
 
@@ -265,18 +264,19 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	if (nr_to_read > end_index - index)
 		nr_to_read = end_index - index + 1;
 
-	page_cache_ra_unbounded(&ractl, nr_to_read, lookahead_size);
+	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
 }
 
 /*
  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
  * memory at once.
  */
 void force_page_cache_readahead(struct address_space *mapping,
-		struct file *filp, pgoff_t index, unsigned long nr_to_read)
+		struct file *file, pgoff_t index, unsigned long nr_to_read)
 {
+	DEFINE_READAHEAD(ractl, file, mapping, index);
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
-	struct file_ra_state *ra = &filp->f_ra;
+	struct file_ra_state *ra = &file->f_ra;
 	unsigned long max_pages;
 
 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
@@ -294,7 +294,7 @@ void force_page_cache_readahead(struct address_space *mapping,
 
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
-		__do_page_cache_readahead(mapping, filp, index, this_chunk, 0);
+		do_page_cache_ra(&ractl, this_chunk, 0);
 
 		index += this_chunk;
 		nr_to_read -= this_chunk;
@@ -432,10 +432,11 @@ static int try_context_readahead(struct address_space *mapping,
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static void ondemand_readahead(struct address_space *mapping,
-		struct file_ra_state *ra, struct file *filp,
+		struct file_ra_state *ra, struct file *file,
 		bool hit_readahead_marker, pgoff_t index,
 		unsigned long req_size)
 {
+	DEFINE_READAHEAD(ractl, file, mapping, index);
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages = ra->ra_pages;
 	unsigned long add_pages;
@@ -516,7 +517,7 @@ static void ondemand_readahead(struct address_space *mapping,
 	 * standalone, small random read
 	 * Read as is, and do not pollute the readahead state.
 	 */
-	__do_page_cache_readahead(mapping, filp, index, req_size, 0);
+	do_page_cache_ra(&ractl, req_size, 0);
 	return;
 
 initial_readahead:
@@ -542,7 +543,8 @@ static void ondemand_readahead(struct address_space *mapping,
 		}
 	}
 
-	ra_submit(ra, mapping, filp);
+	ractl._index = ra->start;
+	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 }
 
 /**
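One detail in the final hunk is easy to miss: ractl was initialised with the index the caller asked for, but by the time ondemand_readahead() reaches its tail the window may have moved (ra->start need not equal index), so _index is re-pointed at ra->start before calling do_page_cache_ra() directly; that is what lets the ra_submit() indirection go away here. For orientation, the usual way into this path is the synchronous readahead entry point, roughly as below (a simplified sketch of the wrapper as it stood at this point; the real function carries additional checks, e.g. for random-access files and cgroup congestion):

void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		pgoff_t index, unsigned long req_count)
{
	/* no readahead state configured: nothing to do */
	if (!ra->ra_pages)
		return;

	/* hand the request to the on-demand heuristics */
	ondemand_readahead(mapping, ra, file, false, index, req_count);
}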
