@@ -241,17 +241,16 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk. It allocates
+ * do_page_cache_ra() actually reads a chunk of disk. It allocates
  * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  */
-void __do_page_cache_readahead(struct address_space *mapping,
-		struct file *file, pgoff_t index, unsigned long nr_to_read,
-		unsigned long lookahead_size)
+void do_page_cache_ra(struct readahead_control *ractl,
+		unsigned long nr_to_read, unsigned long lookahead_size)
 {
-	DEFINE_READAHEAD(ractl, file, mapping, index);
-	struct inode *inode = mapping->host;
+	struct inode *inode = ractl->mapping->host;
+	unsigned long index = readahead_index(ractl);
 	loff_t isize = i_size_read(inode);
 	pgoff_t end_index;	/* The last page we want to read */
 
@@ -265,18 +264,19 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	if (nr_to_read > end_index - index)
 		nr_to_read = end_index - index + 1;
 
-	page_cache_ra_unbounded(&ractl, nr_to_read, lookahead_size);
+	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
 }
 
 /*
  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
  * memory at once.
  */
 void force_page_cache_readahead(struct address_space *mapping,
-		struct file *filp, pgoff_t index, unsigned long nr_to_read)
+		struct file *file, pgoff_t index, unsigned long nr_to_read)
 {
+	DEFINE_READAHEAD(ractl, file, mapping, index);
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
-	struct file_ra_state *ra = &filp->f_ra;
+	struct file_ra_state *ra = &file->f_ra;
 	unsigned long max_pages;
 
 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
@@ -294,7 +294,7 @@ void force_page_cache_readahead(struct address_space *mapping,
 
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
-		__do_page_cache_readahead(mapping, filp, index, this_chunk, 0);
+		do_page_cache_ra(&ractl, this_chunk, 0);
 
 		index += this_chunk;
 		nr_to_read -= this_chunk;
@@ -432,10 +432,11 @@ static int try_context_readahead(struct address_space *mapping,
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static void ondemand_readahead(struct address_space *mapping,
-		struct file_ra_state *ra, struct file *filp,
+		struct file_ra_state *ra, struct file *file,
 		bool hit_readahead_marker, pgoff_t index,
 		unsigned long req_size)
 {
+	DEFINE_READAHEAD(ractl, file, mapping, index);
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages = ra->ra_pages;
 	unsigned long add_pages;
@@ -516,7 +517,7 @@ static void ondemand_readahead(struct address_space *mapping,
 		 * standalone, small random read
 		 * Read as is, and do not pollute the readahead state.
 		 */
-		__do_page_cache_readahead(mapping, filp, index, req_size, 0);
+		do_page_cache_ra(&ractl, req_size, 0);
 		return;
 
 initial_readahead:
@@ -542,7 +543,8 @@ static void ondemand_readahead(struct address_space *mapping,
 		}
 	}
 
-	ra_submit(ra, mapping, filp);
+	ractl._index = ra->start;
+	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 }
 
 /**
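
Every caller converted above ends up with the same pattern: build a struct readahead_control on the stack with DEFINE_READAHEAD(), then hand it to do_page_cache_ra(). The sketch below restates that pattern in isolation. It is a minimal illustration and not part of the patch; the wrapper name example_read_pages() is invented here, and it assumes the kernel-internal definitions of DEFINE_READAHEAD(), readahead_index() and do_page_cache_ra() exactly as they appear in this diff.

/* Illustrative sketch only -- mirrors the caller pattern introduced above.
 * example_read_pages() is a hypothetical helper, not part of this patch.
 */
static void example_read_pages(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	/* Stack-allocated readahead_control, as force_page_cache_readahead()
	 * and ondemand_readahead() now set up. */
	DEFINE_READAHEAD(ractl, file, mapping, index);

	/* Zero lookahead: just allocate and submit nr_to_read pages. */
	do_page_cache_ra(&ractl, nr_to_read, 0);
}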