
Commit 09cbfea

kiryl authored and torvalds committed
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time ago with the promise that one day it would be possible to implement the page cache with bigger chunks than PAGE_SIZE. This promise never materialized, and it is unlikely it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to PAGE_SIZE, and it is a constant source of confusion whether PAGE_CACHE_* or PAGE_* constants should be used in a particular case, especially on the border between fs and mm. Switching globally to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much breakage to be doable.

Let's stop pretending that pages in the page cache are special. They are not.

The changes are pretty straightforward:

- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using the script below. For some reason, coccinelle doesn't patch header files; I've called spatch on them manually. The only adjustment after coccinelle is a revert of the changes to the PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach. I'll fix them manually in a separate patch. Comments and documentation will also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent c05c2ec commit 09cbfea
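The rewrite is purely mechanical: the old macros were plain aliases of their PAGE_* counterparts (include/linux/pagemap.h defined PAGE_CACHE_SHIFT as PAGE_SHIFT, and page_cache_get/page_cache_release as get_page/put_page), so every rule in the script leaves the compiled result unchanged. Below is a minimal userspace sketch of why the shift-cancellation rules are no-ops, assuming 4 KiB pages (PAGE_SHIFT == 12) and redefining the old macros locally for illustration only:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT       12                  /* assumed: typical 4 KiB pages */
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define PAGE_CACHE_SHIFT PAGE_SHIFT          /* the old macro was an alias */
#define PAGE_CACHE_SIZE  PAGE_SIZE           /* likewise */

int main(void)
{
    unsigned long index = 42;   /* stands in for a page->index value */

    /* first two rules: shifting by (PAGE_CACHE_SHIFT - PAGE_SHIFT) is shifting by 0 */
    assert((index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)) == index);
    assert((index >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)) == index);

    /* renaming the macro does not change the byte offset it produces */
    assert((index << PAGE_CACHE_SHIFT) == (index << PAGE_SHIFT));

    printf("PAGE_CACHE_SIZE == PAGE_SIZE == %lu\n", PAGE_SIZE);
    return 0;
}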

381 files changed: +2722 / -2721 lines


arch/arc/mm/cache.c

Lines changed: 1 addition & 1 deletion
@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
 
 /* kernel reading from page with U-mapping */
 phys_addr_t paddr = (unsigned long)page_address(page);
-unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+unsigned long vaddr = page->index << PAGE_SHIFT;
 
 if (addr_not_cache_congruent(paddr, vaddr))
 __flush_dcache_page(paddr, vaddr);

arch/arm/mm/flush.c

Lines changed: 2 additions & 2 deletions
@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 */
 if (mapping && cache_is_vipt_aliasing())
 flush_pfn_alias(page_to_pfn(page),
-page->index << PAGE_CACHE_SHIFT);
+page->index << PAGE_SHIFT);
 }
 
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 * data in the current VM view associated with this page.
 * - aliasing VIPT: we only need to find one mapping of this page.
 */
-pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+pgoff = page->index;
 
 flush_dcache_mmap_lock(mapping);
 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {

arch/parisc/kernel/cache.c

Lines changed: 1 addition & 1 deletion
@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
 if (!mapping)
 return;
 
-pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+pgoff = page->index;
 
 /* We have carefully arranged in arch_get_unmapped_area() that
 * *any* mappings of a file are always congruently mapped (whether

arch/powerpc/platforms/cell/spufs/inode.c

Lines changed: 2 additions & 2 deletions
@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
 return -ENOMEM;
 
 sb->s_maxbytes = MAX_LFS_FILESIZE;
-sb->s_blocksize = PAGE_CACHE_SIZE;
-sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+sb->s_blocksize = PAGE_SIZE;
+sb->s_blocksize_bits = PAGE_SHIFT;
 sb->s_magic = SPUFS_MAGIC;
 sb->s_op = &s_ops;
 sb->s_fs_info = info;

arch/s390/hypfs/inode.c

Lines changed: 2 additions & 2 deletions
@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 sbi->uid = current_uid();
 sbi->gid = current_gid();
 sb->s_fs_info = sbi;
-sb->s_blocksize = PAGE_CACHE_SIZE;
-sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+sb->s_blocksize = PAGE_SIZE;
+sb->s_blocksize_bits = PAGE_SHIFT;
 sb->s_magic = HYPFS_MAGIC;
 sb->s_op = &hypfs_s_ops;
 if (hypfs_parse_options(data, sb))

block/bio.c

Lines changed: 4 additions & 4 deletions
@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 * release the pages we didn't map into the bio, if any
 */
 while (j < page_limit)
-page_cache_release(pages[j++]);
+put_page(pages[j++]);
 }
 
 kfree(pages);
@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 for (j = 0; j < nr_pages; j++) {
 if (!pages[j])
 break;
-page_cache_release(pages[j]);
+put_page(pages[j]);
 }
 out:
 kfree(pages);
@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
 if (bio_data_dir(bio) == READ)
 set_page_dirty_lock(bvec->bv_page);
 
-page_cache_release(bvec->bv_page);
+put_page(bvec->bv_page);
 }
 
 bio_put(bio);
@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
 struct page *page = bvec->bv_page;
 
 if (PageDirty(page) || PageCompound(page)) {
-page_cache_release(page);
+put_page(page);
 bvec->bv_page = NULL;
 } else {
 nr_clean_pages++;

block/blk-core.c

Lines changed: 1 addition & 1 deletion
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 goto fail_id;
 
 q->backing_dev_info.ra_pages =
-(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
 q->backing_dev_info.name = "block";
 q->node = node_id;

block/blk-settings.c

Lines changed: 6 additions & 6 deletions
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 struct queue_limits *limits = &q->limits;
 unsigned int max_sectors;
 
-if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
-max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+if ((max_hw_sectors << 9) < PAGE_SIZE) {
+max_hw_sectors = 1 << (PAGE_SHIFT - 9);
 printk(KERN_INFO "%s: set to minimum %d\n",
 __func__, max_hw_sectors);
 }
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
 **/
 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
-if (max_size < PAGE_CACHE_SIZE) {
-max_size = PAGE_CACHE_SIZE;
+if (max_size < PAGE_SIZE) {
+max_size = PAGE_SIZE;
 printk(KERN_INFO "%s: set to minimum %d\n",
 __func__, max_size);
 }
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 **/
 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
-if (mask < PAGE_CACHE_SIZE - 1) {
-mask = PAGE_CACHE_SIZE - 1;
+if (mask < PAGE_SIZE - 1) {
+mask = PAGE_SIZE - 1;
 printk(KERN_INFO "%s: set to minimum %lx\n",
 __func__, mask);
 }
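In blk_queue_max_hw_sectors() above, the minimum is expressed in 512-byte sectors: max_hw_sectors << 9 is the request size in bytes and 1 << (PAGE_SHIFT - 9) is one page worth of sectors, so the floor stays "one page" after the rename. A small sketch of that arithmetic, assuming 4 KiB pages (the 9 is log2 of the 512-byte sector size):

#include <assert.h>

#define PAGE_SHIFT   12   /* assumed 4 KiB pages */
#define SECTOR_SHIFT 9    /* 512-byte sectors */

int main(void)
{
    unsigned int max_hw_sectors = 4;   /* hypothetical: only 2 KiB per request */

    /* same clamp as blk_queue_max_hw_sectors(): never below one page */
    if ((max_hw_sectors << SECTOR_SHIFT) < (1U << PAGE_SHIFT))
        max_hw_sectors = 1 << (PAGE_SHIFT - SECTOR_SHIFT);

    assert(max_hw_sectors == 8);   /* 8 sectors * 512 bytes == one 4 KiB page */
    return 0;
}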

block/blk-sysfs.c

Lines changed: 4 additions & 4 deletions
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
 unsigned long ra_kb = q->backing_dev_info.ra_pages <<
-(PAGE_CACHE_SHIFT - 10);
+(PAGE_SHIFT - 10);
 
 return queue_var_show(ra_kb, (page));
 }
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 if (ret < 0)
 return ret;
 
-q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
 return ret;
 }
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 if (blk_queue_cluster(q))
 return queue_var_show(queue_max_segment_size(q), (page));
 
-return queue_var_show(PAGE_CACHE_SIZE, (page));
+return queue_var_show(PAGE_SIZE, (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
 unsigned long max_sectors_kb,
 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
-page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+page_kb = 1 << (PAGE_SHIFT - 10);
 ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
 if (ret < 0)
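The queue_ra_show()/queue_ra_store() hunks above convert between a page count and kilobytes: shifting by (PAGE_SHIFT - 10) works because a kilobyte is 1 << 10 bytes. A quick sketch of the round trip, assuming 4 KiB pages (so the shift is 2):

#include <assert.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

int main(void)
{
    unsigned long ra_pages = 32;                          /* hypothetical readahead window */
    unsigned long ra_kb = ra_pages << (PAGE_SHIFT - 10);  /* pages -> KiB, as in queue_ra_show() */

    assert(ra_kb == 128);                                 /* 32 pages * 4 KiB == 128 KiB */
    assert((ra_kb >> (PAGE_SHIFT - 10)) == ra_pages);     /* KiB -> pages, as in queue_ra_store() */
    return 0;
}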

block/cfq-iosched.c

Lines changed: 1 addition & 1 deletion
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 * idle timer unplug to continue working.
 */
 if (cfq_cfqq_wait_request(cfqq)) {
-if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+if (blk_rq_bytes(rq) > PAGE_SIZE ||
 cfqd->busy_queues > 1) {
 cfq_del_timer(cfqd, cfqq);
 cfq_clear_cfqq_wait_request(cfqq);
