Skip to content

Commit a23f517

Browse files
Kefeng Wang authored and akpm00 committed
mm: convert mm_counter() to take a folio
Now all callers of mm_counter() have a folio, convert mm_counter() to take a folio. Saves a call to compound_head() hidden inside PageAnon().

Link: https://lkml.kernel.org/r/20240111152429.3374566-10-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent eabafaa commit a23f517

File tree

5 files changed

+14
-14
lines changed

5 files changed

+14
-14
lines changed

arch/s390/mm/pgtable.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -723,7 +723,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
723723
else if (is_migration_entry(entry)) {
724724
struct folio *folio = pfn_swap_entry_folio(entry);
725725

726-
dec_mm_counter(mm, mm_counter(&folio->page));
726+
dec_mm_counter(mm, mm_counter(folio));
727727
}
728728
free_swap_and_cache(entry);
729729
}

include/linux/mm.h

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -2603,11 +2603,11 @@ static inline int mm_counter_file(struct page *page)
26032603
return MM_FILEPAGES;
26042604
}
26052605

2606-
static inline int mm_counter(struct page *page)
2606+
static inline int mm_counter(struct folio *folio)
26072607
{
2608-
if (PageAnon(page))
2608+
if (folio_test_anon(folio))
26092609
return MM_ANONPAGES;
2610-
return mm_counter_file(page);
2610+
return mm_counter_file(&folio->page);
26112611
}
26122612

26132613
static inline unsigned long get_mm_rss(struct mm_struct *mm)

mm/memory.c

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -808,7 +808,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
808808
} else if (is_migration_entry(entry)) {
809809
folio = pfn_swap_entry_folio(entry);
810810

811-
rss[mm_counter(&folio->page)]++;
811+
rss[mm_counter(folio)]++;
812812

813813
if (!is_readable_migration_entry(entry) &&
814814
is_cow_mapping(vm_flags)) {
@@ -840,7 +840,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
840840
* keep things as they are.
841841
*/
842842
folio_get(folio);
843-
rss[mm_counter(page)]++;
843+
rss[mm_counter(folio)]++;
844844
/* Cannot fail as these pages cannot get pinned. */
845845
folio_try_dup_anon_rmap_pte(folio, page, src_vma);
846846

@@ -1476,7 +1476,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
14761476
if (pte_young(ptent) && likely(vma_has_recency(vma)))
14771477
folio_mark_accessed(folio);
14781478
}
1479-
rss[mm_counter(page)]--;
1479+
rss[mm_counter(folio)]--;
14801480
if (!delay_rmap) {
14811481
folio_remove_rmap_pte(folio, page, vma);
14821482
if (unlikely(page_mapcount(page) < 0))
@@ -1504,7 +1504,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
15041504
* see zap_install_uffd_wp_if_needed().
15051505
*/
15061506
WARN_ON_ONCE(!vma_is_anonymous(vma));
1507-
rss[mm_counter(page)]--;
1507+
rss[mm_counter(folio)]--;
15081508
if (is_device_private_entry(entry))
15091509
folio_remove_rmap_pte(folio, page, vma);
15101510
folio_put(folio);
@@ -1519,7 +1519,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
15191519
folio = pfn_swap_entry_folio(entry);
15201520
if (!should_zap_folio(details, folio))
15211521
continue;
1522-
rss[mm_counter(&folio->page)]--;
1522+
rss[mm_counter(folio)]--;
15231523
} else if (pte_marker_entry_uffd_wp(entry)) {
15241524
/*
15251525
* For anon: always drop the marker; for file: only

mm/rmap.c

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1780,7 +1780,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
17801780
set_huge_pte_at(mm, address, pvmw.pte, pteval,
17811781
hsz);
17821782
} else {
1783-
dec_mm_counter(mm, mm_counter(&folio->page));
1783+
dec_mm_counter(mm, mm_counter(folio));
17841784
set_pte_at(mm, address, pvmw.pte, pteval);
17851785
}
17861786

@@ -1795,7 +1795,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
17951795
* migration) will not expect userfaults on already
17961796
* copied pages.
17971797
*/
1798-
dec_mm_counter(mm, mm_counter(&folio->page));
1798+
dec_mm_counter(mm, mm_counter(folio));
17991799
} else if (folio_test_anon(folio)) {
18001800
swp_entry_t entry = page_swap_entry(subpage);
18011801
pte_t swp_pte;
@@ -2181,7 +2181,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
21812181
set_huge_pte_at(mm, address, pvmw.pte, pteval,
21822182
hsz);
21832183
} else {
2184-
dec_mm_counter(mm, mm_counter(&folio->page));
2184+
dec_mm_counter(mm, mm_counter(folio));
21852185
set_pte_at(mm, address, pvmw.pte, pteval);
21862186
}
21872187

@@ -2196,7 +2196,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
21962196
* migration) will not expect userfaults on already
21972197
* copied pages.
21982198
*/
2199-
dec_mm_counter(mm, mm_counter(&folio->page));
2199+
dec_mm_counter(mm, mm_counter(folio));
22002200
} else {
22012201
swp_entry_t entry;
22022202
pte_t swp_pte;

mm/userfaultfd.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -124,7 +124,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
124124
* Must happen after rmap, as mm_counter() checks mapping (via
125125
* PageAnon()), which is set by __page_set_anon_rmap().
126126
*/
127-
inc_mm_counter(dst_mm, mm_counter(page));
127+
inc_mm_counter(dst_mm, mm_counter(folio));
128128

129129
set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
130130

0 commit comments

Comments (0)