
Commit 713da0b

Matthew Wilcox (Oracle) authored and akpm00 committed
mm: renovate page_address_in_vma()
This function doesn't modify any of its arguments, so if we make a few
other functions take const pointers, we can make page_address_in_vma()
take const pointers too. All of its callers have the containing folio
already, so pass that in as an argument instead of recalculating it.
Also add kernel-doc.

Link: https://lkml.kernel.org/r/20241005200121.3231142-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 7d3e93e commit 713da0b
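
To make the interface change concrete, here is a minimal sketch (not part of the commit; the helper name example_page_addr_in_vma is invented for illustration) of how a call site moves from the old signature to the new one shown in the hunks below:

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Illustrative helper only, not in the kernel tree. Old call, before
 * this commit:
 *
 *	addr = page_address_in_vma(page, vma);
 *
 * New call: the caller already has the folio, so it is passed in rather
 * than being recomputed inside page_address_in_vma() via page_folio().
 */
static unsigned long example_page_addr_in_vma(const struct folio *folio,
		const struct page *page, const struct vm_area_struct *vma)
{
	unsigned long addr = page_address_in_vma(folio, page, vma);

	/*
	 * Callers that only track the folio pass its first page, as
	 * mm/ksm.c and mm/mempolicy.c do below:
	 *	page_address_in_vma(folio, folio_page(folio, 0), vma);
	 */
	return addr;	/* -EFAULT if the page is not mapped by this VMA */
}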

7 files changed: +30, -21 lines changed

include/linux/rmap.h: 2 additions & 5 deletions

@@ -728,11 +728,8 @@ page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)
 }

 bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
-
-/*
- * Used by swapoff to help locate where page is expected in vma.
- */
-unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+unsigned long page_address_in_vma(const struct folio *folio,
+		const struct page *, const struct vm_area_struct *);

 /*
  * Cleans the PTEs of shared mappings.

mm/internal.h: 2 additions & 2 deletions

@@ -841,7 +841,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 }

 /* mm/util.c */
-struct anon_vma *folio_anon_vma(struct folio *folio);
+struct anon_vma *folio_anon_vma(const struct folio *folio);

 #ifdef CONFIG_MMU
 void unmap_mapping_folio(struct folio *folio);
@@ -959,7 +959,7 @@ extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
  * If any page in this range is mapped by this VMA, return the first address
  * where any of these pages appear. Otherwise, return -EFAULT.
  */
-static inline unsigned long vma_address(struct vm_area_struct *vma,
+static inline unsigned long vma_address(const struct vm_area_struct *vma,
 		pgoff_t pgoff, unsigned long nr_pages)
 {
 	unsigned long address;

mm/ksm.c: 3 additions & 4 deletions

@@ -1256,7 +1256,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
 	if (WARN_ON_ONCE(folio_test_large(folio)))
 		return err;

-	pvmw.address = page_address_in_vma(&folio->page, vma);
+	pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
 	if (pvmw.address == -EFAULT)
 		goto out;

@@ -1340,7 +1340,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 {
 	struct folio *kfolio = page_folio(kpage);
 	struct mm_struct *mm = vma->vm_mm;
-	struct folio *folio;
+	struct folio *folio = page_folio(page);
 	pmd_t *pmd;
 	pmd_t pmde;
 	pte_t *ptep;
@@ -1350,7 +1350,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	int err = -EFAULT;
 	struct mmu_notifier_range range;

-	addr = page_address_in_vma(page, vma);
+	addr = page_address_in_vma(folio, page, vma);
 	if (addr == -EFAULT)
 		goto out;

@@ -1416,7 +1416,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	ptep_clear_flush(vma, addr, ptep);
 	set_pte_at(mm, addr, ptep, newpte);

-	folio = page_folio(page);
 	folio_remove_rmap_pte(folio, page, vma);
 	if (!folio_mapped(folio))
 		folio_free_swap(folio);

mm/memory-failure.c: 1 addition & 1 deletion

@@ -671,7 +671,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 		 */
 		if (vma->vm_mm != t->mm)
 			continue;
-		addr = page_address_in_vma(page, vma);
+		addr = page_address_in_vma(folio, page, vma);
 		add_to_kill_anon_file(t, page, vma, to_kill, addr);
 	}
 }

mm/mempolicy.c: 1 addition & 1 deletion

@@ -1367,7 +1367,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 		if (!list_entry_is_head(folio, &pagelist, lru)) {
 			vma_iter_init(&vmi, mm, start);
 			for_each_vma_range(vmi, vma, end) {
-				addr = page_address_in_vma(
+				addr = page_address_in_vma(folio,
 						folio_page(folio, 0), vma);
 				if (addr != -EFAULT)
 					break;

mm/rmap.c: 20 additions & 7 deletions

@@ -767,14 +767,27 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 }
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

-/*
- * At what user virtual address is page expected in vma?
- * Caller should check the page is actually part of the vma.
+/**
+ * page_address_in_vma - The virtual address of a page in this VMA.
+ * @folio: The folio containing the page.
+ * @page: The page within the folio.
+ * @vma: The VMA we need to know the address in.
+ *
+ * Calculates the user virtual address of this page in the specified VMA.
+ * It is the caller's responsibililty to check the page is actually
+ * within the VMA. There may not currently be a PTE pointing at this
+ * page, but if a page fault occurs at this address, this is the page
+ * which will be accessed.
+ *
+ * Context: Caller should hold a reference to the folio. Caller should
+ * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
+ * VMA from being altered.
+ *
+ * Return: The virtual address corresponding to this page in the VMA.
  */
-unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_address_in_vma(const struct folio *folio,
+		const struct page *page, const struct vm_area_struct *vma)
 {
-	struct folio *folio = page_folio(page);
-
 	if (folio_test_anon(folio)) {
 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
 		/*
@@ -790,7 +803,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 		return -EFAULT;
 	}

-	/* The !page__anon_vma above handles KSM folios */
+	/* KSM folios don't reach here because of the !page__anon_vma check */
 	return vma_address(vma, page_pgoff(folio, page), 1);
 }

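The new kernel-doc above spells out a calling context: hold a folio reference and a lock that keeps the VMA from being altered. A hedged sketch of a caller honouring that contract, assuming the mmap_lock is the lock in use and with an invented function name, might look like this (in real code the VMA itself would already have been looked up under the same lock):

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical caller, for illustration only; not part of this commit. */
static unsigned long example_stable_lookup(struct folio *folio,
		struct page *page, struct vm_area_struct *vma)
{
	unsigned long addr;

	folio_get(folio);		/* Context: hold a folio reference */
	mmap_read_lock(vma->vm_mm);	/* Context: keep the VMA from being altered */

	addr = page_address_in_vma(folio, page, vma);

	mmap_read_unlock(vma->vm_mm);
	folio_put(folio);

	return addr;	/* virtual address, or -EFAULT if not in this VMA */
}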
mm/util.c: 1 addition & 1 deletion

@@ -820,7 +820,7 @@ void *vcalloc_noprof(size_t n, size_t size)
 }
 EXPORT_SYMBOL(vcalloc_noprof);

-struct anon_vma *folio_anon_vma(struct folio *folio)
+struct anon_vma *folio_anon_vma(const struct folio *folio)
 {
 	unsigned long mapping = (unsigned long)folio->mapping;