hugetlb: Pass vma into huge_pte_alloc() and huge_pmd_share()
This is preparation work to allow the per-architecture huge_pte_alloc() to
behave differently according to VMA attributes.

Pass the vma deeper into huge_pmd_share() as well, so that its find_vma()
call can be dropped.

Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
xzpeter authored and intel-lab-lkp committed Feb 17, 2021
1 parent 1ffa976 commit 58ca9ac
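
To make the intent concrete: once the vma is available, a per-architecture
huge_pte_alloc() can branch on mapping attributes instead of treating every
mapping alike. The sketch below is illustrative only and is not part of this
commit; arch_special_huge_pte_alloc() and arch_default_huge_pte_alloc() are
hypothetical helpers, and VM_SHARED merely stands in for whatever VMA
attribute an architecture might care about.

/*
 * Hypothetical sketch (not from this commit): the new vma argument lets
 * an architecture make per-VMA decisions. Both arch_*_huge_pte_alloc()
 * helpers are made up for illustration.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        if (vma->vm_flags & VM_SHARED)
                /* e.g. only shared mappings take the special path */
                return arch_special_huge_pte_alloc(mm, addr, sz);

        return arch_default_huge_pte_alloc(mm, addr, sz);
}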
Showing 11 changed files with 24 additions and 20 deletions.
4 changes: 2 additions & 2 deletions arch/arm64/mm/hugetlbpage.c
@@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 		set_pte(ptep, pte);
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgdp;
@@ -286,7 +286,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	} else if (sz == PMD_SIZE) {
 		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
 		    pud_none(READ_ONCE(*pudp)))
-			ptep = huge_pmd_share(mm, addr, pudp);
+			ptep = huge_pmd_share(mm, vma, addr, pudp);
 		else
 			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
 	} else if (sz == (CONT_PMD_SIZE)) {
3 changes: 2 additions & 1 deletion arch/ia64/mm/hugetlbpage.c
@@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
 EXPORT_SYMBOL(hpage_shift);
 
 pte_t *
-huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+	       unsigned long addr, unsigned long sz)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
4 changes: 2 additions & 2 deletions arch/mips/mm/hugetlbpage.c
@@ -21,8 +21,8 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
-		      unsigned long sz)
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
2 changes: 1 addition & 1 deletion arch/parisc/mm/hugetlbpage.c
@@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
3 changes: 2 additions & 1 deletion arch/powerpc/mm/hugetlbpage.c
@@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
  * At this point we do the placement change only for BOOK3S 64. This would
  * possibly work on other subarchs.
  */
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pg;
 	p4d_t *p4;
2 changes: 1 addition & 1 deletion arch/s390/mm/hugetlbpage.c
@@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return pte;
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgdp;
2 changes: 1 addition & 1 deletion arch/sh/mm/hugetlbpage.c
@@ -21,7 +21,7 @@
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
2 changes: 1 addition & 1 deletion arch/sparc/mm/hugetlbpage.c
@@ -280,7 +280,7 @@ unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&p
 unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
5 changes: 3 additions & 2 deletions include/linux/hugetlb.h
@@ -152,7 +152,8 @@ void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, pud_t *pud);
 
 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 
@@ -161,7 +162,7 @@ extern struct list_head huge_boot_pages;
 
 /* arch callbacks */
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz);
 pte_t *huge_pte_offset(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz);
15 changes: 8 additions & 7 deletions mm/hugetlb.c
@@ -3766,7 +3766,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
-		dst_pte = huge_pte_alloc(dst, addr, sz);
+		dst_pte = huge_pte_alloc(dst, vma, addr, sz);
 		if (!dst_pte) {
 			ret = -ENOMEM;
 			break;
@@ -4503,7 +4503,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	mapping = vma->vm_file->f_mapping;
 	i_mmap_lock_read(mapping);
-	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
 	if (!ptep) {
 		i_mmap_unlock_read(mapping);
 		return VM_FAULT_OOM;
@@ -5293,9 +5293,9 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
  * only required for subsequent processing.
  */
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, pud_t *pud)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
@@ -5373,7 +5373,8 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 #define want_pmd_share() (1)
 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, pud_t *pud)
 {
 	return NULL;
 }
@@ -5392,7 +5393,7 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
@@ -5411,7 +5412,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	} else {
 		BUG_ON(sz != PMD_SIZE);
 		if (want_pmd_share() && pud_none(*pud))
-			pte = huge_pmd_share(mm, addr, pud);
+			pte = huge_pmd_share(mm, vma, addr, pud);
 		else
 			pte = (pte_t *)pmd_alloc(mm, pud, addr);
 	}
2 changes: 1 addition & 1 deletion mm/userfaultfd.c
@@ -290,7 +290,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	err = -ENOMEM;
-	dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
+	dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
 	if (!dst_pte) {
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		i_mmap_unlock_read(mapping);
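
Seen end to end, the second half of the change is a small refactor of
huge_pmd_share(): every caller already holds the relevant vma, so passing it
in lets the function drop its find_vma() lookup. A condensed before/after
view of the hunks above (elided bodies marked with comments):

/* Before: huge_pmd_share() re-derived the vma from the faulting address. */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
        struct vm_area_struct *vma = find_vma(mm, addr);
        struct address_space *mapping = vma->vm_file->f_mapping;
        /* ... */
}

/* After: callers pass the vma they already hold, skipping the VMA walk. */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        /* ... */
}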
