Skip to content

Commit 28c0571

Browse files
kvaneesh
authored and torvalds committed
powerpc/mm/hugetlb: remove follow_huge_addr for powerpc
With generic code now handling hugetlb entries at pgd level and also supporting hugepage directory format, we can now remove the powerpc sepcific follow_huge_addr implementation. Link: http://lkml.kernel.org/r/1494926612-23928-9-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Mike Kravetz <kravetz@us.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 50791e6 commit 28c0571

File tree

1 file changed

+0
-64
lines changed

1 file changed

+0
-64
lines changed

arch/powerpc/mm/hugetlbpage.c

Lines changed: 0 additions & 64 deletions
Original file line number | Diff line number | Diff line change
@@ -619,11 +619,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
619619
} while (addr = next, addr != end);
620620
}
621621

622-
/*
623-
* 64 bit book3s use generic follow_page_mask
624-
*/
625-
#ifdef CONFIG_PPC_BOOK3S_64
626-
627622
struct page *follow_huge_pd(struct vm_area_struct *vma,
628623
unsigned long address, hugepd_t hpd,
629624
int flags, int pdshift)
@@ -657,65 +652,6 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
657652
return page;
658653
}
659654

660-
#else /* !CONFIG_PPC_BOOK3S_64 */
661-
662-
/*
663-
* We are holding mmap_sem, so a parallel huge page collapse cannot run.
664-
* To prevent hugepage split, disable irq.
665-
*/
666-
struct page *
667-
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
668-
{
669-
bool is_thp;
670-
pte_t *ptep, pte;
671-
unsigned shift;
672-
unsigned long mask, flags;
673-
struct page *page = ERR_PTR(-EINVAL);
674-
675-
local_irq_save(flags);
676-
ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
677-
if (!ptep)
678-
goto no_page;
679-
pte = READ_ONCE(*ptep);
680-
/*
681-
* Verify it is a huge page else bail.
682-
* Transparent hugepages are handled by generic code. We can skip them
683-
* here.
684-
*/
685-
if (!shift || is_thp)
686-
goto no_page;
687-
688-
if (!pte_present(pte)) {
689-
page = NULL;
690-
goto no_page;
691-
}
692-
mask = (1UL << shift) - 1;
693-
page = pte_page(pte);
694-
if (page)
695-
page += (address & mask) / PAGE_SIZE;
696-
697-
no_page:
698-
local_irq_restore(flags);
699-
return page;
700-
}
701-
702-
struct page *
703-
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
704-
pmd_t *pmd, int write)
705-
{
706-
BUG();
707-
return NULL;
708-
}
709-
710-
struct page *
711-
follow_huge_pud(struct mm_struct *mm, unsigned long address,
712-
pud_t *pud, int write)
713-
{
714-
BUG();
715-
return NULL;
716-
}
717-
#endif
718-
719655
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
720656
unsigned long sz)
721657
{

0 commit comments

Comments (0)