Commit cfe28c5

x86: mm: Remove x86 version of huge_pmd_share.
The huge_pmd_share code has been copied over to mm/hugetlb.c to make it accessible to other architectures. Remove the x86 copy of the huge_pmd_share code and enable the ARCH_WANT_HUGE_PMD_SHARE config flag, so that the generic version is used instead.

Signed-off-by: Steve Capper <steve.capper@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
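For context, the parent commit (3212b53) carried the generic copy into mm/hugetlb.c behind the new ARCH_WANT_HUGE_PMD_SHARE symbol. A minimal sketch of how a CONFIG-gated helper of this shape can be wired up; the guard structure and the fallback stub are assumptions for illustration, not the verbatim mm/hugetlb.c source:

/* Sketch only -- assumed shape of the mm/hugetlb.c guard, not verbatim. */
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
/* Generic copy of the sharing logic (compare the x86 version removed below). */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#else
static inline pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr,
				    pud_t *pud)
{
	/*
	 * Without sharing, fall back to a plain pmd allocation -- the same
	 * thing the removed x86 code does for a non-shareable VMA.
	 */
	return (pte_t *)pmd_alloc(mm, pud, addr);
}
#endif

With the symbol set to def_bool y on x86 (the Kconfig hunk below), the build picks up the generic implementation and the per-arch copy can go.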
1 parent 3212b53 commit cfe28c5

File tree

2 files changed: +3, −120 lines


arch/x86/Kconfig

Lines changed: 3 additions & 0 deletions

@@ -207,6 +207,9 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 
+config ARCH_WANT_HUGE_PMD_SHARE
+	def_bool y
+
 config ZONE_DMA32
 	bool
 	default X86_64
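Any other architecture can opt in to the generic sharing code the same way, by defining the symbol in its own Kconfig; a hypothetical sketch for some other arch (this commit only touches x86):

# arch/<arch>/Kconfig -- hypothetical, for illustration only
config ARCH_WANT_HUGE_PMD_SHARE
	def_bool y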

arch/x86/mm/hugetlbpage.c

Lines changed: 0 additions & 120 deletions

@@ -16,126 +16,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
-static unsigned long page_table_shareable(struct vm_area_struct *svma,
-				struct vm_area_struct *vma,
-				unsigned long addr, pgoff_t idx)
-{
-	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
-				svma->vm_start;
-	unsigned long sbase = saddr & PUD_MASK;
-	unsigned long s_end = sbase + PUD_SIZE;
-
-	/* Allow segments to share if only one is marked locked */
-	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
-	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
-
-	/*
-	 * match the virtual addresses, permission and the alignment of the
-	 * page table page.
-	 */
-	if (pmd_index(addr) != pmd_index(saddr) ||
-	    vm_flags != svm_flags ||
-	    sbase < svma->vm_start || svma->vm_end < s_end)
-		return 0;
-
-	return saddr;
-}
-
-static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
-{
-	unsigned long base = addr & PUD_MASK;
-	unsigned long end = base + PUD_SIZE;
-
-	/*
-	 * check on proper vm_flags and page table alignment
-	 */
-	if (vma->vm_flags & VM_MAYSHARE &&
-	    vma->vm_start <= base && end <= vma->vm_end)
-		return 1;
-	return 0;
-}
-
-/*
- * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
- * and returns the corresponding pte. While this is not necessary for the
- * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_mutex section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
- */
-static pte_t *
-huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
-			vma->vm_pgoff;
-	struct vm_area_struct *svma;
-	unsigned long saddr;
-	pte_t *spte = NULL;
-	pte_t *pte;
-
-	if (!vma_shareable(vma, addr))
-		return (pte_t *)pmd_alloc(mm, pud, addr);
-
-	mutex_lock(&mapping->i_mmap_mutex);
-	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
-		if (svma == vma)
-			continue;
-
-		saddr = page_table_shareable(svma, vma, addr, idx);
-		if (saddr) {
-			spte = huge_pte_offset(svma->vm_mm, saddr);
-			if (spte) {
-				get_page(virt_to_page(spte));
-				break;
-			}
-		}
-	}
-
-	if (!spte)
-		goto out;
-
-	spin_lock(&mm->page_table_lock);
-	if (pud_none(*pud))
-		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
-	else
-		put_page(virt_to_page(spte));
-	spin_unlock(&mm->page_table_lock);
-out:
-	pte = (pte_t *)pmd_alloc(mm, pud, addr);
-	mutex_unlock(&mapping->i_mmap_mutex);
-	return pte;
-}
-
-/*
- * unmap huge page backed by shared pte.
- *
- * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
- * called with vma->vm_mm->page_table_lock held.
- *
- * returns: 1 successfully unmapped a shared pte page
- *	    0 the underlying pte page is not shared, or it is the last user
- */
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	pgd_t *pgd = pgd_offset(mm, *addr);
-	pud_t *pud = pud_offset(pgd, *addr);
-
-	BUG_ON(page_count(virt_to_page(ptep)) == 0);
-	if (page_count(virt_to_page(ptep)) == 1)
-		return 0;
-
-	pud_clear(pud);
-	put_page(virt_to_page(ptep));
-	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
-	return 1;
-}
-
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 		      unsigned long addr, unsigned long sz)
 {
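The removed comment block above documents the huge_pmd_unshare() contract, which the generic copy now enforces. A hedged sketch of the typical caller pattern, with the surrounding loop and variable setup assumed for illustration (not verbatim kernel source):

/*
 * Illustrative unmap loop -- assumed context, not verbatim kernel source.
 * Per the contract above, huge_pmd_unshare() must be called with
 * mm->page_table_lock held; sz is the huge page size.
 */
for (address = start; address < end; address += sz) {
	ptep = huge_pte_offset(mm, address);
	if (!ptep)
		continue;
	/*
	 * If the pmd page is shared (page_count > 1), huge_pmd_unshare()
	 * clears the pud, drops one reference, and rewrites 'address' so
	 * that the 'address += sz' step resumes at the next PUD boundary --
	 * the whole shared range was unmapped by the single pud_clear().
	 */
	if (huge_pmd_unshare(mm, &address, ptep))
		continue;
	/* otherwise tear down the single huge pte at 'address' as usual */
}

Note the address rewrite in the removed code: ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE leaves *addr one huge page short of the next PUD boundary, so the caller's increment lands exactly on that boundary.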
