Skip to content

Commit

Permalink
thp: prevent hugepages during args/env copying into the user stack
Browse files Browse the repository at this point in the history
Transparent hugepages can only be created if rmap is fully
functional. So we must prevent hugepages from being created while
is_vma_temporary_stack() is true.

This also optimizes away some harmless but unnecessary setting of
khugepaged_scan.address and it switches some BUG_ON to VM_BUG_ON.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
aagit authored and torvalds committed Feb 15, 2011
1 parent 09f586b commit a7d6e4e
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 20 deletions.
3 changes: 2 additions & 1 deletion include/linux/huge_mm.h
Expand Up @@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
((__vma)->vm_flags & VM_HUGEPAGE))) && \
!((__vma)->vm_flags & VM_NOHUGEPAGE))
!((__vma)->vm_flags & VM_NOHUGEPAGE) && \
!is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma) \
((transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
Expand Down
35 changes: 16 additions & 19 deletions mm/huge_memory.c
Expand Up @@ -1811,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm,
/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
goto out;
if (is_vma_temporary_stack(vma))
goto out;
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));

pgd = pgd_offset(mm, address);
Expand Down Expand Up @@ -2032,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
if ((!(vma->vm_flags & VM_HUGEPAGE) &&
!khugepaged_always()) ||
(vma->vm_flags & VM_NOHUGEPAGE)) {
skip:
progress++;
continue;
}

/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
khugepaged_scan.address = vma->vm_end;
progress++;
continue;
}
if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
goto skip;
if (is_vma_temporary_stack(vma))
goto skip;

VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));

hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart >= hend) {
progress++;
continue;
}
if (hstart >= hend)
goto skip;
if (khugepaged_scan.address > hend)
goto skip;
if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart;
if (khugepaged_scan.address > hend) {
khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
progress++;
continue;
}
BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

while (khugepaged_scan.address < hend) {
int ret;
Expand Down Expand Up @@ -2086,7 +2083,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
breakouterloop_mmap_sem:

spin_lock(&khugepaged_mm_lock);
BUG_ON(khugepaged_scan.mm_slot != mm_slot);
VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
/*
* Release the current mm_slot if this mm is about to die, or
* if we scanned all vmas of this mm.
Expand Down Expand Up @@ -2241,9 +2238,9 @@ static int khugepaged(void *none)

for (;;) {
mutex_unlock(&khugepaged_mutex);
BUG_ON(khugepaged_thread != current);
VM_BUG_ON(khugepaged_thread != current);
khugepaged_loop();
BUG_ON(khugepaged_thread != current);
VM_BUG_ON(khugepaged_thread != current);

mutex_lock(&khugepaged_mutex);
if (!khugepaged_enabled())
Expand Down

0 comments on commit a7d6e4e

Please sign in to comment.