Revert "MIPS: make userspace mapping young by default"
commit 50c25ee upstream.

This reverts commit f685a53.

The MIPS cache flush logic needs to know whether the mapping was already
established to decide how to flush caches.  This is done by checking the
valid bit in the PTE.  The commit above breaks this logic by setting the
valid bit in the PTE for new mappings, which causes kernel crashes.
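
To illustrate the dependency (an editorial sketch, not part of the commit):
the check in question lives in local_r4k_flush_cache_page() in
arch/mips/mm/c-r4k.c. The condensed C below paraphrases that logic;
flush_via_user_mapping() and flush_via_kmap() are placeholder names for
the two flush paths, not real kernel symbols:

    /* Condensed sketch of the MIPS valid-bit check; helpers are placeholders. */
    static void sketch_flush_cache_page(pte_t pte, unsigned long addr,
                                        unsigned long pfn)
    {
            /* A non-present PTE cannot have cache lines to flush. */
            if (!pte_present(pte))
                    return;

            /*
             * _PAGE_VALID set means the mapping has actually been used,
             * so the user virtual address can be flushed directly;
             * otherwise the page is flushed through a temporary kernel
             * mapping.  Pre-setting _PAGE_VALID on brand-new mappings
             * defeats this distinction, hence the revert.
             */
            if (pte_val(pte) & _PAGE_VALID)
                    flush_via_user_mapping(addr);
            else
                    flush_via_kmap(pfn);
    }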

Link: https://lkml.kernel.org/r/20210526094335.92948-1-tsbogend@alpha.franken.de
Fixes: f685a53 ("MIPS: make userspace mapping young by default")
Reported-by: Zhou Yanjie <zhouyanjie@wanyeetech.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Huang Pei <huangpei@loongson.cn>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
tsbogend authored and gregkh committed Jun 10, 2021
1 parent 62fa43c commit 08ec01b
Showing 3 changed files with 26 additions and 16 deletions.
30 changes: 14 additions & 16 deletions arch/mips/mm/cache.c
@@ -157,31 +157,29 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p) __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
 	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[4]  = PVA(_PAGE_PRESENT);
-	protection_map[5]  = PVA(_PAGE_PRESENT);
-	protection_map[6]  = PVA(_PAGE_PRESENT);
-	protection_map[7]  = PVA(_PAGE_PRESENT);
+	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[4]  = PM(_PAGE_PRESENT);
+	protection_map[5]  = PM(_PAGE_PRESENT);
+	protection_map[6]  = PM(_PAGE_PRESENT);
+	protection_map[7]  = PM(_PAGE_PRESENT);
 
 	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
 				_PAGE_NO_READ);
-	protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-	protection_map[12] = PVA(_PAGE_PRESENT);
-	protection_map[13] = PVA(_PAGE_PRESENT);
-	protection_map[14] = PVA(_PAGE_PRESENT | _PAGE_WRITE);
-	protection_map[15] = PVA(_PAGE_PRESENT | _PAGE_WRITE);
+	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+	protection_map[12] = PM(_PAGE_PRESENT);
+	protection_map[13] = PM(_PAGE_PRESENT);
+	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 }
 
-#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
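
Side note for orientation (not part of the diff): protection_map[] is
indexed by the low four VM_* flag bits, which is why entries 0-7 cover
private mappings and 8-15 their shared counterparts. A slightly
simplified form of the generic lookup in mm/mmap.c, omitting the
arch_vm_get_page_prot() hook:

    /* Simplified from vm_get_page_prot() in mm/mmap.c; the real function
     * also ORs in arch_vm_get_page_prot(vm_flags). */
    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
            return protection_map[vm_flags &
                                  (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }

Under the reverted PVA() variant, every entry except PROT_NONE (indexes
0 and 8) therefore handed out _PAGE_VALID | _PAGE_ACCESSED before the
first real access.
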
8 changes: 8 additions & 0 deletions include/linux/pgtable.h
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
  * To be differentiate with macro pte_mkyoung, this macro is used on platforms
  * where software maintains page access bit.
  */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+	return pte;
+}
+#define pte_sw_mkyoung	pte_sw_mkyoung
+#endif
+
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
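
The hunk above restores the generic no-op fallback. An architecture that
maintains the accessed bit in software overrides it; on MIPS the
override in arch/mips/include/asm/pgtable.h is, as far as I can tell, a
one-line alias:

    /* MIPS manages the accessed bit in software; making a freshly
     * installed userspace PTE young up front saves one TLB-invalid
     * fault on first access. */
    #define pte_sw_mkyoung	pte_mkyoung
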
4 changes: 4 additions & 0 deletions mm/memory.c
@@ -2896,6 +2896,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	}
 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 	entry = mk_pte(new_page, vma->vm_page_prot);
+	entry = pte_sw_mkyoung(entry);
 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
 	/*
@@ -3561,6 +3562,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	__SetPageUptodate(page);
 
 	entry = mk_pte(page, vma->vm_page_prot);
+	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3745,6 +3747,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
 	if (prefault && arch_wants_old_prefaulted_pte())
 		entry = pte_mkold(entry);
+	else
+		entry = pte_sw_mkyoung(entry);
 
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
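
The restored else branch complements arch_wants_old_prefaulted_pte():
architectures whose hardware updates the access flag cheaply can ask for
old prefaulted PTEs, everyone else gets software-young ones. The
fallback in mm/memory.c looks roughly like this (comment paraphrased):

    #ifndef arch_wants_old_prefaulted_pte
    static inline bool arch_wants_old_prefaulted_pte(void)
    {
            /*
             * Marking a prefaulted PTE old and taking the later access
             * fault is only a win where hardware sets the access flag
             * (arm64 overrides this accordingly); default to young.
             */
            return false;
    }
    #endif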
