mm/mmap: Drop generic protection_map[] array
Move the protection_map[] array inside the arch for those platforms which
do not enable ARCH_HAS_VM_GET_PAGE_PROT. Afterwards the __SXXX/__PXXX
macros, which are now redundant, can be dropped completely.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Anshuman Khandual authored and intel-lab-lkp committed Jun 13, 2022
1 parent 847c271 commit 696f81b
Showing 50 changed files with 497 additions and 489 deletions.
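
For context, prior to this series the generic protection_map[] lived in mm/mmap.c and was populated from the per-arch __PXXX/__SXXX macros, roughly as in the simplified sketch below (hedged: the exact upstream code at this point also folds in arch hooks and may differ slightly):

pgprot_t protection_map[16] __ro_after_init = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* The low four vm_flags bits (READ, WRITE, EXEC, SHARED) index the array. */
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);
#endif	/* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */

Moving the array into each arch that does not enable ARCH_HAS_VM_GET_PAGE_PROT lets every such platform spell out its own entries directly, as the per-arch hunks below do, and the __PXXX/__SXXX macros lose their only remaining user.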
17 changes: 0 additions & 17 deletions arch/alpha/include/asm/pgtable.h
@@ -116,23 +116,6 @@ struct vm_area_struct;
* arch/alpha/mm/fault.c)
*/
/* xwr */
#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010 _PAGE_P(_PAGE_FOE)
#define __P011 _PAGE_P(_PAGE_FOE)
#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101 _PAGE_P(_PAGE_FOW)
#define __P110 _PAGE_P(0)
#define __P111 _PAGE_P(0)

#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010 _PAGE_S(_PAGE_FOE)
#define __S011 _PAGE_S(_PAGE_FOE)
#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101 _PAGE_S(_PAGE_FOW)
#define __S110 _PAGE_S(0)
#define __S111 _PAGE_S(0)

/*
* pgprot_noncached() is only for infiniband pci support, and a real
21 changes: 21 additions & 0 deletions arch/alpha/mm/init.c
@@ -280,3 +280,24 @@ mem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
memblock_free_all();
}

pgprot_t protection_map[16] __ro_after_init = {
[VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW |
_PAGE_FOR),
[VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW),
[VM_WRITE] = _PAGE_P(_PAGE_FOE),
[VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE),
[VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR),
[VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW),
[VM_EXEC | VM_WRITE] = _PAGE_P(0),
[VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0),
[VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW |
_PAGE_FOR),
[VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW),
[VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE),
[VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE),
[VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR),
[VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW),
[VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0),
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0)
};
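
The designated initializers above fill exactly the same 16 slots the old __PXXX/__SXXX macros named, because VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupy the low four bits of vm_flags. A quick correspondence, assuming the standard flag values from include/linux/mm.h:

/*
 * Illustrative only: vm_flags bit values that index protection_map[],
 * and how they line up with the old macro names.
 *
 *   VM_READ = 0x1, VM_WRITE = 0x2, VM_EXEC = 0x4, VM_SHARED = 0x8
 *
 *   VM_WRITE | VM_READ            == 3  -> old __P011 (private, xwr = 011)
 *   VM_SHARED | VM_EXEC | VM_READ == 13 -> old __S101 (shared,  xwr = 101)
 */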
18 changes: 0 additions & 18 deletions arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -72,24 +72,6 @@
* This is to enable COW mechanism
*/
/* xwr */
#define __P000 PAGE_U_NONE
#define __P001 PAGE_U_R
#define __P010 PAGE_U_R /* Pvt-W => !W */
#define __P011 PAGE_U_R /* Pvt-W => !W */
#define __P100 PAGE_U_X_R /* X => R */
#define __P101 PAGE_U_X_R
#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
#define __P111 PAGE_U_X_R /* Pvt-W => !W */

#define __S000 PAGE_U_NONE
#define __S001 PAGE_U_R
#define __S010 PAGE_U_W_R /* W => R */
#define __S011 PAGE_U_W_R
#define __S100 PAGE_U_X_R /* X => R */
#define __S101 PAGE_U_X_R
#define __S110 PAGE_U_X_W_R /* X => R */
#define __S111 PAGE_U_X_W_R

#ifndef __ASSEMBLY__

#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
19 changes: 19 additions & 0 deletions arch/arc/mm/mmap.c
@@ -74,3 +74,22 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}

pgprot_t protection_map[16] __ro_after_init = {
[VM_NONE] = PAGE_U_NONE,
[VM_READ] = PAGE_U_R,
[VM_WRITE] = PAGE_U_R,
[VM_WRITE | VM_READ] = PAGE_U_R,
[VM_EXEC] = PAGE_U_X_R,
[VM_EXEC | VM_READ] = PAGE_U_X_R,
[VM_EXEC | VM_WRITE] = PAGE_U_X_R,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
[VM_SHARED] = PAGE_U_NONE,
[VM_SHARED | VM_READ] = PAGE_U_R,
[VM_SHARED | VM_WRITE] = PAGE_U_W_R,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
[VM_SHARED | VM_EXEC] = PAGE_U_X_R,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
};
17 changes: 0 additions & 17 deletions arch/arm/include/asm/pgtable.h
@@ -137,23 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* 2) If we could do execute protection, then read is implied
* 3) write implies read permissions
*/
#define __P000 __PAGE_NONE
#define __P001 __PAGE_READONLY
#define __P010 __PAGE_COPY
#define __P011 __PAGE_COPY
#define __P100 __PAGE_READONLY_EXEC
#define __P101 __PAGE_READONLY_EXEC
#define __P110 __PAGE_COPY_EXEC
#define __P111 __PAGE_COPY_EXEC

#define __S000 __PAGE_NONE
#define __S001 __PAGE_READONLY
#define __S010 __PAGE_SHARED
#define __S011 __PAGE_SHARED
#define __S100 __PAGE_READONLY_EXEC
#define __S101 __PAGE_READONLY_EXEC
#define __S110 __PAGE_SHARED_EXEC
#define __S111 __PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
2 changes: 1 addition & 1 deletion arch/arm/lib/uaccess_with_memcpy.c
@@ -237,7 +237,7 @@ static int __init test_size_treshold(void)
if (!dst_page)
goto no_dst;
kernel_ptr = page_address(src_page);
user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
if (!user_ptr)
goto no_vmap;

19 changes: 19 additions & 0 deletions arch/arm/mm/mmu.c
@@ -1773,3 +1773,22 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr,

set_pte_ext(ptep, pteval, ext);
}

pgprot_t protection_map[16] __ro_after_init = {
[VM_NONE] = __PAGE_NONE,
[VM_READ] = __PAGE_READONLY,
[VM_WRITE] = __PAGE_COPY,
[VM_WRITE | VM_READ] = __PAGE_COPY,
[VM_EXEC] = __PAGE_READONLY_EXEC,
[VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
[VM_EXEC | VM_WRITE] = __PAGE_COPY_EXEC,
[VM_EXEC | VM_WRITE | VM_READ] = __PAGE_COPY_EXEC,
[VM_SHARED] = __PAGE_NONE,
[VM_SHARED | VM_READ] = __PAGE_READONLY,
[VM_SHARED | VM_WRITE] = __PAGE_SHARED,
[VM_SHARED | VM_WRITE | VM_READ] = __PAGE_SHARED,
[VM_SHARED | VM_EXEC] = __PAGE_READONLY_EXEC,
[VM_SHARED | VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
[VM_SHARED | VM_EXEC | VM_WRITE] = __PAGE_SHARED_EXEC,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __PAGE_SHARED_EXEC
};
18 changes: 0 additions & 18 deletions arch/csky/include/asm/pgtable.h
@@ -77,24 +77,6 @@
#define MAX_SWAPFILES_CHECK() \
BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)

#define __P000 PAGE_NONE
#define __P001 PAGE_READ
#define __P010 PAGE_READ
#define __P011 PAGE_READ
#define __P100 PAGE_READ
#define __P101 PAGE_READ
#define __P110 PAGE_READ
#define __P111 PAGE_READ

#define __S000 PAGE_NONE
#define __S001 PAGE_READ
#define __S010 PAGE_WRITE
#define __S011 PAGE_WRITE
#define __S100 PAGE_READ
#define __S101 PAGE_READ
#define __S110 PAGE_WRITE
#define __S111 PAGE_WRITE

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

19 changes: 19 additions & 0 deletions arch/csky/mm/init.c
@@ -197,3 +197,22 @@ void __init fixaddr_init(void)
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}

pgprot_t protection_map[16] __ro_after_init = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READ,
[VM_WRITE] = PAGE_READ,
[VM_WRITE | VM_READ] = PAGE_READ,
[VM_EXEC] = PAGE_READ,
[VM_EXEC | VM_READ] = PAGE_READ,
[VM_EXEC | VM_WRITE] = PAGE_READ,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_READ,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_READ,
[VM_SHARED | VM_WRITE] = PAGE_WRITE,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_WRITE,
[VM_SHARED | VM_EXEC] = PAGE_READ,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_WRITE,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_WRITE
};
27 changes: 0 additions & 27 deletions arch/hexagon/include/asm/pgtable.h
@@ -126,33 +126,6 @@ extern unsigned long _dflt_cache_att;
*/
#define CACHEDEF (CACHE_DEFAULT << 6)

/* Private (copy-on-write) page protections. */
#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
#define __P010 __P000 /* Write-only copy-on-write */
#define __P011 __P001 /* Read/Write copy-on-write */
#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_EXECUTE | CACHEDEF)
#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
_PAGE_READ | CACHEDEF)
#define __P110 __P100 /* Write/execute copy-on-write */
#define __P111 __P101 /* Read/Write/Execute, copy-on-write */

/* Shared page protections. */
#define __S000 __P000
#define __S001 __P001
#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_WRITE | CACHEDEF)
#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
_PAGE_WRITE | CACHEDEF)
#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_EXECUTE | CACHEDEF)
#define __S101 __P101
#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */

/* HUGETLB not working currently */
41 changes: 41 additions & 0 deletions arch/hexagon/mm/init.c
@@ -234,3 +234,44 @@ void __init setup_arch_memory(void)
* which is called by start_kernel() later on in the process
*/
}

pgprot_t protection_map[16] __ro_after_init = {
[VM_NONE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
CACHEDEF),
[VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | CACHEDEF),
[VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
CACHEDEF),
[VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | CACHEDEF),
[VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | CACHEDEF),
[VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | _PAGE_READ |
CACHEDEF),
[VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | CACHEDEF),
[VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | _PAGE_READ |
CACHEDEF),
[VM_SHARED] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
CACHEDEF),
[VM_SHARED | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | CACHEDEF),
[VM_SHARED | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_WRITE | CACHEDEF),
[VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | _PAGE_WRITE |
CACHEDEF),
[VM_SHARED | VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | CACHEDEF),
[VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | _PAGE_READ |
CACHEDEF),
[VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | _PAGE_WRITE |
CACHEDEF),
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | _PAGE_EXECUTE |
_PAGE_WRITE | CACHEDEF)
};
18 changes: 0 additions & 18 deletions arch/ia64/include/asm/pgtable.h
@@ -161,24 +161,6 @@
* attempts to write to the page.
*/
/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
#define __P011 PAGE_READONLY /* ditto */
#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
#define __S011 PAGE_SHARED
#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)

#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
27 changes: 26 additions & 1 deletion arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
gate_vma.vm_page_prot = __P101;
gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);

return 0;
}
@@ -490,3 +490,28 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif

pgprot_t protection_map[16] __ro_after_init = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READONLY,
[VM_WRITE] = PAGE_READONLY,
[VM_WRITE | VM_READ] = PAGE_READONLY,
[VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
_PAGE_AR_X_RX),
[VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
_PAGE_AR_RX),
[VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_READONLY,
[VM_SHARED | VM_WRITE] = PAGE_SHARED,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
[VM_SHARED | VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
_PAGE_AR_X_RX),
[VM_SHARED | VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
_PAGE_AR_RX),
[VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
_PAGE_AR_RWX),
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
_PAGE_AR_RWX)
};
19 changes: 0 additions & 19 deletions arch/loongarch/include/asm/pgtable-bits.h
@@ -83,25 +83,6 @@
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC)
#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)

#define __P000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
#define __P001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
#define __P010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
#define __P011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
#define __P100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
#define __P101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
#define __P110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
#define __P111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)

#define __S000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
#define __S001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
#define __S010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
#define __S011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
#define __S100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
#define __S101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
#define __S110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
#define __S111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)

#ifndef __ASSEMBLY__

#define pgprot_noncached pgprot_noncached
