LoongArch: Add memory management
Add memory management support for LoongArch, including cache and TLB
management, page fault handling, and ioremap/mmap support.

Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
chenhuacai committed Jun 3, 2022
1 parent 803b0fc commit 09cfefb
Showing 26 changed files with 3,172 additions and 0 deletions.
13 changes: 13 additions & 0 deletions arch/loongarch/include/asm/cache.h
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H

#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

#define __read_mostly __section(".data..read_mostly")

#endif /* _ASM_CACHE_H */
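
These two definitions are small but load-bearing: L1_CACHE_BYTES is derived from the Kconfig-selected line-size shift, and __read_mostly groups rarely-written variables into their own section so they do not share (and ping-pong) cache lines with write-heavy data. A minimal usage sketch, assuming CONFIG_L1_CACHE_SHIFT=6 (a 64-byte line, a common value but not mandated by this patch; both variables are hypothetical):

#include <asm/cache.h>
#include <linux/compiler.h>

static int example_threshold __read_mostly = 100;       /* placed in .data..read_mostly */
static char example_buf[256] __aligned(L1_CACHE_BYTES); /* starts on its own 64-byte line */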
80 changes: 80 additions & 0 deletions arch/loongarch/include/asm/cacheflush.h
@@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cpu-features.h>
#include <asm/cacheops.h>

extern void local_flush_icache_range(unsigned long start, unsigned long end);

#define flush_icache_range local_flush_icache_range
#define flush_icache_user_range local_flush_icache_range

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)

#define cache_op(op, addr)                                              \
        __asm__ __volatile__(                                           \
        "       cacop   %0, %1                                  \n"     \
        :                                                               \
        : "i" (op), "ZC" (*(unsigned char *)(addr)))

static inline void flush_icache_line_indexed(unsigned long addr)
{
        cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_vcache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_V, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_S, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_I, addr);
}

static inline void flush_dcache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void flush_vcache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_V, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_S, addr);
}

#include <asm-generic/cacheflush.h>

#endif /* _ASM_CACHEFLUSH_H */
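
A minimal sketch of how these single-line helpers compose into a range operation (not part of this patch; in the real kernel the D-cache line size is probed from CPUCFG rather than passed as a parameter, and this helper name is hypothetical):

#include <asm/cacheflush.h>

/* Write back and invalidate [start, end) from the D-cache, one line at a
 * time. line_size must be a power of two. */
static void example_wbinv_dcache_range(unsigned long start, unsigned long end,
                                       unsigned long line_size)
{
        unsigned long addr = start & ~(line_size - 1);  /* align down to a line boundary */

        for (; addr < end; addr += line_size)
                flush_dcache_line(addr);                /* Hit_Writeback_Inv_D on this line */
}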
37 changes: 37 additions & 0 deletions arch/loongarch/include/asm/cacheops.h
@@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Cache operations for the cache instruction.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_CACHEOPS_H
#define __ASM_CACHEOPS_H

/*
 * Most cache ops are split into a 2-bit field identifying the cache, and a
 * 3-bit field identifying the cache operation.
 */
#define CacheOp_Cache 0x03
#define CacheOp_Op 0x1c

#define Cache_I 0x00
#define Cache_D 0x01
#define Cache_V 0x02
#define Cache_S 0x03

#define Index_Invalidate 0x08
#define Index_Writeback_Inv 0x08
#define Hit_Invalidate 0x10
#define Hit_Writeback_Inv 0x10
#define CacheOp_User_Defined 0x18

#define Index_Invalidate_I (Cache_I | Index_Invalidate)
#define Index_Writeback_Inv_D (Cache_D | Index_Writeback_Inv)
#define Index_Writeback_Inv_V (Cache_V | Index_Writeback_Inv)
#define Index_Writeback_Inv_S (Cache_S | Index_Writeback_Inv)
#define Hit_Invalidate_I (Cache_I | Hit_Invalidate)
#define Hit_Writeback_Inv_D (Cache_D | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_V (Cache_V | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_S (Cache_S | Hit_Writeback_Inv)

#endif /* __ASM_CACHEOPS_H */
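
A worked decode of the encoding above: the cache selector occupies bits [1:0] (mask CacheOp_Cache = 0x03) and the operation bits [4:2] (mask CacheOp_Op = 0x1c), so the composed constants split cleanly back into their two fields (example_decode is illustrative only):

#include <asm/cacheops.h>

/* Composition: Index_Writeback_Inv_D = Cache_D | Index_Writeback_Inv = 0x01 | 0x08 = 0x09
 *              Hit_Writeback_Inv_S   = Cache_S | Hit_Writeback_Inv   = 0x03 | 0x10 = 0x13 */
static void example_decode(void)
{
        unsigned int op    = Hit_Writeback_Inv_D;       /* 0x11 */
        unsigned int cache = op & CacheOp_Cache;        /* 0x01 -> D-cache */
        unsigned int kind  = op & CacheOp_Op;           /* 0x10 -> a hit writeback-invalidate */
}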
13 changes: 13 additions & 0 deletions arch/loongarch/include/asm/fixmap.h
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H

#define NR_FIX_BTMAPS 64

#endif
83 changes: 83 additions & 0 deletions arch/loongarch/include/asm/hugetlb.h
@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>

uint64_t pmd_to_entrylo(unsigned long pmd_val);

#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long addr,
                                         unsigned long len)
{
        unsigned long task_size = STACK_TOP;
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;
        if (task_size - len < addr)
                return -EINVAL;
        return 0;
}

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
        pte_t clear;
        pte_t pte = *ptep;

        pte_val(clear) = (unsigned long)invalid_pte_table;
        set_pte_at(mm, addr, ptep, clear);
        return pte;
}

#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
{
        pte_t pte;

        pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
        flush_tlb_page(vma, addr);
        return pte;
}

#define __HAVE_ARCH_HUGE_PTE_NONE
static inline int huge_pte_none(pte_t pte)
{
        unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
        return !val || (val == (unsigned long)invalid_pte_table);
}

#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr,
                                             pte_t *ptep, pte_t pte,
                                             int dirty)
{
        int changed = !pte_same(*ptep, pte);

        if (changed) {
                set_pte_at(vma->vm_mm, addr, ptep, pte);
                /*
                 * There could be some standard-sized pages in there,
                 * get them all.
                 */
                flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
        }
        return changed;
}

#include <asm-generic/hugetlb.h>

#endif /* __ASM_HUGETLB_H */
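
Concrete numbers make the checks in prepare_hugepage_range() easier to follow (values assumed for illustration, not fixed by this patch): with 16 KB base pages, HPAGE_SIZE works out to 32 MB (see the page.h arithmetic below), so the mapping's address and length must both be 32 MB multiples and the range must fit below STACK_TOP:

/* Assuming PAGE_SHIFT = 14 (16 KB pages) -> HPAGE_SIZE = 32 MB, and 'file'
 * is an open hugetlbfs file of that page size (hypothetical):
 *
 *   prepare_hugepage_range(file, 0x02000000, 0x04000000)  ->  0        both 32 MB multiples
 *   prepare_hugepage_range(file, 0x02100000, 0x04000000)  -> -EINVAL   addr misaligned
 *   prepare_hugepage_range(file, 0x02000000, 0x04100000)  -> -EINVAL   len misaligned
 */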
115 changes: 115 additions & 0 deletions arch/loongarch/include/asm/page.h
@@ -0,0 +1,115 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>

/*
 * PAGE_SHIFT determines the page size
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT 12
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))

#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/pfn.h>

#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))

/*
 * It's normally defined only for FLATMEM config but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)

extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

extern unsigned long shm_align_mask;

struct page;
struct vm_area_struct;
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) })
typedef struct page *pgtable_t;

typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#define __pgd(x) ((pgd_t) { (x) })

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) })
#define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK)

#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

/*
 * __pa()/__va() should be used only during mem init.
 */
#define __pa(x) PHYSADDR(x)
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))

#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)

#ifdef CONFIG_FLATMEM

static inline int pfn_valid(unsigned long pfn)
{
        /* avoid <linux/mm.h> include hell */
        extern unsigned long max_mapnr;
        unsigned long pfn_offset = ARCH_PFN_OFFSET;

        return pfn >= pfn_offset && pfn < max_mapnr;
}

#endif

#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))

extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))

#define VM_DATA_DEFAULT_FLAGS \
        (VM_READ | VM_WRITE | \
         ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PAGE_H */
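
Two definitions above reward a worked example. HPAGE_SHIFT = PAGE_SHIFT + PAGE_SHIFT - 3 reflects that a PMD-level huge page covers one full page of 8-byte PTEs (2^(PAGE_SHIFT-3) entries of 2^PAGE_SHIFT bytes each), and ptep_buddy() XORs the low pte-sized address bit to pair each PTE slot with its neighbour in an aligned two-entry group. The 'pair' array below is hypothetical:

/* Huge-page sizes implied by the arithmetic:
 *   CONFIG_PAGE_SIZE_4KB : PAGE_SHIFT = 12, HPAGE_SHIFT = 21 ->   2 MB huge pages
 *   CONFIG_PAGE_SIZE_16KB: PAGE_SHIFT = 14, HPAGE_SHIFT = 25 ->  32 MB huge pages
 *   CONFIG_PAGE_SIZE_64KB: PAGE_SHIFT = 16, HPAGE_SHIFT = 29 -> 512 MB huge pages
 *
 * ptep_buddy() on adjacent PTE slots (assume 'pair' is 2*sizeof(pte_t) aligned):
 *   pte_t pair[2];
 *   ptep_buddy(&pair[0]) == &pair[1];
 *   ptep_buddy(&pair[1]) == &pair[0];
 */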
