Add implementation of pmap.9 MD functions

commit 81a01e8a3fdbbdc78e30e5c88be018345847f1e1 (parent ed27bd4)
Yann Sionneau authored May 08, 2013
sys/arch/lm32/include/pmap.h (53 lines changed)
@@ -3,7 +3,6 @@
 #ifndef _LM32_PMAP_H_
 #define _LM32_PMAP_H_
 #include <sys/resource.h>
-#include <uvm/uvm_page.h>
 
 #if !defined(_LOCORE) && (defined(MODULAR) || defined(_MODULE))
 
@@ -11,16 +10,44 @@ __CTASSERT(sizeof(struct vm_page_md) == sizeof(uintptr_t)*5);
 
 #endif /* !LOCORE && (MODULAR || _MODULE) */
 
+#define	__PMAP_PTP_N	512	/* # of page table page maps 2GB. */
+#define	PMAP_GROWKERNEL
+#define PMAP_STEAL_MEMORY
+
+struct pmap;
+
+static __inline void
+pmap_remove_all(struct pmap *pmap)
+{
+	/* Nothing. */
+}
+
+void tlbflush(void);
+void pmap_load(void);
+void pmap_virtual_space(vaddr_t *start, vaddr_t *end);
+paddr_t pmap_phys_address(paddr_t cookie);
+void pmap_bootstrap(void);
+
+#define		pmap_resident_count(pmap)       ((pmap)->pm_stats.resident_count)
+#define		pmap_wired_count(pmap)          ((pmap)->pm_stats.wired_count)
+#define		pmap_update(pmap)               ((void)0)
+#define	__HAVE_VM_PAGE_MD
+
+struct pv_entry;
+struct vm_page_md {
+	SLIST_HEAD(, pv_entry) pvh_head;
+	int pvh_flags;
+};
+
+#include <uvm/uvm_page.h>
 
 /*
  * Pmap stuff
  */
 struct pmap {
-	pt_entry_t		*pm_ptab;	/* KVA of page table */
-	u_int			pm_count;	/* pmap reference count */
+	pt_entry_t **pm_ptp;
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
 	int pm_refcnt;
-	int			pm_ptpages;	/* more stats: PT pages */
 };
 
 #define L2_SLOT_PTE	(KERNBASE/NBPD_L2-1) /* 767: for recursive PDP map */
@@ -31,8 +58,20 @@ struct pmap {
 #define pmap_pte_set(p, n)		do { *(p) = (n); } while (0)
 #define pmap_pte_flush()		/* nothing */
 
-void tlbflush(void);
-void pmap_load(void);
-void pmap_reference(pmap_t pmap);
+#define	PVH_REFERENCED		1
+#define	PVH_MODIFIED		2
+
+/* MD pmap utils. */
+pt_entry_t *__pmap_pte_lookup(pmap_t, vaddr_t);
+bool __pmap_pte_load(pmap_t, vaddr_t, int);
+pt_entry_t *__pmap_kpte_lookup(vaddr_t);
+
+#define	VM_MDPAGE_INIT(pg)						\
+do {									\
+	struct vm_page_md *pvh = &(pg)->mdpage;				\
+	SLIST_INIT(&pvh->pvh_head);					\
+	pvh->pvh_flags = 0;						\
+} while (/*CONSTCOND*/0)
+
 
 #endif /* !_LM32_PMAP_H_ */
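[Editor's note, not part of the commit] The vm_page_md / pv_entry pair declared above is the per-physical-page bookkeeping this pmap uses: each managed page carries an SLIST of (pmap, va) mappings plus PVH_* flags. A minimal host-side sketch of that relationship, using only <sys/queue.h> and made-up sample values:

#include <stdio.h>
#include <sys/queue.h>

struct pv_entry {
	void *pv_pmap;			/* owning pmap (opaque in this sketch) */
	unsigned long pv_va;		/* virtual address of the mapping */
	SLIST_ENTRY(pv_entry) pv_link;
};

struct vm_page_md {
	SLIST_HEAD(, pv_entry) pvh_head;	/* all mappings of the page */
	int pvh_flags;				/* PVH_REFERENCED | PVH_MODIFIED */
};

int main(void)
{
	struct vm_page_md md = { SLIST_HEAD_INITIALIZER(md.pvh_head), 0 };
	struct pv_entry pv = { NULL, 0x40123000ul, { NULL } };
	struct pv_entry *p;

	/* Insertion/walk mirrors __pmap_pv_enter() and pmap_page_protect() below. */
	SLIST_INSERT_HEAD(&md.pvh_head, &pv, pv_link);
	SLIST_FOREACH(p, &md.pvh_head, pv_link)
		printf("mapping of this page at va 0x%lx\n", p->pv_va);
	return 0;
}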
sys/arch/lm32/include/pte.h (44 lines changed)
@@ -80,38 +80,28 @@ typedef uint32_t pt_entry_t;		/* PTE */
  * now we define various for playing with virtual addresses
  */
 
-#define	PG_FRAME	0xfffff000	/* page frame mask */
-#define	PG_LGFRAME	0xffc00000	/* large (4MB) page frame mask */
+#define	PG_FRAME	(0xfffff000)	/* page frame mask */
 
+/* software emulated bits */
 
-#define L1_SHIFT 	12
-#define L2_SHIFT 	21
+#define PG_WIRED		(1 << 11)
+#define PG_NOCACHE		(1 << 10)
+#define PG_WRITE_COMBINE	(1 << 9)
+#define PG_WRITE_BACK		(1 << 8)
+#define PG_NOCACHE_OVR		(1 << 7)
+#define PG_D			(1 << 6)
+
+/* hardware managed bits */
+#define PG_RO			(1 << 1) /* Page read only */
+
+#define PG_PR_MASK		(1 << 1) /* Page protection mask */
+
+#define L1_SHIFT 	(12)
+#define L2_SHIFT 	(21)
 #define	NBPD_L1		(1ULL << L1_SHIFT) /* # bytes mapped by L1 ent (4K) */
 #define	NBPD_L2		(1ULL << L2_SHIFT) /* # bytes mapped by L2 ent (2MB) */
 
-#define L1_MASK 	0x001ff000
-
-/*
- * here we define the bits of the PDE/PTE, as described above:
- *
- * XXXCDC: need to rename these (PG_u == ugly).
- */
+#define L1_MASK 	(0x001ff000)
 
-#define	PG_V		0x00000001	/* valid entry */
-#define	PG_RO		0x00000000	/* read-only page */
-#define	PG_RW		0x00000002	/* read-write page */
-#define	PG_u		0x00000004	/* user accessible page */
-#define	PG_PROT		0x00000806	/* all protection bits */
-#define PG_WT		0x00000008	/* write through */
-#define	PG_N		0x00000010	/* non-cacheable */
-#define	PG_U		0x00000020	/* has been used */
-#define	PG_M		0x00000040	/* has been modified */
-#define PG_PAT		0x00000080	/* PAT (on pte) */
-#define PG_PS		0x00000080	/* 4MB page size (2MB for PAE) */
-#define PG_G		0x00000100	/* global, don't TLB flush */
-#define PG_AVAIL1	0x00000200	/* ignored by hardware */
-#define PG_AVAIL2	0x00000400	/* ignored by hardware */
-#define PG_AVAIL3	0x00000800	/* ignored by hardware */
-#define PG_LGPAT	0x00001000	/* PAT on large pages */
 
 #endif /* _LM32_PTE_H_ */
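[Editor's note, not part of the commit] The new pte.h layout keeps the physical frame in bits 31..12 (PG_FRAME), puts the only hardware-interpreted protection bit at bit 1 (PG_RO), and reserves bits 6..11 for software-emulated state. A tiny host-side sketch of how pmap_enter()/pmap_kenter_pa() below compose an entry from these bits; the constants are copied from the hunk above and the physical address is a made-up sample:

#include <stdio.h>
#include <stdint.h>

#define PG_FRAME	0xfffff000u	/* page frame mask (bits 31..12) */
#define PG_WIRED	(1u << 11)	/* software: wired mapping */
#define PG_D		(1u << 6)	/* software: dirty, i.e. writable */
#define PG_RO		(1u << 1)	/* hardware: read only */

int main(void)
{
	uint32_t pa = 0x40123000u;			/* sample physical address */
	uint32_t pte = (pa & PG_FRAME) | PG_WIRED | PG_D; /* writable: PG_RO stays clear */

	printf("pte 0x%08x: frame 0x%08x wired=%u dirty=%u ro=%u\n",
	    (unsigned)pte, (unsigned)(pte & PG_FRAME),
	    !!(pte & PG_WIRED), !!(pte & PG_D), !!(pte & PG_RO));
	return 0;
}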
sys/arch/lm32/lm32/pmap.c (839 lines changed)
@@ -2,12 +2,170 @@
  * COPYRIGHT (C) 2013 Yann Sionneau <yann.sionneau@gmail.com>
  */
 
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/pool.h>
+#include <sys/msgbuf.h>
+#include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */
+
+#include <uvm/uvm.h>
 #include <lm32/pmap.h>
 #include <sys/types.h>
 #include <lm32/cpu.h>
 #include <sys/systm.h>
 #include <uvm/uvm_map.h>
 #include <uvm/uvm_extern.h>
+#include <uvm/uvm_pmap.h>
+
+#ifdef DEBUG
+#define	STATIC
+#else
+#define	STATIC	static
+#endif
+
+#define	__PMAP_PTP_SHIFT	22
+#define	__PMAP_PTP_TRUNC(va)						\
+	(((va) + (1 << __PMAP_PTP_SHIFT) - 1) & ~((1 << __PMAP_PTP_SHIFT) - 1))
+#define	__PMAP_PTP_PG_N		(PAGE_SIZE / sizeof(pt_entry_t))
+#define	__PMAP_PTP_INDEX(va)	(((va) >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1))
+#define	__PMAP_PTP_OFSET(va)	((va >> PGSHIFT) & (__PMAP_PTP_PG_N - 1))
+
+/* pmap pool */
+STATIC struct pool __pmap_pmap_pool;
+STATIC struct pool __pmap_pv_pool;
+struct pmap __pmap_kernel;
+struct pmap *const kernel_pmap_ptr = &__pmap_kernel;
+#define	__pmap_pv_alloc()	pool_get(&__pmap_pv_pool, PR_NOWAIT)
+#define	__pmap_pv_free(pv)	pool_put(&__pmap_pv_pool, (pv))
+STATIC void __pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t);
+STATIC void __pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t);
+STATIC void *__pmap_pv_page_alloc(struct pool *, int);
+STATIC void __pmap_pv_page_free(struct pool *, void *);
+STATIC struct pool_allocator pmap_pv_page_allocator = {
+	__pmap_pv_page_alloc, __pmap_pv_page_free, 0,
+};
+
+STATIC vaddr_t __pmap_kve;	/* VA of last kernel virtual */
+paddr_t avail_start;		/* PA of first available physical page */
+paddr_t avail_end;		/* PA of last available physical page */
+
+/* For the fast tlb miss handler */
+pt_entry_t **curptd;		/* p1 va of curlwp->...->pm_ptp */
+
+struct pv_entry {
+	struct pmap *pv_pmap;
+	vaddr_t pv_va;
+	SLIST_ENTRY(pv_entry) pv_link;
+};
+
+/* page table entry ops. */
+STATIC pt_entry_t *__pmap_pte_alloc(pmap_t, vaddr_t);
+/* pmap_enter util */
+STATIC bool __pmap_map_change(pmap_t, vaddr_t, paddr_t, vm_prot_t,
+    pt_entry_t);
+
+void pmap_bootstrap(void)
+{
+	/* Steal msgbuf area */
+	initmsgbuf((void *)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);
+
+	avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
+	avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+	__pmap_kve = VM_MIN_KERNEL_ADDRESS;
+
+	pmap_kernel()->pm_refcnt = 1;
+	pmap_kernel()->pm_ptp = (pt_entry_t **)uvm_pageboot_alloc(PAGE_SIZE);
+	memset(pmap_kernel()->pm_ptp, 0, PAGE_SIZE);
+
+	/* Mask all interrupt */
+	asm volatile("wcsr IM, r0"); // IM = 0;
+	/* Enable MMU */
+	lm32_mmu_start();
+}
+
+vaddr_t
+pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
+{
+	struct vm_physseg *bank;
+	int i, j, npage;
+	paddr_t pa;
+	vaddr_t va;
+
+	KDASSERT(!uvm.page_init_done);
+
+	size = round_page(size);
+	npage = atop(size);
+
+	bank = NULL;
+	for (i = 0; i < vm_nphysseg; i++) {
+		bank = VM_PHYSMEM_PTR(i);
+		if (npage <= bank->avail_end - bank->avail_start)
+			break;
+	}
+	KDASSERT(i != vm_nphysseg);
+	KDASSERT(bank != NULL);
+
+	/* Steal pages */
+	pa = ptoa(bank->avail_start);
+	bank->avail_start += npage;
+	bank->start += npage;
+
+	/* GC memory bank */
+	if (bank->avail_start == bank->end) {
+		/* Remove this segment from the list. */
+		vm_nphysseg--;
+		KDASSERT(vm_nphysseg > 0);
+		for (j = i; i < vm_nphysseg; j++)
+			VM_PHYSMEM_PTR_SWAP(j, j + 1);
+	}
+
+	va = pa;
+	memset((void *)va, 0, size);
+
+	return (va);
+}
+
+vaddr_t
+pmap_growkernel(vaddr_t maxkvaddr)
+{
+	int i, n;
+
+	if (maxkvaddr <= __pmap_kve)
+		return (__pmap_kve);
+
+	i = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
+	__pmap_kve = __PMAP_PTP_TRUNC(maxkvaddr);
+	n = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
+
+	/* Allocate page table pages */
+	for (;i < n; i++) {
+		if (__pmap_kernel.pm_ptp[i] != NULL)
+			continue;
+
+		if (uvm.page_init_done) {
+			struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL,
+			    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
+			if (pg == NULL)
+				goto error;
+			__pmap_kernel.pm_ptp[i] = (pt_entry_t *)
+			    VM_PAGE_TO_PHYS(pg);
+		} else {
+			pt_entry_t *ptp = (pt_entry_t *)
+			    uvm_pageboot_alloc(PAGE_SIZE);
+			if (ptp == NULL)
+				goto error;
+			__pmap_kernel.pm_ptp[i] = ptp;
+			memset(ptp, 0, PAGE_SIZE);
+		}
+	}
+
+	return (__pmap_kve);
+ error:
+	panic("pmap_growkernel: out of memory.");
+	/* NOTREACHED */
+}
+
 void tlbflush(void)
 {
 	/* flush DTLB */
@@ -105,3 +263,684 @@ pmap_load(void)
 	kpreempt_enable();
 }
 
+void pmap_destroy(pmap_t pmap)
+{
+	int i;
+
+	if (--pmap->pm_refcnt > 0)
+		return;
+
+	/* Deallocate all page table page */
+	for (i = 0; i < __PMAP_PTP_N; i++) {
+		vaddr_t va = (vaddr_t)pmap->pm_ptp[i];
+		if (va == 0)
+			continue;
+#ifdef DEBUG	/* Check no mapping exists. */
+		{
+			int j;
+			pt_entry_t *pte = (pt_entry_t *)va;
+			for (j = 0; j < __PMAP_PTP_PG_N; j++, pte++)
+				KDASSERT(*pte == 0);
+		}
+#endif /* DEBUG */
+		lm32_dtlb_invalidate_line((vaddr_t)va);
+		/* invalidate CPU Data Cache not sure if this is needed */
+		/* This is only needed if we can get cache aliasing */
+//		lm32_dcache_invalidate();
+		/* Free page table */
+		uvm_pagefree(PHYS_TO_VM_PAGE(va));
+	}
+	/* Deallocate page table page holder */
+	/* invalidate CPU Data Cache not sure if this is needed */
+	/* This is only needed if we can get cache aliasing */
+	lm32_dtlb_invalidate_line((vaddr_t)pmap->pm_ptp);
+//	lm32_dcache_invalidate();
+	uvm_pagefree(PHYS_TO_VM_PAGE((vaddr_t)pmap->pm_ptp));
+
+	pool_put(&__pmap_pmap_pool, pmap);
+}
+
+pmap_t pmap_create(void)
+{
+	pmap_t pmap;
+
+	pmap = pool_get(&__pmap_pmap_pool, PR_WAITOK);
+	memset(pmap, 0, sizeof(struct pmap));
+	pmap->pm_refcnt = 1;
+	/* Allocate page table page holder (512 slot) */
+	pmap->pm_ptp = (pt_entry_t **)
+	    VM_PAGE_TO_PHYS(
+		    uvm_pagealloc(NULL, 0, NULL,
+			UVM_PGA_USERESERVE | UVM_PGA_ZERO));
+
+	return (pmap);
+}
+
+void pmap_init(void)
+{
+	/* Initialize pmap module */
+	pool_init(&__pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
+	    &pool_allocator_nointr, IPL_NONE);
+	pool_init(&__pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
+	    &pmap_pv_page_allocator, IPL_NONE);
+	pool_setlowat(&__pmap_pv_pool, 16);
+	pool_setlowat(&__pmap_pv_pool, 252);
+}
+
+paddr_t pmap_phys_address(paddr_t cookie)
+{
+	return (lm32_ptob(cookie));
+}
+
+void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
+{
+
+	*start = VM_MIN_KERNEL_ADDRESS;
+	*end = VM_MAX_KERNEL_ADDRESS;
+}
+
+/*
+ * pv_entry pool allocator:
+ *	void *__pmap_pv_page_alloc(struct pool *pool, int flags):
+ *	void __pmap_pv_page_free(struct pool *pool, void *v):
+ */
+void * __pmap_pv_page_alloc(struct pool *pool, int flags)
+{
+	struct vm_page *pg;
+
+	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
+	if (pg == NULL)
+		return (NULL);
+
+	return ((void *)VM_PAGE_TO_PHYS(pg));
+}
+
+void __pmap_pv_page_free(struct pool *pool, void *v)
+{
+	vaddr_t va = (vaddr_t)v;
+
+	/* Invalidate cache for next use of this page */
+	/* Only needed if cache can have aliases */
+//	lm32_icache_invalidate();
+	uvm_pagefree(PHYS_TO_VM_PAGE(va));
+}
+
+void pmap_activate(struct lwp *l)
+{
+	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
+
+/* Doing lazy tlb updating ?
+	lm32_tlb_load_pmap(&pmap);
+*/
+
+	curptd = pmap->pm_ptp;
+}
+
+void pmap_deactivate(struct lwp *l)
+{
+
+	/* Nothing to do */
+}
+
+int pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
+{
+	struct vm_page *pg;
+	struct vm_page_md *pvh;
+	pt_entry_t entry, *pte;
+
+	/* "flags" never exceed "prot" */
+	KDASSERT(prot != 0 && ((flags & VM_PROT_ALL) & ~prot) == 0);
+
+	pg = PHYS_TO_VM_PAGE(pa);
+	entry = (pa & PG_FRAME);
+	if (flags & PMAP_WIRED)
+		entry |= PG_WIRED;
+
+	if (pg != NULL) {	/* memory-space */
+		pvh = VM_PAGE_TO_MD(pg);
+		entry &= ~(PG_NOCACHE); /* always cached */
+
+		/* Seed modified/reference tracking */
+		if (flags & VM_PROT_WRITE) {
+			entry |= PG_D;
+			pvh->pvh_flags |= PVH_MODIFIED | PVH_REFERENCED;
+		} else if (flags & VM_PROT_ALL) {
+			pvh->pvh_flags |= PVH_REFERENCED;
+		}
+
+		/* Protection */
+		if ((prot & VM_PROT_WRITE) && (pvh->pvh_flags & PVH_MODIFIED)) {
+			entry &= ~(PG_RO);
+		} else {
+			entry |= (PG_RO);
+		}
+
+		/* Check for existing mapping */
+		if (__pmap_map_change(pmap, va, pa, prot, entry))
+			return (0);
+
+		/* Add to physical-virtual map list of this page */
+		__pmap_pv_enter(pmap, pg, va);
+
+	} else {	/* bus-space (always uncached map) */
+		if (prot & VM_PROT_WRITE)
+		{
+			entry &= ~(PG_RO);
+			entry |= (PG_D);
+		}
+		else
+			entry |= PG_RO;
+	}
+
+	/* Register to page table */
+	pte = __pmap_pte_alloc(pmap, va);
+	if (pte == NULL) {
+		if (flags & PMAP_CANFAIL)
+			return ENOMEM;
+		panic("pmap_enter: cannot allocate pte");
+	}
+
+	*pte = entry;
+
+	lm32_dtlb_update(va, entry);
+	if (prot & VM_PROT_EXECUTE)
+		lm32_itlb_update(va, entry);
+
+/* No need to flush Instruction Cache IMO
+	if (!SH_HAS_UNIFIED_CACHE &&
+	    (prot == (VM_PROT_READ | VM_PROT_EXECUTE)))
+		sh_icache_sync_range_index(va, PAGE_SIZE);
+*/
+	if (entry & PG_WIRED)
+		pmap->pm_stats.wired_count++;
+	pmap->pm_stats.resident_count++;
+
+	return (0);
+}
+
+void pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
+{
+	struct vm_page *pg;
+	pt_entry_t *pte, entry;
+	vaddr_t va;
+
+	KDASSERT((sva & PGOFSET) == 0);
+
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
+		    (entry = *pte) == 0)
+			continue;
+
+		if ((pg = PHYS_TO_VM_PAGE(entry & PG_FRAME)) != NULL)
+			__pmap_pv_remove(pmap, pg, va);
+
+		if (entry & PG_WIRED)
+			pmap->pm_stats.wired_count--;
+		pmap->pm_stats.resident_count--;
+		*pte = 0;
+
+		lm32_tlb_invalidate_line(va);
+	}
+}
+
+void pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
+{
+	bool prot_exec = prot & VM_PROT_EXECUTE;
+	pt_entry_t *pte, entry, protbits;
+	vaddr_t va;
+
+	sva = trunc_page(sva);
+
+	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+		pmap_remove(pmap, sva, eva);
+		return;
+	}
+
+	switch (prot) {
+	default:
+		panic("pmap_protect: invalid protection mode %x", prot);
+		/* NOTREACHED */
+	case VM_PROT_READ:
+		/* FALLTHROUGH */
+	case VM_PROT_READ | VM_PROT_EXECUTE:
+		protbits = PG_RO;
+		break;
+	case VM_PROT_READ | VM_PROT_WRITE:
+		/* FALLTHROUGH */
+	case VM_PROT_ALL:
+		break;
+	}
+
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+
+		if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
+		    (entry = *pte) == 0)
+			continue;
+
+/* I think this is not needed
+		if (LM32_HAS_VIRTUAL_ALIAS && (entry & PG_D)) {
+			if (!SH_HAS_UNIFIED_CACHE && (prot & VM_PROT_EXECUTE))
+				lm32_icache_invalidate();
+			else
+				lm32_dcache_invalidate();
+		}
+*/
+		entry = (entry & ~PG_FRAME) | protbits;
+		*pte = entry;
+
+		lm32_dtlb_update(va, entry);
+		if (prot_exec)
+			lm32_itlb_update(va, entry);
+		else
+			lm32_itlb_invalidate_line(va);
+	}
+}
+
+void pmap_unwire(pmap_t pmap, vaddr_t va)
+{
+	pt_entry_t *pte, entry;
+
+	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
+	    (entry = *pte) == 0 ||
+	    (entry & PG_WIRED) == 0)
+		return;
+
+	*pte = entry & ~PG_WIRED;
+	pmap->pm_stats.wired_count--;
+}
+
+bool pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
+{
+	pt_entry_t *pte;
+
+	pte = __pmap_pte_lookup(pmap, va);
+	if (pte == NULL || *pte == 0)
+		return (false);
+
+	if (pap != NULL)
+		*pap = (*pte & PG_FRAME) | (va & PGOFSET);
+
+	return (true);
+}
+
+void pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
+{
+	pt_entry_t *pte, entry;
+
+	KDASSERT((va & PGOFSET) == 0);
+	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);
+
+	entry = (pa & PG_FRAME);
+	if (prot & VM_PROT_WRITE)
+		entry |= PG_D;
+	else
+		entry |= PG_RO;
+
+	pte = __pmap_kpte_lookup(va);
+
+	KDASSERT(*pte == 0);
+	*pte = entry;
+
+	if (prot & VM_PROT_EXECUTE)
+		lm32_itlb_update(va, entry);
+	else
+		lm32_itlb_invalidate_line(va);
+
+	lm32_dtlb_update(va, entry);
+}
+
+void pmap_kremove(vaddr_t va, vsize_t len)
+{
+	pt_entry_t *pte;
+	vaddr_t eva = va + len;
+
+	KDASSERT((va & PGOFSET) == 0);
+	KDASSERT((len & PGOFSET) == 0);
+	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && eva <= VM_MAX_KERNEL_ADDRESS);
+
+	for (; va < eva; va += PAGE_SIZE) {
+		pte = __pmap_kpte_lookup(va);
+		KDASSERT(pte != NULL);
+		if (*pte == 0)
+			continue;
+/* I think this is not needed
+		if (SH_HAS_VIRTUAL_ALIAS && PHYS_TO_VM_PAGE(*pte & PG_FRAME))
+			sh_dcache_wbinv_range(va, PAGE_SIZE);
+*/
+		*pte = 0;
+
+		lm32_tlb_invalidate_line(va);
+	}
+}
+
+void pmap_copy_page(paddr_t src, paddr_t dst)
+{
+	memcpy((void *)dst, (void *)src, PAGE_SIZE);
+}
+
+void pmap_zero_page(paddr_t phys)
+{
+	memset((void *)phys, 0, PAGE_SIZE);
+}
+
+void pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
+{
+	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
+	struct pv_entry *pv;
+	struct pmap *pmap;
+	vaddr_t va;
+	int s;
+
+	switch (prot) {
+	case VM_PROT_READ | VM_PROT_WRITE:
+		/* FALLTHROUGH */
+	case VM_PROT_ALL:
+		break;
+
+	case VM_PROT_READ:
+		/* FALLTHROUGH */
+	case VM_PROT_READ | VM_PROT_EXECUTE:
+		s = splvm();
+		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
+			pmap = pv->pv_pmap;
+			va = pv->pv_va;
+
+			KDASSERT(pmap);
+			pmap_protect(pmap, va, va + PAGE_SIZE, prot);
+		}
+		splx(s);
+		break;
+
+	default:
+		/* Remove all */
+		s = splvm();
+		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
+			va = pv->pv_va;
+			pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
+		}
+		splx(s);
+	}
+}
+
+bool pmap_clear_modify(struct vm_page *pg)
+{
+	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
+	struct pv_entry *pv;
+	struct pmap *pmap;
+	pt_entry_t *pte, entry;
+	bool modified;
+	vaddr_t va;
+	int s;
+
+	modified = pvh->pvh_flags & PVH_MODIFIED;
+	if (!modified)
+		return (false);
+
+	pvh->pvh_flags &= ~PVH_MODIFIED;
+
+	s = splvm();
+	if (SLIST_EMPTY(&pvh->pvh_head)) {/* no map on this page */
+		splx(s);
+		return (true);
+	}
+
+	/* Write-back and invalidate TLB entry */
+//	if (!SH_HAS_VIRTUAL_ALIAS && SH_HAS_WRITEBACK_CACHE)
+		lm32_dcache_invalidate();
+
+	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
+		pmap = pv->pv_pmap;
+		va = pv->pv_va;
+		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
+			continue;
+		entry = *pte;
+		if ((entry & PG_D) == 0)
+			continue;
+// I think this is not needed
+//		if (SH_HAS_VIRTUAL_ALIAS)
+//			sh_dcache_wbinv_range_index(va, PAGE_SIZE);
+
+		*pte = entry & ~PG_D;
+		lm32_tlb_invalidate_line(va);
+	}
+	splx(s);
+
+	return (true);
+}
+
+bool pmap_clear_reference(struct vm_page *pg)
+{
+	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
+	struct pv_entry *pv;
+	pt_entry_t *pte;
+	pmap_t pmap;
+	vaddr_t va;
+	int s;
+
+	if ((pvh->pvh_flags & PVH_REFERENCED) == 0)
+		return (false);
+
+	pvh->pvh_flags &= ~PVH_REFERENCED;
+
+	s = splvm();
+	/* Restart reference bit emulation */
+	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
+		pmap = pv->pv_pmap;
+		va = pv->pv_va;
+
+		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
+			continue;
+
+		lm32_tlb_invalidate_line(va);
+	}
+	splx(s);
+
+	return (true);
+}
+
+bool pmap_is_modified(struct vm_page *pg)
+{
+	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
+
+	return ((pvh->pvh_flags & PVH_MODIFIED) ? true : false);
+}
+
+bool pmap_is_referenced(struct vm_page *pg)
+{
+	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
+
+	return ((pvh->pvh_flags & PVH_REFERENCED) ? true : false);
+}
+
+/*
+ * pt_entry_t *__pmap_pte_lookup(pmap_t pmap, vaddr_t va):
+ *	lookup page table entry, if not allocated, returns NULL.
+ */
+pt_entry_t *
+__pmap_pte_lookup(pmap_t pmap, vaddr_t va)
+{
+	pt_entry_t *ptp;
+
+	/* Lookup page table page */
+	ptp = pmap->pm_ptp[__PMAP_PTP_INDEX(va)];
+	if (ptp == NULL)
+		return (NULL);
+
+	return (ptp + __PMAP_PTP_OFSET(va));
+}
+
+/*
+ * bool __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa,
+ *     vm_prot_t prot, pt_entry_t entry):
+ *	Handle the situation that pmap_enter() is called to enter a
+ *	mapping at a virtual address for which a mapping already
+ *	exists.
+ */
+bool
+__pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
+    pt_entry_t entry)
+{
+	pt_entry_t *pte, oentry;
+	vaddr_t eva = va + PAGE_SIZE;
+
+	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
+	    ((oentry = *pte) == 0))
+		return (false);		/* no mapping exists. */
+
+	if (pa != (oentry & PG_FRAME)) {
+		/* Enter a mapping at a mapping to another physical page. */
+		pmap_remove(pmap, va, eva);
+		return (false);
+	}
+
+	/* Pre-existing mapping */
+
+	/* Protection change. */
+	if ((oentry & PG_PR_MASK) != (entry & PG_PR_MASK))
+		pmap_protect(pmap, va, eva, prot);
+
+	/* Wired change */
+	if (oentry & PG_WIRED) {
+		if (!(entry & PG_WIRED)) {
+			/* wired -> unwired */
+			*pte = entry;
+			/* "wired" is software bits. no need to update TLB */
+			pmap->pm_stats.wired_count--;
+		}
+	} else if (entry & PG_WIRED) {
+		/* unwired -> wired. make sure to reflect "flags" */
+		pmap_remove(pmap, va, eva);
+		return (false);
+	}
+
+	return (true);	/* mapping was changed. */
+}
+
+/*
+ * pt_entry_t *__pmap_kpte_lookup(vaddr_t va):
+ *	kernel virtual only version of __pmap_pte_lookup().
+ */
+pt_entry_t *
+__pmap_kpte_lookup(vaddr_t va)
+{
+	pt_entry_t *ptp;
+
+	ptp = __pmap_kernel.pm_ptp[__PMAP_PTP_INDEX(va-VM_MIN_KERNEL_ADDRESS)];
+	if (ptp == NULL)
+		return NULL;
+
+	return (ptp + __PMAP_PTP_OFSET(va));
+}
+
+/*
+ * void __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
+ *	Remove physical-virtual map from vm_page.
+ */
+void
+__pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr)
+{
+	struct vm_page_md *pvh;
+	struct pv_entry *pv;
+	int s;
+
+	s = splvm();
+	pvh = VM_PAGE_TO_MD(pg);
+	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
+		if (pv->pv_pmap == pmap && pv->pv_va == vaddr) {
+// FIXME
+/* is this needed ??? */
+			if
+//			    (SH_HAS_VIRTUAL_ALIAS ||
+//			    (SH_HAS_WRITEBACK_CACHE &&
+				(pvh->pvh_flags & PVH_MODIFIED)
+//			))
+			{
+				/*
+				 * Always use index ops. since I don't want to
+				 * worry about address space.
+				 */
+				lm32_dcache_invalidate();
+			}
+
+			SLIST_REMOVE(&pvh->pvh_head, pv, pv_entry, pv_link);
+			__pmap_pv_free(pv);
+			break;
+		}
+	}
+#ifdef DEBUG
+	/* Check duplicated map. */
+	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link)
+	    KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
+#endif
+	splx(s);
+}
+
+/*
+ * void __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
+ *	Insert physical-virtual map to vm_page.
+ *	Assume pre-existed mapping is already removed.
+ */
+void
+__pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va)
+{
+	struct vm_page_md *pvh;
+	struct pv_entry *pv;
+	int s;
+
+	s = splvm();
+//	if (SH_HAS_VIRTUAL_ALIAS) {
+	/*
+	 * Remove all other mappings on this physical page
+	 * which have different virtual cache indexes to
+	 * avoid virtual cache aliases.
+	 *
+	 * XXX We should also handle shared mappings which
+	 * XXX have different virtual cache indexes by
+	 * XXX mapping them uncached (like arm and mips do).
+	 */
+/*again:
+	pvh = VM_PAGE_TO_MD(pg);
+	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
+		if (lm32_cache_indexof(va) !=
+		    lm32_cache_indexof(pv->pv_va)) {
+			pmap_remove(pv->pv_pmap, pv->pv_va,
+			    pv->pv_va + PAGE_SIZE);
+			goto again;
+		}
+	}
+//	}
+*/
+	/* Register pv map */
+	pvh = VM_PAGE_TO_MD(pg);
+	pv = __pmap_pv_alloc();
+	pv->pv_pmap = pmap;
+	pv->pv_va = va;
+
+	SLIST_INSERT_HEAD(&pvh->pvh_head, pv, pv_link);
+	splx(s);
+}
+
+/*
+ * pt_entry_t __pmap_pte_alloc(pmap_t pmap, vaddr_t va):
+ *	lookup page table entry. if found returns it, else allocate it.
+ *	page table is accessed via P1.
+ */
+pt_entry_t *
+__pmap_pte_alloc(pmap_t pmap, vaddr_t va)
+{
+	struct vm_page *pg;
+	pt_entry_t *ptp, *pte;
+
+	if ((pte = __pmap_pte_lookup(pmap, va)) != NULL)
+		return (pte);
+
+	/* Allocate page table (not managed page) */
+	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
+	if (pg == NULL)
+		return NULL;
+
+	ptp = (pt_entry_t *)VM_PAGE_TO_PHYS(pg);
+	pmap->pm_ptp[__PMAP_PTP_INDEX(va)] = ptp;
+
+	return (ptp + __PMAP_PTP_OFSET(va));
+}
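[Editor's note, not part of the commit] __pmap_pte_lookup() above performs a two-level walk: the VA bits from __PMAP_PTP_SHIFT (22) upward, masked to __PMAP_PTP_N (512) slots, select a pm_ptp page-table page (each covering 4 MB, 2 GB in total), and bits 21..12 select one of the PTEs inside it. A small host-side sketch of the index arithmetic; it assumes 4 KB pages (PGSHIFT 12) and a 4-byte pt_entry_t, which this diff does not spell out, and uses a made-up sample address:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE		4096		/* assumed */
#define PGSHIFT			12		/* assumed */
#define __PMAP_PTP_N		512		/* from pmap.h above */
#define __PMAP_PTP_SHIFT	22		/* from pmap.c above */
#define __PMAP_PTP_PG_N		(PAGE_SIZE / sizeof(uint32_t))
#define __PMAP_PTP_INDEX(va)	(((va) >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1))
#define __PMAP_PTP_OFSET(va)	(((va) >> PGSHIFT) & (__PMAP_PTP_PG_N - 1))

int main(void)
{
	uint32_t va = 0x40123456u;	/* sample virtual address */

	printf("va 0x%08x -> ptp slot %u, pte slot %u, page offset 0x%03x\n",
	    (unsigned)va, (unsigned)__PMAP_PTP_INDEX(va),
	    (unsigned)__PMAP_PTP_OFSET(va), (unsigned)(va & (PAGE_SIZE - 1)));
	return 0;
}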
sys/arch/milkymist/milkymist/genassym.cf (3 lines changed)
@@ -44,10 +44,7 @@ define	USRSTACK	USRSTACK
 
 
 # pte bits
-define	PG_V		PG_V
 define	PG_RO		PG_RO
-define	PG_RW		PG_RW
-define	PG_PROT		PG_PROT
 define	PG_FRAME	PG_FRAME
 
 