Skip to content
This repository

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse code

Implement copyout.9 and a draft of pmap_load with various fixes

Fix toolchain to not add underscores to C symbols when calling them from assembly
(define the builtin NO_LEADING_UNDERSCORE)

Fix intr.h, put _splraise() definition in intr.c

Add empty files for uart and timer SoC peripherals

Add information about files containing copyin/copyout/etc implementations for other archs in TODO file

Add a few defines in genassym.cf
  • Loading branch information...
commit 2f1952fcac7b6917e6b70be93bda26c454d7d456 1 parent 9d7eeea
Yann Sionneau authored
13  TODO
@@ -7,3 +7,16 @@ LM32 port TODO:
7 7
 - make sure it's fine to hardwire CLKF_USERMODE to 0 or 1 (this and the following one seem to be used for statistics purposes only)
8 8
 - make sure it's fine to hardwire CLKF_INTR to 0 or 1
9 9
 - Do I need to implement cpu_did_resched() ?
  10
+- implement copyin/copyout/copyinstr
  11
+  - amd64: sys/arch/amd64/amd64/copy.S:ENTRY(copyout)
  12
+  - arm: sys/arch/arm/arm/bcopyinout_xscale.S:ENTRY(copyout)
  13
+  - hppa: sys/arch/hppa/hppa/copy.S: * copyin/copyout, fuword/suword, etc.
  14
+  - i386: sys/arch/i386/i386/copy.S:ENTRY(copyout)
  15
+  - ia64: sys/arch/ia64/ia64/support.S:ENTRY(copyoutstr, 4)
  16
+  - m68k: sys/arch/m68k/060sp/dist/os.s:_copyout: | sys/arch/m68k/m68k/copy.s:ENTRY(copyout)
  17
+  - ppc: sys/arch/powerpc/booke/copyout.c
  18
+  - sh3: sys/arch/sh3/sh3/locore_subr.S:ENTRY(copyout)
  19
+  - sparc: sys/arch/sparc/sparc/locore.s:ENTRY(copyout)
  20
+  - sparc64: sys/arch/sparc64/sparc64/copy.S:ENTRY(copyout)
  21
+  - usermode: sys/arch/usermode/usermode/copy.c
  22
+  - vax: sys/arch/vax/boot/boot/if_le.c: copyout(void *f, int dest, int len)
1  external/gpl3/gcc/dist/gcc/config/lm32/lm32-netbsd.h
@@ -24,6 +24,7 @@ Boston, MA 02111-1307, USA.  */
24 24
   do						\
25 25
     {						\
26 26
       NETBSD_OS_CPP_BUILTINS_ELF();             \
  27
+      builtin_define ("__NO_LEADING_UNDERSCORES__");\
27 28
     }						\
28 29
   while (0)
29 30
 
2  sys/arch/lm32/conf/files.lm32
... ...
@@ -1,2 +1,4 @@
1 1
 include "arch/lm32/conf/majors.lm32"
2 2
 
  3
+file 	arch/lm32/lm32/pmap.c
  4
+file 	arch/lm32/lm32/copy.c
2  sys/arch/lm32/include/asm.h
@@ -92,6 +92,8 @@
92 92
 #define	NENTRY(y)	_ENTRY(_C_LABEL(y))
93 93
 #define	ASENTRY(y)	_ENTRY(_ASM_LABEL(y)) _PROF_PROLOGUE
94 94
 
  95
+#define CPUVAR(off) _C_LABEL(cpu_info_store)+__CONCAT(CPU_INFO_,off)
  96
+
95 97
 #define SET_ENTRY_SIZE(y) \
96 98
 	.size	_C_LABEL(y), . - _C_LABEL(y)
97 99
 
4  sys/arch/lm32/include/cpu.h
@@ -51,6 +51,8 @@
51 51
 #include <machine/frame.h>
52 52
 #include <machine/pte.h>
53 53
 
  54
+#define	curcpu()			(&cpu_info_store)
  55
+
54 56
 #include <sys/cpu_data.h>
55 57
 #include <sys/evcnt.h>
56 58
 #include <sys/device_if.h> /* for device_t */
@@ -68,6 +70,7 @@ struct cpu_info {
68 70
 	cpuid_t	ci_cpuid;
69 71
 	device_t ci_dev;		/* pointer to our device */
70 72
 	int ci_current_ipl;
  73
+	struct lwp *ci_curlwp;		/* current owner of the processor */
71 74
 	struct cpu_info *ci_self;	/* self-pointer */
72 75
 	void	*ci_tlog_base;		/* Trap log base */
73 76
 	int32_t ci_tlog_offset;		/* Trap log current offset */
@@ -114,7 +117,6 @@ extern struct cpu_info cpu_info_primary;
114 117
 extern struct cpu_info *cpu_info_list;
115 118
 
116 119
 extern struct cpu_info cpu_info_store;
117  
-#define	curcpu()			(&cpu_info_store)
118 120
 
119 121
 /*
120 122
  * definitions of cpu-dependent requirements
3  sys/arch/lm32/include/pmap.h
@@ -17,4 +17,7 @@ __CTASSERT(sizeof(struct vm_page_md) == sizeof(uintptr_t)*5);
17 17
 #define pmap_pte_set(p, n)		do { *(p) = (n); } while (0)
18 18
 #define pmap_pte_flush()		/* nothing */
19 19
 
  20
+void tlbflush(void);
  21
+void pmap_load(void);
  22
+
20 23
 #endif /* !_LM32_PMAP_H_ */
65  sys/arch/lm32/lm32/copy.c
... ...
@@ -0,0 +1,65 @@
  1
+/*
  2
+ * COPYRIGHT (C) 2013 Yann Sionneau <yann.sionneau@gmail.com>
  3
+ */
  4
+
  5
+#include <lm32/cpu.h>
  6
+#include <lm32/pmap.h>
  7
+#include <lm32/vmparam.h>
  8
+#include <sys/lwp.h>
  9
+#include <sys/errno.h>
  10
+
  11
+void do_pmap_load(void);
  12
+int copyout(const void *kaddr, void *uaddr, size_t len);
  13
+
  14
+void do_pmap_load(void)
  15
+{
  16
+	struct lwp *current_lwp = curcpu()->ci_curlwp;
  17
+
  18
+	do {
  19
+		current_lwp->l_nopreempt++;
  20
+		pmap_load();
  21
+		current_lwp->l_nopreempt--;
  22
+
  23
+		if (current_lwp->l_nopreempt == 0)
  24
+		{
  25
+			if (current_lwp->l_dopreempt != 0)
  26
+				kpreempt(0);
  27
+		}
  28
+
  29
+	} while (curcpu()->ci_want_pmapload != 0);
  30
+
  31
+}
  32
+
  33
+int copyout(const void *kaddr, void *uaddr, size_t len)
  34
+{
  35
+	uint32_t *uaddr_32 = uaddr;
  36
+	uint8_t *uaddr_8 = uaddr;
  37
+	uint32_t *kaddr_32 = uaddr;
  38
+	uint8_t *kaddr_8 = uaddr;
  39
+	int count;
  40
+
  41
+	if ( curcpu()->ci_want_pmapload )
  42
+		do_pmap_load();
  43
+
  44
+	uaddr_8 += len;
  45
+	if ((size_t)uaddr_8 < len) /* is it correct as an overflow condition ? */
  46
+		return EFAULT;
  47
+	if ((size_t)uaddr_8 > VM_MAXUSER_ADDRESS)
  48
+		return EFAULT;
  49
+
  50
+	count = len;
  51
+	count >>= 2;     /* count = count / 4 */
  52
+	while ( count-- > 0)
  53
+		*uaddr_32++ = *kaddr_32++;
  54
+
  55
+	count = len & 0x3;  /* test if it is a multiple of 4 */
  56
+	if (count == 0)
  57
+		return 0;
  58
+
  59
+	uaddr_8 = (uint8_t *)uaddr + len - count;
  60
+
  61
+	while (count-- > 0)
  62
+		*uaddr_8++ = *kaddr_8++;
  63
+
  64
+	return 0;
  65
+}
117  sys/arch/lm32/lm32/pmap.c
... ...
@@ -0,0 +1,117 @@
  1
+/*
  2
+ * COPYRIGHT (C) 2013 Yann Sionneau <yann.sionneau@gmail.com>
  3
+ */
  4
+
  5
/*
 * tlbflush: invalidate the LM32 TLB, data side then instruction side.
 *
 * A write to the tlbvaddr CSR with a command value in the low bits
 * triggers the flush; here 0x3 flushes the DTLB and 0x2 the ITLB
 * (NOTE(review): command encodings taken from this code — confirm
 * against the LM32 MMU documentation).  r11 is used as scratch and is
 * declared clobbered so the compiler does not cache a value there.
 */
void tlbflush(void)
{
	/* flush DTLB: r11 = 0x3, write to tlbvaddr */
	asm volatile("xor r11, r11, r11\n\t"
		     "ori r11, r11, 0x3\n\t"
		     "wcsr tlbvaddr, r11" ::: "r11");	

	/* flush ITLB: r11 = 0x2, write to tlbvaddr */
	asm volatile("xor r11, r11, r11\n\t"
		     "ori r11, r11, 0x2\n\t"
		     "wcsr tlbvaddr, r11" ::: "r11");	
}
  17
+
  18
/*
 * pmap_load: perform the actual pmap switch
 *
 * Ensures that the current process' pmap is loaded on the current CPU's
 * MMU and that there are no stale TLB entries.
 *
 * => The caller should disable kernel preemption or do check-and-retry
 *    to prevent a preemption from undoing our efforts.
 * => This function may block.
 */
void
pmap_load(void)
{
	struct cpu_info *ci;
	struct pmap *pmap, *oldpmap;
	struct lwp *l;
	struct pcb *pcb;
	cpuid_t cid;
	uint64_t ncsw;

	kpreempt_disable();
 retry:
	ci = curcpu();
	/* Nothing pending: another pass (or CPU switch) already did it. */
	if (!ci->ci_want_pmapload) {
		kpreempt_enable();
		return;
	}
	l = ci->ci_curlwp;
	/* Snapshot the context-switch count; compared below to detect
	 * whether we blocked (and thus may have migrated/been switched). */
	ncsw = l->l_ncsw;

	/* should be able to take ipis. */
	KASSERT(ci->ci_ilevel < IPL_HIGH); 

	KASSERT(l != NULL);
	pmap = vm_map_pmap(&l->l_proc->p_vmspace->vm_map);
	KASSERT(pmap != pmap_kernel());
	oldpmap = ci->ci_pmap;
	/* NOTE(review): pcb is fetched but not used below — confirm it
	 * is needed, or remove. */
	pcb = lwp_getpcb(l);

	if (pmap == oldpmap) {
		/* Same pmap as before: try to simply reactivate it. */
		if (!pmap_reactivate(pmap)) {
			u_int gen = uvm_emap_gen_return();

			/*
			 * pmap has been changed during deactivated.
			 * our tlb may be stale.
			 */

			tlbflush();
			uvm_emap_update(gen);
		}

		ci->ci_want_pmapload = 0;
		kpreempt_enable();
		return;
	}

	/*
	 * Acquire a reference to the new pmap and perform the switch.
	 */

	pmap_reference(pmap);

	/* Deregister this CPU from the old pmap's CPU sets. */
	cid = cpu_index(ci);
	kcpuset_atomic_clear(oldpmap->pm_cpus, cid);
	kcpuset_atomic_clear(oldpmap->pm_kernel_cpus, cid);

	KASSERT(!kcpuset_isset(pmap->pm_cpus, cid));
	KASSERT(!kcpuset_isset(pmap->pm_kernel_cpus, cid));

	/*
	 * Mark the pmap in use by this CPU.  Again, we must synchronize
	 * with TLB shootdown interrupts, so set the state VALID first,
	 * then register us for shootdown events on this pmap.
	 */
	ci->ci_tlbstate = TLBSTATE_VALID;
	kcpuset_atomic_set(pmap->pm_cpus, cid);
	kcpuset_atomic_set(pmap->pm_kernel_cpus, cid);
	ci->ci_pmap = pmap;


	/* Load the new pmap onto the MMU. */
	u_int gen = uvm_emap_gen_return();
	cpu_load_pmap(pmap, oldpmap);
	uvm_emap_update(gen);

	ci->ci_want_pmapload = 0;

	/*
	 * we're now running with the new pmap.  drop the reference
	 * to the old pmap.  if we block, we need to go around again.
	 */

	pmap_destroy(oldpmap);
	/* pmap_destroy() may have blocked: if the LWP was switched out
	 * in the meantime, the state above may be stale — retry. */
	if (l->l_ncsw != ncsw) {
		goto retry;
	}

	kpreempt_enable();
}
  117
+
2  sys/arch/milkymist/conf/files.milkymist
@@ -21,4 +21,6 @@ device	uart: tty
21 21
 attach	uart at mainbus
22 22
 file	arch/milkymist/dev/uart.c		uart needs-flag
23 23
 
  24
+file	arch/milkymist/milkymist/intr.c
  25
+
24 26
 include "arch/milkymist/conf/majors.milkymist"
0  sys/arch/milkymist/dev/timer.c
No changes.
0  sys/arch/milkymist/dev/uart.c
No changes.
3  sys/arch/milkymist/include/cpu.h
... ...
@@ -1,3 +1,6 @@
1 1
 /*	$NetBSD: cpu.h,v 1.3 2002/03/04 14:36:13 uch Exp $	*/
2 2
 
  3
+#define curcpu() (&cpu_info_store)
  4
+extern struct cpu_info cpu_info_store;
  5
+
3 6
 #include <lm32/cpu.h>
32  sys/arch/milkymist/include/intr.h
@@ -37,7 +37,8 @@
37 37
 #define _MACHINE_INTR_H_
38 38
 
39 39
 #include <lm32/intr.h>
40  
-#include <lm32/cpu.h>
  40
+#include <machine/cpu.h>
  41
+#include <lib/libkern/libkern.h>
41 42
 
42 43
 /* Define the various Interrupt Priority Levels */
43 44
 
@@ -89,6 +90,8 @@ typedef struct {
89 90
 	ipl_t  _ipl;
90 91
 } ipl_cookie_t;
91 92
 
  93
+ipl_t _splraise(ipl_t level);
  94
+
92 95
 static inline ipl_cookie_t
93 96
 makeiplcookie(ipl_t ipl)
94 97
 {
@@ -99,33 +102,6 @@ makeiplcookie(ipl_t ipl)
99 102
 #define LM32_CSR_PSW_IE_SHIFT (0x0)
100 103
 #define LM32_CSR_PSW_IE (1 << LM32_CSR_PSW_IE_SHIFT)
101 104
 
102  
-
103  
-static inline ipl_t _splraise(ipl_t level)
104  
-{
105  
-	struct cpu_info *ci = curcpu();
106  
-	ipl_t olevel;
107  
-	int psw;
108  
-
109  
-	if (ci->ci_current_ipl == level)
110  
-		return level;
111  
-	olevel = ci->ci_current_ipl;
112  
-
113  
-//	KASSERT(level < NIPL);
114  
-
115  
-	ci->ci_current_ipl = max(level, olevel);
116  
-
117  
-	asm volatile("rcsr %0, PSW" : "=r"(psw) :: );
118  
-
119  
-	if (level == IPL_NONE)
120  
-		psw |= LM32_CSR_PSW_IE;
121  
-	else
122  
-		psw &= ~(LM32_CSR_PSW_IE);
123  
-
124  
-	asm volatile("wcsr PSW, %0" :: "r"(psw) : );
125  
-
126  
-	return olevel;
127  
-}
128  
-
129 105
 static inline int
130 106
 splraiseipl(ipl_cookie_t icookie)
131 107
 {
12  sys/arch/milkymist/milkymist/genassym.cf
@@ -34,7 +34,9 @@
34 34
 
35 35
 include <machine/pte.h>
36 36
 include <machine/vmparam.h>
37  
-
  37
+include <machine/types.h>
  38
+include <sys/cpu.h>
  39
+include <sys/lwp.h>
38 40
 
39 41
 # general constants
40 42
 define	PAGE_SIZE	PAGE_SIZE
@@ -47,3 +49,11 @@ define	PG_RO		PG_RO
47 49
 define	PG_RW		PG_RW
48 50
 define	PG_PROT		PG_PROT
49 51
 define	PG_FRAME	PG_FRAME
  52
+
  53
+
  54
+define	CPU_INFO_SELF		offsetof(struct cpu_info, ci_self)
  55
+define	CPU_INFO_CURLWP		offsetof(struct cpu_info, ci_curlwp)
  56
+
  57
+
  58
+define	L_NOPREEMPT		offsetof(struct lwp, l_nopreempt)
  59
+define	L_DOPREEMPT		offsetof(struct lwp, l_dopreempt)
32  sys/arch/milkymist/milkymist/intr.c
... ...
@@ -0,0 +1,32 @@
  1
+/*
  2
+ * COPYRIGHT (C) 2013 Yann Sionneau <yann.sionneau@gmail.com>
  3
+ */
  4
+
  5
+#include <lm32/cpu.h>
  6
+#include <lib/libkern/libkern.h>
  7
+
  8
/*
 * _splraise: raise the current interrupt priority level to 'level'.
 *
 * Returns the previous IPL.  The recorded IPL in cpu_info becomes
 * max(level, current).  The hardware side is all-or-nothing: the PSW
 * IE bit is cleared (all interrupts masked) for any level other than
 * IPL_NONE, and set (interrupts enabled) when level == IPL_NONE.
 *
 * NOTE(review): being called with level == IPL_NONE re-enables
 * interrupts, which lowers rather than raises the effective level —
 * confirm this is the intended semantics for a "raise" primitive.
 */
ipl_t _splraise(ipl_t level)
{
	struct cpu_info *ci = curcpu();
	ipl_t olevel;
	int psw;

	/* Already at the requested level: nothing to do. */
	if (ci->ci_current_ipl == level)
		return level;
	olevel = ci->ci_current_ipl;

//	KASSERT(level < NIPL);

	/* Never lower the recorded IPL here. */
	ci->ci_current_ipl = max(level, olevel);

	/* Read the processor status word... */
	asm volatile("rcsr %0, PSW" : "=r"(psw) :: );

	if (level == IPL_NONE)
		psw |= LM32_CSR_PSW_IE;
	else
		psw &= ~(LM32_CSR_PSW_IE);

	/* ...and write it back with the updated interrupt-enable bit. */
	asm volatile("wcsr PSW, %0" :: "r"(psw) : );

	return olevel;
}

0 notes on commit 2f1952f

Please sign in to comment.
Something went wrong with that request. Please try again.