  • 15 commits
  • 11 files changed
  • 0 comments
  • 1 contributor
Feb 26, 2013
Lars-Peter Clausen lm32: Get rid of pm_idle
This is currently unused and will be gone in upstream soon.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
64d30a6
Lars-Peter Clausen lm32: Use _save_altstack helper
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
b533446
Lars-Peter Clausen lm32: switch to generic sys_sigaltstack
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
a94271a
Lars-Peter Clausen lm32: sys_rt_sigreturn: Use current_pt_regs()
Use current_pt_regs() to get a pointer to the registers of the current process.
None of the syscalls expect a pointer to registers of the current process in r7
anymore, so we can also get rid of that.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
d2374d9
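
A condensed view of the resulting entry point (see the signal.c hunk below); the frame handling that follows is unchanged:

	asmlinkage int sys_rt_sigreturn(void)
	{
		/* registers of the current process, no longer passed in via r7 */
		struct pt_regs *regs = current_pt_regs();
		struct rt_sigframe __user *frame =
			(struct rt_sigframe __user *)(regs->sp + 4);
		/* ... restore the sigmask and sigcontext from *frame as before ... */
	}
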
Lars-Peter Clausen lm32: Properly update stack pointer in copy_thread()
If we get a new stack pointer for the new thread in copy_thread(), we need to
update the saved register set to point at the new stack address.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
fa7fba2
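
Condensed from the process.c hunk below: the child now inherits the parent's register frame, and the saved stack pointer is updated only when the caller passed a new one in:

	/* userspace thread: inherit the parent's syscall frame ... */
	*childregs = *current_pt_regs();
	/* ... but honour a caller-supplied stack, e.g. clone() with a new stack */
	if (usp_thread_fn)
		childregs->sp = usp_thread_fn;
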
Lars-Peter Clausen lm32: Don't clobber userspace stack during syscall
We shouldn't use the userspace stack to back up the registers that we use
during the early syscall code; this only works by chance.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
0a92396
Lars-Peter Clausen lm32: Switch to kernel stack during interrupts
Don't run the interrupt handlers on the userspace stack; this is quite wrong,
quite dangerous, and may cause random process corruption. Instead, switch to
the kernel stack as soon as we enter kernel space.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
fbc42a6
Lars-Peter Clausen lm32: Simplify current_thread_info()
Now that we are always on the kernel stack in kernel mode, we can calculate the
current thread_info address from the stack pointer. The thread_info is always
stored at the lowest address of a process's kernel stack.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
b1ea119
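
The calculation itself is just a mask. A minimal user-space sketch of the idea (the THREAD_SIZE value here is an assumption; the real one comes from the kernel headers and must be a power of two):

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE 8192 /* assumed 8 KiB, power-of-two kernel stack */

	/* Any address within a kernel stack, with the low bits masked off,
	 * yields the base of that stack -- where thread_info lives. */
	static uintptr_t thread_info_addr(uintptr_t sp)
	{
		return sp & ~(uintptr_t)(THREAD_SIZE - 1);
	}

	int main(void)
	{
		/* two addresses inside the same stack map to the same base */
		printf("%#lx\n", (unsigned long)thread_info_addr(0xc0013f40));
		printf("%#lx\n", (unsigned long)thread_info_addr(0xc00130a8));
		return 0;
	}
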
Lars-Peter Clausen lm32: We are always on kernel stack in resume
We are always on the kernel stack now and always resume to the kernel stack
during a context switch, so there is no need to check which stack we are on.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
664ec9d
Lars-Peter Clausen lm32: Inline _{save,restore}_irq_frame
There is really no point in having these as separate functions since each has
only a single invocation. Making them macros also means we can reuse them in
_{save,restore}_syscall_frame to get rid of some duplicated code.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
d5c38bd
Lars-Peter Clausen lm32: Remove usp field from thread_struct struct
The usp is always stored in the sp field of the pt_regs of the process. No need
to track it separately.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
c6e4d95
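
With usp kept in the pt_regs sp slot, reading a traced child's registers loses its special case; a condensed sketch of the new ptrace path (see the ptrace.c hunk below):

	static int ptrace_getregs(struct task_struct *child,
				  unsigned long __user *data)
	{
		struct pt_regs *regs = task_pt_regs(child);

		/* sp is just register slot 28 now, no usp special case */
		return copy_to_user(data, regs, sizeof(*regs));
	}
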
Lars-Peter Clausen lm32: Simplify mode switching
Whenever we enter kernel mode we switch to the kernel stack. We always start at
the bottom of the kernel stack, and once we leave kernel mode we are at the
bottom of the kernel stack again. Since we can calculate the kernel stack
address from the current thread_info address (the thread_info always sits at
the lowest address of the kernel stack), we do not have to track the kernel
stack address of a process separately. The which_stack field is also redundant,
since we are always on the kernel stack in kernel mode and always on the user
stack in user mode, so we can remove it altogether as well.

Finally, as a minor optimization, put all the global variables used during the
mode switch into a common struct. For one, this is quite cache friendly, and we
also only have to load the address of the struct once and can use relative
addressing to access the members, instead of loading the address of each global
variable individually.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
50c3718
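
The consolidated state looks like this (mirrored from the thread_info.h hunk below); the assembly loads the struct's address once and then reaches every member with a small constant offset:

	struct thread_info;

	/* all mode-switch globals in one cache-friendly struct */
	struct lm32_state {
		struct thread_info *current_thread;
		unsigned long kernel_mode;
		unsigned long saved_r9;  /* entry-code scratch registers */
		unsigned long saved_r10;
		unsigned long saved_r11;
	};
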
Lars-Peter Clausen lm32: Save the process state in the thread_info struct
Put the process state that is saved during a process switch in the thread_info
struct instead of on top of the stack. This has the advantage that we know
where the registers are saved and don't need to track this, so we can finally
get rid of the ksp field of the thread_struct struct. Also, only save those
registers which are callee-saved; all other registers will already have been
saved in previous stack frames. As a result, copy_thread() also looks much
nicer.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
52d0953
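
Condensed from the process.c hunk below: only the callee-saved context is initialized, and for a kernel thread the function and its argument go into r11/r12, where ret_from_kernel_thread expects them:

	struct cpu_context_save *cc = &task_thread_info(p)->cpu_context;

	memset(cc, 0, sizeof(*cc));
	cc->sp = (unsigned long)childregs - 4; /* switch frame sits below pt_regs */

	if (p->flags & PF_KTHREAD) {
		cc->r11 = usp_thread_fn;  /* thread function */
		cc->r12 = thread_fn_arg;  /* its argument */
		cc->ra = (unsigned long)ret_from_kernel_thread;
	}
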
Lars-Peter Clausen lm32: Cleanup start_thread()
There is no need to call set_fs(USER_DS) in start_thread() since this is
already done in generic code. Also don't memset regs to 0, since some of the
callers pass in preinitialized registers.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
9a504d6
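
The resulting function, condensed from the process.c hunk below (the FDPIC loadmap setup is elided):

	void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
	{
		/* -4 because we will add 4 later in ret_from_syscall */
		regs->ea = pc - 4;
		regs->sp = usp;
		regs->fp = current->mm->start_data;
		regs->pt_mode = PT_MODE_USER;
	}
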
Lars-Peter Clausen lm32: Cleanup processor.h/process.c a bit
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
e7f8b2e
1  arch/lm32/Kconfig
@@ -8,6 +8,7 @@ config LM32
 	select GENERIC_CPU_DEVICES
 	select GENERIC_SYSCALL_TABLE
 	select GENERIC_ATOMIC64
+	select GENERIC_SIGALTSTACK
 	select ARCH_REQUIRE_GPIOLIB
 	select OF
 	select OF_EARLY_FLATTREE
40  arch/lm32/include/asm/processor.h
@@ -54,29 +54,14 @@
  */
 #define TASK_UNMAPPED_BASE	0
 
-/*
- * if you change this structure, you must change the code and offsets
- * in asm-offsets.c
- */
-
-struct thread_struct {
-	unsigned long ksp;	/* kernel stack pointer */
-	unsigned long usp;	/* user stack pointer */
-	unsigned long which_stack; /* 0 if we are on kernel stack, 1 if we are on user stack */
-};
+struct thread_struct {};
+#define INIT_THREAD   {}
 
 #define KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
 #define task_pt_regs(tsk) ((struct pt_regs *)KSTK_TOS(tsk) - 1)
 #define KSTK_EIP(tsk) 0
 #define KSTK_ESP(tsk) 0
 
-#define INIT_THREAD  { \
-	sizeof(init_stack) + (unsigned long) init_stack, 0, \
-	0, \
-	0 \
-}
-
-#define	reformat(_regs)		do { } while (0)
 
 /*
  * Do necessary setup to start up a newly executed thread.
@@ -86,26 +71,19 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long
 /* Forward declaration, a strange C thing */
 struct task_struct;
 
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
+static inline void release_thread(struct task_struct *dead_task) { }
+static inline void exit_thread(void) { }
+
+static inline unsigned long thread_saved_pc(struct task_struct *tsk)
 {
+	return 0;
 }
 
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk)	do { } while (0)
-
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-/*
- * Free current thread data structures etc..
- */
-static inline void exit_thread(void)
+static inline unsigned long get_wchan(struct task_struct *p)
 {
+	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk);
-unsigned long get_wchan(struct task_struct *p);
-
 #define cpu_relax()    barrier()
 
 #endif
15  arch/lm32/include/asm/switch_to.h
@@ -1,16 +1,15 @@
 #ifndef __LM32_SYSTEM_H
 #define __LM32_SYSTEM_H
 
-#include <linux/thread_info.h>
 #include <linux/linkage.h>
+#include <linux/thread_info.h>
 
-struct task_struct;
-extern asmlinkage struct task_struct* resume(struct task_struct* last, struct task_struct* next);
+extern asmlinkage struct task_struct* _switch_to(struct task_struct *,
+	struct thread_info *, struct thread_info *);
 
-#define switch_to(prev, next, last)					\
-	do {								\
-		lm32_current_thread = task_thread_info(next); \
-		((last) = resume((prev), (next)));			\
-	} while (0)
+#define switch_to(prev,next,last) \
+do  { \
+	last = _switch_to(prev, task_thread_info(prev), task_thread_info(next)); \
+} while (0)
 
 #endif
40  arch/lm32/include/asm/thread_info.h
@@ -25,6 +25,30 @@ typedef struct {
 	unsigned long seg;
 } mm_segment_t;
 
+struct cpu_context_save {
+	unsigned long r11;
+	unsigned long r12;
+	unsigned long r13;
+	unsigned long r14;
+	unsigned long r15;
+	unsigned long r16;
+	unsigned long r17;
+	unsigned long r18;
+	unsigned long r19;
+	unsigned long r20;
+	unsigned long r21;
+	unsigned long r22;
+	unsigned long r23;
+	unsigned long r24;
+	unsigned long r25;
+	unsigned long gp;
+	unsigned long fp;
+	unsigned long sp;
+	unsigned long ra;
+	unsigned long ea;
+	unsigned long ba;
+};
+
 /*
  * low level task data.
  * If you change this, change the TI_* offsets below to match.
@@ -37,6 +61,7 @@ struct thread_info {
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	struct restart_block	restart_block;
 	mm_segment_t		addr_limit;
+	struct cpu_context_save	cpu_context;
 };
 
 #define init_thread_info	(init_thread_union.thread_info)
@@ -44,13 +69,22 @@ struct thread_info {
 
 
 /* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
+static inline struct thread_info *current_thread_info(void) __pure;
+
+struct lm32_state {
+	struct thread_info *current_thread;
+	unsigned long kernel_mode;
+	unsigned long saved_r9;
+	unsigned long saved_r10;
+	unsigned long saved_r11;
+};
 
-extern struct thread_info* lm32_current_thread;
+extern struct lm32_state lm32_state;
 
 static inline struct thread_info *current_thread_info(void)
 {
-	return lm32_current_thread;
+	register unsigned long sp asm ("sp");
+	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 }
 
 
34  arch/lm32/kernel/asm-offsets.c
@@ -27,10 +27,6 @@ int main(void)
 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
 	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
 
-	DEFINE(TASK_KSP, offsetof(struct task_struct, thread.ksp));
-	DEFINE(TASK_USP, offsetof(struct task_struct, thread.usp));
-	DEFINE(TASK_WHICH_STACK, offsetof(struct task_struct, thread.which_stack));
-
 	DEFINE(PT_R0, offsetof(struct pt_regs, r0));
 	DEFINE(PT_R1, offsetof(struct pt_regs, r1));
 	DEFINE(PT_R2, offsetof(struct pt_regs, r2));
@@ -72,9 +68,33 @@ int main(void)
 	DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
 	DEFINE(_THREAD_SIZE, THREAD_SIZE);
 
-	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
-	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
-	DEFINE(THREAD_WHICH_STACK, offsetof(struct thread_struct, which_stack));
+	DEFINE(TI_CC_R11, offsetof(struct thread_info, cpu_context.r11));
+	DEFINE(TI_CC_R12, offsetof(struct thread_info, cpu_context.r12));
+	DEFINE(TI_CC_R13, offsetof(struct thread_info, cpu_context.r13));
+	DEFINE(TI_CC_R14, offsetof(struct thread_info, cpu_context.r14));
+	DEFINE(TI_CC_R15, offsetof(struct thread_info, cpu_context.r15));
+	DEFINE(TI_CC_R16, offsetof(struct thread_info, cpu_context.r16));
+	DEFINE(TI_CC_R17, offsetof(struct thread_info, cpu_context.r17));
+	DEFINE(TI_CC_R18, offsetof(struct thread_info, cpu_context.r18));
+	DEFINE(TI_CC_R19, offsetof(struct thread_info, cpu_context.r19));
+	DEFINE(TI_CC_R20, offsetof(struct thread_info, cpu_context.r20));
+	DEFINE(TI_CC_R21, offsetof(struct thread_info, cpu_context.r21));
+	DEFINE(TI_CC_R22, offsetof(struct thread_info, cpu_context.r22));
+	DEFINE(TI_CC_R23, offsetof(struct thread_info, cpu_context.r23));
+	DEFINE(TI_CC_R24, offsetof(struct thread_info, cpu_context.r24));
+	DEFINE(TI_CC_R25, offsetof(struct thread_info, cpu_context.r25));
+	DEFINE(TI_CC_GP, offsetof(struct thread_info, cpu_context.gp));
+	DEFINE(TI_CC_FP, offsetof(struct thread_info, cpu_context.fp));
+	DEFINE(TI_CC_SP, offsetof(struct thread_info, cpu_context.sp));
+	DEFINE(TI_CC_RA, offsetof(struct thread_info, cpu_context.ra));
+	DEFINE(TI_CC_EA, offsetof(struct thread_info, cpu_context.ea));
+	DEFINE(TI_CC_BA, offsetof(struct thread_info, cpu_context.ba));
+
+	DEFINE(STATE_CURRENT_THREAD, offsetof(struct lm32_state, current_thread));
+	DEFINE(STATE_KERNEL_MODE, offsetof(struct lm32_state, kernel_mode));
+	DEFINE(STATE_SAVED_R9, offsetof(struct lm32_state, saved_r9));
+	DEFINE(STATE_SAVED_R10, offsetof(struct lm32_state, saved_r10));
+	DEFINE(STATE_SAVED_R11, offsetof(struct lm32_state, saved_r11));
 
 	return 0;
 }
406  arch/lm32/kernel/entry.S
@@ -62,51 +62,71 @@ ENTRY(interrupt_handler)
 	nop
 	nop
 
-ENTRY(system_call)
-	/* break */
-	/* store away r9,r10 so that we can use it here TODO: use clobbered ones*/
-	sw (sp+0), r9 /* needed for various */
-	sw (sp+-4), r10 /* needed for current = current_thread_info()->task */
-	sw (sp+-8), r11 /* needed for user stack pointer, if switching */
-
-	/* test if already on kernel stack: test current_thread_info->task->which_stack */
-	mvhi r9, hi(lm32_current_thread)
-	ori r9, r9, lo(lm32_current_thread)
-	lw r9, (r9+0) /* dereference lm32_current_thread */
-	lw r10, (r9+TI_TASK) /* load pointer to task */
-	lw r9, (r10+TASK_WHICH_STACK)
-
-	mv r11, sp /* remember sp for restoring r9, r10, r11 */
-
-	be r9, r0, 1f
-
-	/* we are on user stack, have to switch */
-	sw (r10+TASK_USP), sp /* store usp */
-	lw sp, (r10+TASK_KSP) /* load ksp */
-	sw (r10+TASK_WHICH_STACK), r0 /* set which_stack to 0 */
-
+.macro switch_to_kernel_mode
+	/*
+	 * Store away r9,r10,r11 so that we can use them here. The tricky part is that we
+	 * need to do this without clobbering any other registers. The r0 register
+	 * is supposed to be always 0. Since we are running with interrupts disabled we can
+	 * allow ourselves to temporarily change its value. Note though that r0 is
+	 * also used in pseudo instructions like 'mv', so we need to restore it
+	 * immediately afterwards.
+	 */
+	mvhi r0, hi(lm32_state)
+	ori r0, r0, lo(lm32_state)
+	sw (r0+STATE_SAVED_R9), r9
+	sw (r0+STATE_SAVED_R10), r10
+	sw (r0+STATE_SAVED_R11), r11
+	mv r9, r0 /* mv is 'or rX, rY, r0', so this works */
+	xor r0, r0, r0
+
+	/*
+	 * store the current kernel_mode value to the stack frame and set
+	 * kernel_mode to PT_MODE_KERNEL
+	 */
+	lw r10, (r9+STATE_KERNEL_MODE)
+
+	mv r11, sp
+
+	bne r10, r0, 1f
+
+	lw sp, (r9+STATE_CURRENT_THREAD)
+	addi sp, sp, THREAD_SIZE - 36
 1:/* already on kernel stack */
 
 	addi sp, sp, -132
+
+	/* save pt_mode, stack pointer and ra in current stack frame */
+	sw (sp+132), r10
 	sw (sp+116), r11
+	sw (sp+120), ra
+
+	mvi r10, PT_MODE_KERNEL
+	sw (r9+STATE_KERNEL_MODE), r10
 
-	/* restore r9, r10, r11 */
-	lw r9, (r11+0)
-	lw r10, (r11+-4)
-	lw r11, (r11+-8)
+	lw r11, (r9+STATE_SAVED_R11)
+	lw r10, (r9+STATE_SAVED_R10)
+	lw r9, (r9+STATE_SAVED_R9)
+.endm
 
-  /* save registers */
-	sw    (sp + 120), ra
+.macro switch_to_user_mode
+	rcsr r2, IE
+	andi r3, r2, 0xfffe
+	wcsr IE, r3
+	lw r2, (sp+132)
+	mvhi r1, hi(lm32_state)
+	ori r1, r1, lo(lm32_state)
+	sw (r1+STATE_KERNEL_MODE), r2
+.endm
+
+ENTRY(system_call)
+	switch_to_kernel_mode
+	/* save registers */
 	calli _save_syscall_frame
 
 	rcsr r11, IE
 	ori r11, r11, 1
 	wcsr IE, r11
 
-	/* r7 always holds the pointer to struct pt_regs */
-	addi  r7, sp, 4
-	#addi  r4, sp, 4
-
 	/* r8 always holds the syscall number */
 	/* check if syscall number is valid */
 	mvi r9, __NR_syscalls
@@ -167,21 +187,44 @@ ENTRY(ret_from_kernel_thread)
 	ori ra, ra, lo(syscall_tail)
 	b r11
 
-ENTRY(sys_rt_sigreturn)
-	mv r1, r7
-	bi _sys_rt_sigreturn
+.macro save_irq_frame
+	sw      (sp+8),   r1
+	sw      (sp+12),  r2
+	sw      (sp+16),  r3
+	sw      (sp+20),  r4
+	sw      (sp+24),  r5
+	sw      (sp+28),  r6
+	sw      (sp+32),  r7
+	sw      (sp+36),  r8
+	sw      (sp+40),  r9
+	sw      (sp+44),  r10
+	/* ra (sp + 120) has already been written */
+	sw      (sp+124), ea
+.endm
 
-ENTRY(sys_sigaltstack)
-	lw r3, (r7+112)
-	bi do_sigaltstack
+/* restore all caller saved registers saved in save_irq_frame */
+.macro restore_irq_frame
+	lw      r1,  (sp+8)
+	lw      r2,  (sp+12)
+	lw      r3,  (sp+16)
+	lw      r4,  (sp+20)
+	lw      r5,  (sp+24)
+	lw      r6,  (sp+28)
+	lw      r7,  (sp+32)
+	lw      r8,  (sp+36)
+	lw      r9,  (sp+40)
+	lw      r10, (sp+44)
+	lw      ra,  (sp+120)
+	lw      ea,  (sp+124)
+	lw      sp,  (sp+116)
+.endm
 
 /* in IRQ we call a function between save and restore */
 /* we therefore only save and restore the caller saved registers */
 /* (r1-r10, ra, ea because an interrupt could interrupt another one) */
 _long_interrupt_handler:
-	addi    sp, sp, -132
-	sw      (sp+120), ra
-	calli   _save_irq_frame
+	switch_to_kernel_mode
+	save_irq_frame
 
 	/* Workaround hardware hazard. Sometimes the interrupt handler is entered
 	 * although interrupts are disabled */
@@ -225,66 +268,12 @@ _long_interrupt_handler:
 	addi    r1, sp, 4
 	calli   manage_signals_irq
 6:
-	bi      _restore_irq_frame_and_return
-
-_save_irq_frame:
-	sw      (sp+8),   r1
-	sw      (sp+12),  r2
-	sw      (sp+16),  r3
-	sw      (sp+20),  r4
-	sw      (sp+24),  r5
-	sw      (sp+28),  r6
-	sw      (sp+32),  r7
-	sw      (sp+36),  r8
-	sw      (sp+40),  r9
-	sw      (sp+44),  r10
-	/* ra (sp + 120) has already been written */
-	sw      (sp+124), ea
-
-	mvhi r1, hi(kernel_mode)
-	ori r1, r1, lo(kernel_mode)
-	lw r2, (r1+0)
-	sw (sp+132), r2
-	mvi r2, PT_MODE_KERNEL
-	sw (r1+0), r2
-ret
-
-/* restore all caller saved registers saved in _save_irq_frame and return from exception */
-_restore_irq_frame_and_return:
-	rcsr r2, IE
-	andi r3, r2, 0xfffe
-	wcsr IE, r3
-	lw r2, (sp+132)
-	mvhi r1, hi(kernel_mode)
-	ori r1, r1, lo(kernel_mode)
-	sw (r1+0), r2
-
-	lw      r1,  (sp+8);
-	lw      r2,  (sp+12);
-	lw      r3,  (sp+16);
-	lw      r4,  (sp+20);
-	lw      r5,  (sp+24);
-	lw      r6,  (sp+28);
-	lw      r7,  (sp+32);
-	lw      r8,  (sp+36);
-	lw      r9,  (sp+40);
-	lw      r10, (sp+44);
-	lw      ra,  (sp+120)
-	lw      ea,  (sp+124)
-	addi    sp, sp, 132
+	switch_to_user_mode
+	restore_irq_frame
 	eret
 
 _save_syscall_frame:
-	sw      (sp+8),   r1
-	sw      (sp+12),  r2
-	sw      (sp+16),  r3
-	sw      (sp+20),  r4
-	sw      (sp+24),  r5
-	sw      (sp+28),  r6
-	sw      (sp+32),  r7
-	sw      (sp+36),  r8
-	sw      (sp+40),  r9
-	sw      (sp+44),  r10
+	save_irq_frame
 	sw      (sp+48),  r11
 	sw      (sp+52),  r12
 	sw      (sp+56),  r13
@@ -303,15 +292,8 @@ _save_syscall_frame:
 	sw      (sp+108), r26
 	sw      (sp+112), r27
 	/* ra (sp + 120) has already been written */
-	sw      (sp+124), ea
 	sw      (sp+128), ba
 
-	mvhi r11, hi(kernel_mode)
-	ori r11, r11, lo(kernel_mode)
-	lw r12, (r11+0)
-	sw (sp+132), r12
-	mvi r12, PT_MODE_KERNEL
-	sw (r11+0), r12
 	ret
 
 /************************/
@@ -325,68 +307,29 @@ _save_syscall_frame:
 
 #define RETURN_FROM_SYSCALL_OR_EXCEPTION(label, addr_register, return_instr) \
 label: \
-	rcsr r2, IE; \
-	andi r3, r2, 0xfffe; \
-	wcsr IE, r3; \
-	lw r2, (sp+132); \
-	mvhi r1, hi(kernel_mode); \
-	ori r1, r1, lo(kernel_mode); \
-	sw (r1+0), r2; \
-	/* prepare switch to user stack but keep kernel stack pointer in r11 */ \
-	/* r9: scratch register */ \
-	/* r10: current = current_thread_info()->task */ \
-	/* r11: ksp backup */ \
-	/* setup r10 = current */ \
-	addi sp, sp, 132; \
-	bne r2, r0, 1f; \
-	mvhi r9, hi(lm32_current_thread); \
-	ori r9, r9, lo(lm32_current_thread); \
-	lw r9, (r9+0); /* dereference lm32_current_thread */ \
-	lw r10, (r9+TI_TASK); /* load pointer to task */ \
-	/* set task->thread.which_stack to 1 (user stack) */ \
-	mvi r9, TASK_USP - TASK_KSP; \
-	sw (r10+TASK_WHICH_STACK), r9; \
-	/* store ksp (after restore of frame) into task->thread.ksp */ \
-	sw (r10+TASK_KSP), sp; \
-	/* save sp into r11 */ \
-	/* get usp into sp*/ \
-	1: \
-	addi r11, sp, -132; \
+	switch_to_user_mode; \
 	/* restore frame from original kernel stack */ \
 	/* restore r1 as the return value is stored onto the stack */ \
-	lw      r1,  (r11+8); \
-	lw      r2,  (r11+12); \
-	lw      r3,  (r11+16); \
-	lw      r4,  (r11+20); \
-	lw      r5,  (r11+24); \
-	lw      r6,  (r11+28); \
-	lw      r7,  (r11+32); \
-	lw      r8,  (r11+36); \
-	lw      r9,  (r11+40); \
-	lw      r10, (r11+44); \
-	/* skip r11 */; \
-	lw      r12, (r11+52); \
-	lw      r13, (r11+56); \
-	lw      r14, (r11+60); \
-	lw      r15, (r11+64); \
-	lw      r16, (r11+68); \
-	lw      r17, (r11+72); \
-	lw      r18, (r11+76); \
-	lw      r19, (r11+80); \
-	lw      r20, (r11+84); \
-	lw      r21, (r11+88); \
-	lw      r22, (r11+92); \
-	lw      r23, (r11+96); \
-	lw      r24, (r11+100); \
-	lw      r25, (r11+104); \
-	lw      r26, (r11+108); \
-	lw      r27, (r11+112); \
-	lw      sp,  (r11+116); \
-	lw      ra,  (r11+120); \
-	lw      ea,  (r11+124); \
-	lw      ba,  (r11+128); \
-	/* r11 must be restored last */ \
-	lw      r11,  (r11+48); \
+	lw      r11, (sp+48); \
+	lw      r12, (sp+52); \
+	lw      r13, (sp+56); \
+	lw      r14, (sp+60); \
+	lw      r15, (sp+64); \
+	lw      r16, (sp+68); \
+	lw      r17, (sp+72); \
+	lw      r18, (sp+76); \
+	lw      r19, (sp+80); \
+	lw      r20, (sp+84); \
+	lw      r21, (sp+88); \
+	lw      r22, (sp+92); \
+	lw      r23, (sp+96); \
+	lw      r24, (sp+100); \
+	lw      r25, (sp+104); \
+	lw      r26, (sp+108); \
+	lw      r27, (sp+112); \
+	lw      ra,  (sp+120); \
+	lw      ba,  (sp+128); \
+	restore_irq_frame; \
 	/* scall stores pc into ea/ba register, not pc+4, so we have to add 4 */ \
 	addi	addr_register, addr_register, 4; \
 	return_instr
@@ -394,88 +337,61 @@ label: \
 RETURN_FROM_SYSCALL_OR_EXCEPTION(_restore_and_return_exception,ea,eret)
 
 /*
- * struct task_struct* resume(struct task_struct* prev, struct task_struct* next)
+ * struct task_struct* _switch_to(struct task_struct* prev,
+ *		struct thread_info *prev_ti, struct thread_info *next_ti)
  * Returns the previous task
  */
-ENTRY(resume)
-	/* store whole state to current stack (may be usp or ksp) */
-	addi sp, sp, -132
-	sw  (sp+16),  r3
-	sw  (sp+20),  r4
-	sw  (sp+24),  r5
-	sw  (sp+28),  r6
-	sw  (sp+32),  r7
-	sw  (sp+36),  r8
-	sw  (sp+40),  r9
-	sw  (sp+44),  r10
-	sw  (sp+48),  r11
-	sw  (sp+52),  r12
-	sw  (sp+56),  r13
-	sw  (sp+60),  r14
-	sw  (sp+64),  r15
-	sw  (sp+68),  r16
-	sw  (sp+72),  r17
-	sw  (sp+76),  r18
-	sw  (sp+80),  r19
-	sw  (sp+84),  r20
-	sw  (sp+88),  r21
-	sw  (sp+92),  r22
-	sw  (sp+96),  r23
-	sw  (sp+100), r24
-	sw  (sp+104), r25
-	sw  (sp+108), r26
-	sw  (sp+112), r27
-	addi r3, sp, 132 /* special case for stack pointer */
-	sw  (sp+116), r3 /* special case for stack pointer */
-	sw	(sp+120), ra
-/*	sw  (sp+124), ea
-	sw  (sp+128), ba */
-
-
-	/* TODO: Aren't we always on kernel stack at this point? */
-
-	/* find out whether we are on kernel or user stack */
-	lw  r3, (r1 + TASK_WHICH_STACK)
-	add r3, r3, r1
-	sw  (r3 + TASK_KSP), sp
+ENTRY(_switch_to)
+
+	/* r1 gets passed through unmodified */
+
+	sw  (r2+TI_CC_R11), r11
+	sw  (r2+TI_CC_R12), r12
+	sw  (r2+TI_CC_R13), r13
+	sw  (r2+TI_CC_R14), r14
+	sw  (r2+TI_CC_R15), r15
+	sw  (r2+TI_CC_R16), r16
+	sw  (r2+TI_CC_R17), r17
+	sw  (r2+TI_CC_R18), r18
+	sw  (r2+TI_CC_R19), r19
+	sw  (r2+TI_CC_R20), r20
+	sw  (r2+TI_CC_R21), r21
+	sw  (r2+TI_CC_R22), r22
+	sw  (r2+TI_CC_R23), r23
+	sw  (r2+TI_CC_R24), r24
+	sw  (r2+TI_CC_R25), r25
+	sw  (r2+TI_CC_GP), r26
+	sw  (r2+TI_CC_FP), r27
+	sw  (r2+TI_CC_SP), sp
+	sw  (r2+TI_CC_RA), ra
+	sw  (r2+TI_CC_EA), ea
+	sw  (r2+TI_CC_BA), ba
+
+	mvhi r4, hi(lm32_state)
+	ori r4, r4, lo(lm32_state)
+	sw (r4+STATE_CURRENT_THREAD), r3
 
 	/* restore next */
-
-	/* find out whether we will be on kernel or user stack */
-	lw  r3, (r2 + TASK_WHICH_STACK)
-	add r3, r3, r2
-	lw  sp, (r3 + TASK_KSP)
-
-	lw  r3,  (sp+16)
-	lw  r4,  (sp+20)
-	lw  r5,  (sp+24)
-	lw  r6,  (sp+28)
-	lw  r7,  (sp+32)
-	lw  r8,  (sp+36)
-	lw  r9,  (sp+40)
-	lw  r10, (sp+44)
-	lw  r11, (sp+48)
-	lw  r12, (sp+52)
-	lw  r13, (sp+56)
-	lw  r14, (sp+60)
-	lw  r15, (sp+64)
-	lw  r16, (sp+68)
-	lw  r17, (sp+72)
-	lw  r18, (sp+76)
-	lw  r19, (sp+80)
-	lw  r20, (sp+84)
-	lw  r21, (sp+88)
-	lw  r22, (sp+92)
-	lw  r23, (sp+96)
-	lw  r24, (sp+100)
-	lw  r25, (sp+104)
-	lw  r26, (sp+108)
-	lw  r27, (sp+112)
-	/* skip sp for now */
-	lw  ra,  (sp+120)
-/*	lw  ea,  (sp+124)
-	lw  ba,  (sp+128) */
-	/* Stack pointer must be restored last --- it will be updated */
-	lw  sp,  (sp+116)
+	lw  r11, (r3+TI_CC_R11)
+	lw  r12, (r3+TI_CC_R12)
+	lw  r13, (r3+TI_CC_R13)
+	lw  r14, (r3+TI_CC_R14)
+	lw  r15, (r3+TI_CC_R15)
+	lw  r16, (r3+TI_CC_R16)
+	lw  r17, (r3+TI_CC_R17)
+	lw  r18, (r3+TI_CC_R18)
+	lw  r19, (r3+TI_CC_R19)
+	lw  r20, (r3+TI_CC_R20)
+	lw  r21, (r3+TI_CC_R21)
+	lw  r22, (r3+TI_CC_R22)
+	lw  r23, (r3+TI_CC_R23)
+	lw  r24, (r3+TI_CC_R24)
+	lw  r25, (r3+TI_CC_R25)
+	lw  r26, (r3+TI_CC_GP)
+	lw  r27, (r3+TI_CC_FP)
+	lw  sp,  (r3+TI_CC_SP)
+	lw  ra,  (r3+TI_CC_RA)
+	lw  ea,  (r3+TI_CC_EA)
+	lw  ba,  (r3+TI_CC_BA)
 
 	ret
104  arch/lm32/kernel/process.c
@@ -50,14 +50,6 @@ asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 asmlinkage void syscall_tail(void);
 
-struct thread_info* lm32_current_thread;
-
-/*
- * The following aren't currently used.
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -70,8 +62,6 @@ static void default_idle(void)
 		__asm__ __volatile__("and r0, r0, r0" ::: "memory");
 }
 
-void (*idle)(void) = default_idle;
-
 /*
  * The idle thread. There's no useful work to be
  * done, so just try to conserve power and have a
@@ -82,7 +72,7 @@ void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		idle();
+		default_idle();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
@@ -132,80 +122,28 @@ void flush_thread(void)
 {
 }
 
-/* no stack unwinding */
-unsigned long get_wchan(struct task_struct *p)
-{
-	return 0;
-}
-
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return 0;
-}
-
-int copy_thread(unsigned long clone_flags,
-		unsigned long usp_thread_fn, unsigned long thread_fn_arg,
-		struct task_struct *p)
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
+	unsigned long thread_fn_arg, struct task_struct *p)
 {
-	unsigned long child_tos = KSTK_TOS(p);
 	struct pt_regs *childregs = task_pt_regs(p);
+	struct cpu_context_save *cc = &task_thread_info(p)->cpu_context;
 
-	if (p->flags & PF_KTHREAD) {
-		/* kernel thread */
-
-		childregs = (struct pt_regs *)(child_tos) - 1;
-		memset(childregs, 0, sizeof(childregs));
-		childregs->r11 = usp_thread_fn;
-		childregs->r12 = thread_fn_arg;
-		/* childregs = full task switch frame on kernel stack of child */
-
-		/* return via ret_from_fork */
-		childregs->ra = (unsigned long)ret_from_kernel_thread;
+	memset(cc, 0, sizeof(*cc));
+	cc->sp = (unsigned long)childregs - 4;
 
-		/* setup ksp/usp */
-		p->thread.ksp = (unsigned long)childregs - 4; /* perhaps not necessary */
-		childregs->sp = p->thread.ksp;
-		p->thread.usp = 0;
-		p->thread.which_stack = 0; /* kernel stack */
+	if (p->flags & PF_KTHREAD) {
+		memset(childregs, 0, sizeof(*childregs));
+		childregs->pt_mode = PT_MODE_KERNEL;
 
-		//printk("copy_thread1: ->pid=%d tsp=%lx r5=%lx p->thread.ksp=%lx p->thread.usp=%lx\n",
-		//		p->pid, task_stack_page(p), childregs->r5, p->thread.ksp, p->thread.usp);
+		cc->r11 = usp_thread_fn;
+		cc->r12 = thread_fn_arg;
+		cc->ra = (unsigned long)ret_from_kernel_thread;
	} else {
-		/* userspace thread (vfork, clone) */
-
-		struct pt_regs* childsyscallregs;
-
-		/* childsyscallregs = full syscall frame on kernel stack of child */
-		childsyscallregs = (struct pt_regs *)(child_tos) - 1; /* 32 = safety */
-		/* child shall have same syscall context to restore as parent has ... */
-		*childsyscallregs = *current_pt_regs();
-
-		/* childregs = full task switch frame on kernel stack of child below * childsyscallregs */
-		childregs = childsyscallregs - 1;
-		memset(childregs, 0, sizeof(childregs));
-
-		/* user stack pointer is shared with the parent per definition of vfork */
-		p->thread.usp = usp_thread_fn;
-
-		/* kernel stack pointer is not shared with parent, it is the beginning of
-		 * the just created new task switch segment on the kernel stack */
-		p->thread.ksp = (unsigned long)childregs - 4;
-		p->thread.which_stack = 0; /* resume from ksp */
-
-		/* child returns via ret_from_fork */
-		childregs->ra = (unsigned long)ret_from_fork;
-		/* child shall return to where sys_vfork_wrapper has been called */
-		childregs->r13 = (unsigned long)syscall_tail;
-		/* child gets zero as return value from syscall */
-		childregs->r11 = 0;
-		/* after task switch segment return the stack pointer shall point to the
-		 * syscall frame */
-		childregs->sp = (unsigned long)childsyscallregs - 4;
-
-		/*printk("copy_thread2: ->pid=%d p=%lx regs=%lx childregs=%lx r5=%lx ra=%lx "
-				"dsf=%lx p->thread.ksp=%lx p->thread.usp=%lx\n",
-				p->pid, p, regs, childregs, childregs->r5, childregs->ra,
-				dup_syscallframe, p->thread.ksp, p->thread.usp);*/
+		*childregs = *current_pt_regs();
+		if (usp_thread_fn)
+			childregs->sp = usp_thread_fn;
+
+		cc->ra = (unsigned long)ret_from_fork;
 	}
 
 	return 0;
@@ -214,20 +152,12 @@ int copy_thread(unsigned long clone_flags,
 /* start userspace thread */
 void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
 {
-	set_fs(USER_DS);
-
-	memset(regs, 0, sizeof(regs));
-
 	/* -4 because we will add 4 later in ret_from_syscall */
 	regs->ea = pc - 4;
 #ifdef CONFIG_BINFMT_ELF_FDPIC
 	regs->r7 = current->mm->context.exec_fdpic_loadmap;
 #endif
 	regs->sp = usp;
-	current->thread.usp = usp;
 	regs->fp = current->mm->start_data;
 	regs->pt_mode = PT_MODE_USER;
-
-	/*printk("start_thread: current=%lx usp=%lx\n", current, usp);*/
 }
-
32  arch/lm32/kernel/ptrace.c
@@ -32,29 +32,15 @@ void ptrace_disable(struct task_struct *child)
 static int ptrace_getregs(struct task_struct *child, unsigned long __user *data)
 {
 	struct pt_regs *regs = task_pt_regs(child);
-	int ret;
 
-	ret = copy_to_user(data, regs, sizeof(regs));
-	if (!ret) {
-		/* special case: sp: we always want to get the USP! */
-		__put_user (current->thread.usp, data + 28);
-	}
-
-	return ret;
+	return copy_to_user(data, regs, sizeof(regs));
 }
 
 static int ptrace_setregs (struct task_struct *child, unsigned long __user *data)
 {
 	struct pt_regs *regs = task_pt_regs(child);
-	int ret;
-
-	ret = copy_from_user(regs, data, sizeof(regs));
-	if (!ret) {
-		/* special case: sp: we always want to set the USP! */
-		child->thread.usp = regs->sp;
-	}
 
-	return ret;
+	return copy_from_user(regs, data, sizeof(regs));
 }
 
 long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
@@ -68,14 +54,9 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
 
 
 		switch (addr) {
-		case 0 ... 27:
-		case 29 ... 31:
+		case 0 ... 31:
 			tmp = *(((unsigned long *)task_pt_regs(child)) + addr);
 			break;
-		case 28: /* sp */
-			/* special case: sp: we always want to get the USP! */
-			tmp = child->thread.usp;
-			break;
 		case PT_TEXT_ADDR:
 			tmp = child->mm->start_code;
 			break;
@@ -93,14 +74,9 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
 	}
 	case PTRACE_POKEUSR:
 		switch (addr) {
-		case 0 ... 27:
-		case 29 ... 31:
+		case 0 ... 31:
 			*(((unsigned long *)task_pt_regs(child)) + addr) = data;
 			break;
-		case 28: /* sp */
-			/* special case: sp: we always want to set the USP! */
-			child->thread.usp = data;
-			break;
 		default:
 			printk("ptrace attempted to POKEUSR at %lx\n", addr);
 			return -EIO;
10  arch/lm32/kernel/setup.c
@@ -50,7 +50,10 @@
 #include <asm/page.h>
 #include <asm/setup.h>
 
-unsigned int kernel_mode = PT_MODE_KERNEL;
+struct lm32_state lm32_state = {
+	.current_thread = (struct thread_info*)&init_thread_union,
+	.kernel_mode = PT_MODE_KERNEL,
+};
 
 char __initdata cmd_line[COMMAND_LINE_SIZE];
 
@@ -66,11 +69,6 @@ void __init __weak plat_setup_arch(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-	/*
-	 * init "current thread structure" pointer
-	 */
-	lm32_current_thread = (struct thread_info*)&init_thread_union;
-
 	/* populate memory_start and memory_end, needed for bootmem_init() */
 	early_init_devtree(__dtb_start);
 
7  arch/lm32/kernel/signal.c
@@ -74,8 +74,9 @@ static int restore_sigcontext(struct pt_regs *regs,
 	return __copy_from_user(regs, &sc->regs, sizeof(*regs));
 }
 
-asmlinkage int _sys_rt_sigreturn(struct pt_regs *regs)
+asmlinkage int sys_rt_sigreturn(void)
 {
+	struct pt_regs *regs = current_pt_regs();
 	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->sp + 4);
 	sigset_t set;
 	stack_t st;
@@ -143,9 +144,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka,
 		goto give_sigsegv;
 
 	err |= __clear_user(&frame->uc, sizeof(frame->uc));
-	err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-	err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags);
-	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
 	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
 
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
2  arch/lm32/kernel/traps.c
@@ -83,7 +83,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
 
 	if (!stack) {
 		if (task)
-			stack = (unsigned long *)task->thread.ksp;
+			stack = (unsigned long *)task_thread_info(task)->cpu_context.sp;
 		else
 			stack = (unsigned long *)&stack;
 	}
