/*
* Userspace and service handler hooks
*
* Copyright (c) 2017 Linaro Limited
*
* SPDX-License-Identifier: Apache-2.0
*
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/syscall.h>
#include <zephyr/arch/arm/exception.h>
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
#include <zephyr/arch/cpu.h>
#endif
_ASM_FILE_PROLOGUE
GTEXT(z_arm_userspace_enter)
GTEXT(z_arm_do_syscall)
GTEXT(arch_user_string_nlen)
GTEXT(z_arm_user_string_nlen_fault_start)
GTEXT(z_arm_user_string_nlen_fault_end)
GTEXT(z_arm_user_string_nlen_fixup)
GDATA(_kernel)
/* Imports */
GDATA(_k_syscall_table)
/**
*
* User space entry function
*
* This function is the entry point to user mode from privileged execution.
 * The conversion is one-way: threads that transition to user mode do not
 * transition back to privileged execution, except temporarily while
 * performing system calls.
*
* The function is invoked as:
* z_arm_userspace_enter(user_entry, p1, p2, p3,
* stack_info.start, stack_info.size);
*/
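/*
 * For illustration (a sketch inferred from the invocation above, not an
 * authoritative declaration), the corresponding C prototype would be
 * roughly:
 *
 *   FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
 *                                            void *p1, void *p2, void *p3,
 *                                            uint32_t stack_start,
 *                                            uint32_t stack_size);
 */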
SECTION_FUNC(TEXT,z_arm_userspace_enter)
/* move user_entry to lr */
mov lr, r0
/* prepare to set stack to privileged stack */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* move p1 to ip */
mov ip, r1
ldr r1, =_thread_offset_to_priv_stack_start
ldr r0, [r0, r1] /* priv stack ptr */
ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, r1
/* Restore p1 from ip */
mov r1, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
ldr r0, [r0, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, ip
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr r0, [r0, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, ip
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
str r0, [ip, #_thread_offset_to_priv_stack_end] /* priv stack end */
#endif
/* store current stack pointer to ip
* the current stack pointer is needed to retrieve
* stack_info.start and stack_info.size
*/
mov ip, sp
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
mov sp, r0
#else
/* set stack to privileged stack
*
* Note [applies only when CONFIG_BUILTIN_STACK_GUARD is enabled]:
* modifying PSP via MSR instruction is not subject to stack limit
* checking, so we do not need to clear PSPLIM before setting PSP.
* The operation is safe since, by design, the privileged stack is
* located in memory higher than the default (user) thread stack.
*/
msr PSP, r0
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* At this point the privileged stack is not yet protected by PSPLIM.
* Since we have just switched to the top of the privileged stack, we
* are safe, as long as the stack can accommodate the maximum exception
* stack frame.
*/
/* set stack pointer limit to the start of the priv stack */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r0, [r0, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
msr PSPLIM, r0
#endif
/* push args to stack */
push {r1,r2,r3,lr}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
mov r1, ip
push {r0,r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
push {r0,ip}
#endif
/* Re-program dynamic memory map.
*
* Important note:
 * z_arm_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
 * to guard the privileged stack against overflows (if building with option
 * CONFIG_MPU_STACK_GUARD). There is a risk of actually overflowing the
 * stack while doing the re-programming. We minimize the risk by placing
 * this function immediately after we have switched to the privileged stack,
 * so that the whole stack area is available for this critical operation.
 *
 * Note that the risk of overflow is higher if using the normal thread
 * stack, since we do not control how much stack is actually left when the
 * user invokes z_arm_userspace_enter().
*/
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
bl z_arm_configure_dynamic_mpu_regions
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0,r3}
/* load up stack info from user stack */
ldr r0, [r3]
ldr r3, [r3, #4]
mov ip, r3
push {r0,r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
pop {r0,ip}
/* load up stack info from user stack */
ldr r0, [ip]
ldr ip, [ip, #4]
push {r0,ip}
#endif
/* clear the user stack area to clean out privileged data,
 * from just past the guard up to the end
 */
mov r2, ip
#ifdef CONFIG_INIT_STACKS
ldr r1,=0xaaaaaaaa
#else
eors r1, r1
#endif
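/* Roughly equivalent C for the memset call below (a sketch; per the AAPCS,
 * r0 = destination, r1 = fill byte, r2 = size):
 *
 *   memset((void *)stack_info.start,
 *          IS_ENABLED(CONFIG_INIT_STACKS) ? 0xaa : 0,
 *          stack_info.size);
 */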
bl memset
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0, r1}
mov ip, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
pop {r0,ip}
#endif
/* r0 contains user stack start, ip contains user stack size */
add r0, r0, ip /* calculate top of stack */
/* pop remaining arguments from stack before switching stacks */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Use r4 to pop lr, then restore r4 */
mov ip, r4
pop {r1,r2,r3,r4}
mov lr, r4
mov r4, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
pop {r1,r2,r3,lr}
#endif
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
/*
* set stack to user stack. We are in SYSTEM state, so r13 and r14 are
* shared with USER state
*/
mov sp, r0
#else
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
* Guard the default (user) stack until thread drops privileges.
*
* Notes:
* PSPLIM is configured *before* PSP switches to the default (user) stack.
* This is safe, since the user stack is located, by design, in a lower
* memory area compared to the privileged stack.
*
 * However, we need to prevent a context switch from occurring, because
 * that would re-configure PSPLIM to guard the privileged stack; we
 * enforce PendSV locking for this purpose.
*
* Between PSPLIM update and PSP switch, the privileged stack will be
* left un-guarded; this is safe, as long as the privileged stack is
* large enough to accommodate a maximum exception stack frame.
*/
/* Temporarily store current IRQ locking status in ip */
mrs ip, BASEPRI
push {r0, ip}
/* Lock PendSV while reprogramming PSP and PSPLIM */
mov r0, #_EXC_PENDSV_PRIO_MASK
msr BASEPRI_MAX, r0
isb
/* Set PSPLIM to guard the thread's user stack. */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r0, [r0, #_thread_offset_to_stack_info_start]
msr PSPLIM, r0
pop {r0, ip}
#endif
/* set stack to user stack */
msr PSP, r0
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Restore interrupt lock status */
msr BASEPRI, ip
isb
#endif
/* restore r0 */
mov r0, lr
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* change processor mode to unprivileged, with all interrupts enabled. */
msr CPSR_c, #MODE_USR
#else
/* change processor mode to unprivileged */
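/* Setting CONTROL.nPRIV (bit 0) to 1 switches thread-mode execution to
 * unprivileged; the same bit is mirrored into the thread's mode state
 * variable, so that a later context switch can restore it.
 */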
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
push {r0, r1, r2, r3}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r1, =_thread_offset_to_mode
ldr r1, [r0, r1]
movs r2, #1
orrs r1, r1, r2
mrs r3, CONTROL
orrs r3, r3, r2
mov ip, r3
/* Store (unprivileged) mode in thread's mode state variable */
ldr r2, =_thread_offset_to_mode
str r1, [r0, r2]
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
push {r0, r1}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r1, [r0, #_thread_offset_to_mode]
orrs r1, r1, #1
mrs ip, CONTROL
orrs ip, ip, #1
/* Store (unprivileged) mode in thread's mode state variable */
str r1, [r0, #_thread_offset_to_mode]
#endif
dsb
msr CONTROL, ip
#endif
/* ISB is not strictly necessary here (stack pointer is not being
* touched), but it's recommended to avoid executing pre-fetched
* instructions with the previous privilege.
*/
isb
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0, r1, r2, r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
pop {r0, r1}
#endif
/* jump to z_thread_entry entry */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
push {r0, r1}
ldr r0, =z_thread_entry
mov ip, r0
pop {r0, r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr ip, =z_thread_entry
#endif
bx ip
/**
*
* Userspace system call function
*
* This function is used to do system calls from unprivileged code. This
* function is responsible for the following:
* 1) Fixing up bad syscalls
* 2) Configuring privileged stack and loading up stack arguments
* 3) Dispatching the system call
 * 4) Restoring the stack and returning to the caller of the SVC
*
*/
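/*
 * For illustration (a caller-side sketch, assuming _SVC_CALL_SYSTEM_CALL
 * is the SVC number reserved for system calls): unprivileged code
 * typically reaches this function via
 *
 *   ldr r6, =<call_id>
 *   svc #_SVC_CALL_SYSTEM_CALL
 *
 * with up to six arguments pre-loaded in r0-r5.
 */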
SECTION_FUNC(TEXT, z_arm_do_syscall)
/* Note [when using MPU-based stack guarding]:
 * The function executes in privileged mode. This implies that we
 * must not use the thread's default unprivileged stack (i.e. push to
 * or pop from it), to avoid possible stack corruption.
*
* Rationale: since we execute in PRIV mode and no MPU guard
* is guarding the end of the default stack, we won't be able
* to detect any stack overflows.
*
* Note [when using built-in stack limit checking on ARMv8-M]:
* At this point PSPLIM is already configured to guard the default (user)
* stack, so pushing to the default thread's stack is safe.
*/
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* save current stack pointer (user stack) */
mov ip, sp
/* temporarily push to user stack */
push {r0,r1}
/* setup privileged stack */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
adds r0, r0, #_thread_offset_to_priv_stack_start
ldr r0, [r0] /* priv stack ptr */
ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r1
/* Store current SP and LR at the beginning of the priv stack */
subs r0, #8
mov r1, ip
str r1, [r0, #0]
mov r1, lr
str r1, [r0, #4]
mov ip, r0
/* Restore user stack and original r0, r1 */
pop {r0, r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* setup privileged stack */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
add ip, #CONFIG_PRIVILEGED_STACK_SIZE
/* Store current SP and LR at the beginning of the priv stack */
subs ip, #8
str sp, [ip, #0]
str lr, [ip, #4]
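/* The beginning (top) of the privileged stack now holds (a sketch):
 *
 *   [ip + 4]: original LR
 *   [ip + 0]: original (user) SP
 */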
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
/*
* The SVC handler has already switched to the privileged stack.
* Store the user SP and LR at the beginning of the priv stack.
*/
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_sp_usr]
push {ip, lr}
#endif
#if !defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* switch to privileged stack */
msr PSP, ip
#endif
/* Note (applies when using stack limit checking):
* We do not need to lock IRQs after switching PSP to the privileged stack;
* PSPLIM is guarding the default (user) stack, which, by design, is
* located at *lower* memory area. Since we switch to the top of the
* privileged stack we are safe, as long as the stack can accommodate
* the maximum exception stack frame.
*/
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Set stack pointer limit (needed in privileged mode) */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
msr PSPLIM, ip
#endif
/*
* r0-r5 contain arguments
* r6 contains call_id
* r8 contains original LR
*/
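/* For illustration (a sketch, assuming the standard Zephyr syscall
 * marshalling convention): each entry of _k_syscall_table points to a
 * handler of roughly this shape, where ssf is the syscall stack frame
 * pointer pushed below:
 *
 *   uintptr_t handler(uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
 *                     uintptr_t arg4, uintptr_t arg5, uintptr_t arg6,
 *                     void *ssf);
 */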
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* save r0, r1 to ip, lr */
mov ip, r0
mov lr, r1
ldr r0, =K_SYSCALL_BAD
cmp r6, r0
bne valid_syscall
/* BAD SYSCALL path */
/* fixup stack frame on the privileged stack, adding ssf */
mov r1, sp
/* ssf is present in r1 (sp) */
push {r1,lr}
push {r4,r5}
/* restore r0, r1 */
mov r0, ip
mov r1, lr
b dispatch_syscall
valid_syscall:
/* push ssf to privileged stack */
mov r1, sp
push {r1}
/* push args to complete stack frame */
push {r4,r5}
dispatch_syscall:
/* original r0 is saved in ip */
ldr r0, =_k_syscall_table
lsls r6, #2
add r0, r6
ldr r0, [r0] /* load table address */
/* swap ip and r0, restore r1 from lr */
mov r1, ip
mov ip, r0
mov r0, r1
mov r1, lr
/* execute function from dispatch table */
blx ip
/* restore LR
* r0 holds the return value and needs to be preserved
*/
mov ip, r0
mov r0, sp
ldr r0, [r0,#16]
mov lr, r0
/* Restore r0 */
mov r0, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr ip, =K_SYSCALL_BAD
cmp r6, ip
bne valid_syscall
/* BAD SYSCALL path */
/* fixup stack frame on the privileged stack, adding ssf */
mov ip, sp
push {r4,r5,ip,lr}
b dispatch_syscall
valid_syscall:
/* push args to complete stack frame */
mov ip, sp
push {r4,r5,ip}
dispatch_syscall:
ldr ip, =_k_syscall_table
lsl r6, #2
add ip, r6
ldr ip, [ip] /* load table address */
/* execute function from dispatch table */
blx ip
/* restore LR */
ldr lr, [sp,#16]
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
* Guard the default (user) stack until thread drops privileges.
*
* Notes:
* PSPLIM is configured *before* PSP switches to the default (user) stack.
* This is safe, since the user stack is located, by design, in a lower
* memory area compared to the privileged stack.
*
 * However, we need to prevent a context switch from occurring, because
 * that would re-configure PSPLIM to guard the privileged stack; we
 * enforce PendSV locking for this purpose.
*
* Between PSPLIM update and PSP switch, the privileged stack will be
* left un-guarded; this is safe, as long as the privileged stack is
* large enough to accommodate a maximum exception stack frame.
*/
/* Temporarily store current IRQ locking status in r2 */
mrs r2, BASEPRI
/* Lock PendSV while reprogramming PSP and PSPLIM */
mov r3, #_EXC_PENDSV_PRIO_MASK
msr BASEPRI_MAX, r3
isb
/* Set PSPLIM to guard the thread's user stack. */
ldr r3, =_kernel
ldr r3, [r3, #_kernel_offset_to_current]
ldr r3, [r3, #_thread_offset_to_stack_info_start] /* stack_info.start */
msr PSPLIM, r3
#endif
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* set stack back to unprivileged stack */
mov ip, r0
mov r0, sp
ldr r0, [r0,#12]
msr PSP, r0
/* Restore r0 */
mov r0, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* set stack back to unprivileged stack */
ldr ip, [sp,#12]
msr PSP, ip
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Restore interrupt lock status */
msr BASEPRI, r2
isb
#endif
push {r0, r1}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
push {r2, r3}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r2, =_thread_offset_to_mode
ldr r1, [r0, r2]
movs r3, #1
orrs r1, r1, r3
/* Store (unprivileged) mode in thread's mode state variable */
str r1, [r0, r2]
dsb
/* drop privileges by setting bit 0 in CONTROL */
mrs r2, CONTROL
orrs r2, r2, r3
msr CONTROL, r2
pop {r2, r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r1, [r0, #_thread_offset_to_mode]
orrs r1, r1, #1
/* Store (unprivileged) mode in thread's mode state variable */
str r1, [r0, #_thread_offset_to_mode]
dsb
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* drop privileges by setting bit 0 in CONTROL */
mrs ip, CONTROL
orrs ip, ip, #1
msr CONTROL, ip
#endif
#endif
/* ISB is not strictly necessary here (stack pointer is not being
* touched), but it's recommended to avoid executing pre-fetched
* instructions with the previous privilege.
*/
isb
pop {r0, r1}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Zero out volatile (caller-saved) registers so as to not leak state from
* kernel mode. The C calling convention for the syscall handler will
* restore the others to original values.
*/
movs r2, #0
movs r3, #0
/*
 * return to the original function that called SVC; set bit 0 to force
 * Thumb mode
 */
/* Save return value temporarily to ip */
mov ip, r0
mov r0, r8
movs r1, #1
orrs r0, r0, r1
/* swap ip, r0 */
mov r1, ip
mov ip, r0
mov r0, r1
movs r1, #0
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* Zero out volatile (caller-saved) registers so as to not leak state from
* kernel mode. The C calling convention for the syscall handler will
* restore the others to original values.
*/
mov r1, #0
mov r2, #0
mov r3, #0
/*
 * return to the original function that called SVC; set bit 0 to force
 * Thumb mode
 */
mov ip, r8
orrs ip, ip, #1
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* Restore user stack pointer */
ldr ip, [sp,#12]
mov sp, ip
/* Zero out volatile (caller-saved) registers so as to not leak state from
* kernel mode. The C calling convention for the syscall handler will
* restore the others to original values.
*/
mov r1, #0
mov r2, #0
mov r3, #0
/*
 * return to the original function that called SVC
 */
mov ip, r8
cps #MODE_USR
#endif
bx ip
/*
* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
*/
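/*
 * Roughly equivalent C logic (a sketch; the fault fixup path below
 * additionally makes the function bail out with *err_arg == -1 if
 * reading s[i] faults):
 *
 *   size_t i = 0;
 *
 *   while (s[i] != '\0' && i < maxsize) {   // reading s[i] may fault
 *           i++;
 *   }
 *   *err_arg = 0;
 *   return i;
 */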
SECTION_FUNC(TEXT, arch_user_string_nlen)
push {r0, r1, r2, r4, r5, lr}
/* sp+4 is error value, init to -1 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
ldr r3, =-1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
mov.w r3, #-1
#endif
str r3, [sp, #4]
/* Perform string length calculation */
movs r3, #0 /* r3 is the counter */
strlen_loop:
z_arm_user_string_nlen_fault_start:
/* r0 contains the string. r5 = *(r0 + r3). This could fault. */
ldrb r5, [r0, r3]
z_arm_user_string_nlen_fault_end:
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
cmp r5, #0
beq strlen_done
cmp r3, r1
beq strlen_done
adds r3, #1
b strlen_loop
#else
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
cmp r5, #0
beq strlen_done
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
cbz r5, strlen_done
#endif
cmp r3, r1
beq.n strlen_done
adds r3, #1
b.n strlen_loop
#endif
strlen_done:
/* Move length calculation from r3 to r0 (return value register) */
mov r0, r3
/* Clear error value since we succeeded */
movs r1, #0
str r1, [sp, #4]
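/* If the ldrb in the loop above faults, the fault handler resumes
 * execution here, skipping the code that clears the error value, so the
 * error reported via err_arg remains -1.
 */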
z_arm_user_string_nlen_fixup:
/* Write error value to err pointer parameter */
ldr r1, [sp, #4]
str r1, [r2, #0]
add sp, #12
pop {r4, r5, pc}