Commit f37099b

mrutland-arm authored and wildea01 committed
arm64: convert syscall trace logic to C
Currently syscall tracing is a tricky assembly state machine, which can be rather difficult to follow, and even harder to modify. Before we start fiddling with it for pt_regs syscalls, let's convert it to C.

This is not intended to have any functional change.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent 4141c85 commit f37099b

2 files changed: +56 -56 lines changed

arch/arm64/kernel/entry.S

Lines changed: 2 additions & 53 deletions
@@ -896,26 +896,6 @@ el0_error_naked:
 	b	ret_to_user
 ENDPROC(el0_error)
 
-
-/*
- * This is the fast syscall return path. We do as little as possible here,
- * and this includes saving x0 back into the kernel stack.
- */
-ret_fast_syscall:
-	disable_daif
-#ifndef CONFIG_DEBUG_RSEQ
-	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
-	and	x2, x1, #_TIF_SYSCALL_WORK
-	cbnz	x2, ret_fast_syscall_trace
-	and	x2, x1, #_TIF_WORK_MASK
-	cbnz	x2, work_pending
-	enable_step_tsk x1, x2
-	kernel_exit 0
-#endif
-ret_fast_syscall_trace:
-	enable_daif
-	b	__sys_trace_return_skipped	// we already saved x0
-
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
@@ -971,44 +951,13 @@ alternative_else_nop_endif
 #endif
 
 el0_svc_naked:					// compat entry point
-	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
-	enable_daif
-	ct_user_exit 1
-
-	tst	x16, #_TIF_SYSCALL_WORK		// check for syscall hooks
-	b.ne	__sys_trace
 	mov	x0, sp
 	mov	w1, wscno
 	mov	w2, wsc_nr
 	mov	x3, stbl
-	bl	invoke_syscall
-	b	ret_fast_syscall
-ENDPROC(el0_svc)
-
-/*
- * This is the really slow path. We're going to be doing context
- * switches, and waiting for our parent to respond.
- */
-__sys_trace:
-	cmp	wscno, #NO_SYSCALL		// user-issued syscall(-1)?
-	b.ne	1f
-	mov	x0, #-ENOSYS			// set default errno if so
-	str	x0, [sp, #S_X0]
-1:	mov	x0, sp
-	bl	syscall_trace_enter
-	cmp	w0, #NO_SYSCALL			// skip the syscall?
-	b.eq	__sys_trace_return_skipped
-
-	mov	x0, sp
-	mov	w1, wscno
-	mov	w2, wsc_nr
-	mov	x3, stbl
-	bl	invoke_syscall
-
-__sys_trace_return_skipped:
-	mov	x0, sp
-	bl	syscall_trace_exit
+	bl	el0_svc_common
 	b	ret_to_user
+ENDPROC(el0_svc)
 
 	.popsection				// .entry.text
 
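With the trace state machine removed, the assembly above is left doing nothing but marshalling arguments and branching to C. As a reading aid (not part of the commit), here is how the remaining mov instructions line up with the parameters of the new C entry point under the AArch64 procedure call standard, where x0-x3 carry the first four arguments:

/*
 * Illustration only: register-to-parameter mapping for the call into C.
 *
 *   mov x0, sp       ->  struct pt_regs *regs    (saved user register frame)
 *   mov w1, wscno    ->  int scno                (syscall number)
 *   mov w2, wsc_nr   ->  int sc_nr               (number of table entries)
 *   mov x3, stbl     ->  const syscall_fn_t syscall_table[]
 */
asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
			       const syscall_fn_t syscall_table[]);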

arch/arm64/kernel/syscall.c

Lines changed: 54 additions & 3 deletions
@@ -1,11 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
 #include <linux/errno.h>
 #include <linux/nospec.h>
 #include <linux/ptrace.h>
 #include <linux/syscalls.h>
 
+#include <asm/daifflags.h>
 #include <asm/syscall.h>
+#include <asm/thread_info.h>
 
 long compat_arm_syscall(struct pt_regs *regs);
 
@@ -29,9 +33,9 @@ static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
 			   regs->regs[3], regs->regs[4], regs->regs[5]);
 }
 
-asmlinkage void invoke_syscall(struct pt_regs *regs, unsigned int scno,
-			       unsigned int sc_nr,
-			       const syscall_fn_t syscall_table[])
+static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+			   unsigned int sc_nr,
+			   const syscall_fn_t syscall_table[])
 {
 	long ret;
 
@@ -45,3 +49,50 @@ asmlinkage void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 
 	regs->regs[0] = ret;
 }
+
+static inline bool has_syscall_work(unsigned long flags)
+{
+	return unlikely(flags & _TIF_SYSCALL_WORK);
+}
+
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
+asmlinkage void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
+			       const syscall_fn_t syscall_table[])
+{
+	unsigned long flags = current_thread_info()->flags;
+
+	regs->orig_x0 = regs->regs[0];
+	regs->syscallno = scno;
+
+	local_daif_restore(DAIF_PROCCTX);
+	user_exit();
+
+	if (has_syscall_work(flags)) {
+		/* set default errno for user-issued syscall(-1) */
+		if (scno == NO_SYSCALL)
+			regs->regs[0] = -ENOSYS;
+		scno = syscall_trace_enter(regs);
+		if (scno == NO_SYSCALL)
+			goto trace_exit;
+	}
+
+	invoke_syscall(regs, scno, sc_nr, syscall_table);
+
+	/*
+	 * The tracing status may have changed under our feet, so we have to
+	 * check again. However, if we were tracing entry, then we always trace
+	 * exit regardless, as the old entry assembly did.
+	 */
+	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
+		local_daif_mask();
+		flags = current_thread_info()->flags;
+		if (!has_syscall_work(flags))
+			return;
+		local_daif_restore(DAIF_PROCCTX);
+	}
+
+trace_exit:
+	syscall_trace_exit(regs);
+}
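The one subtle piece of el0_svc_common() is the exit fast path: the flags sampled at entry decide whether tracing was active, but a tracer can attach while the syscall runs, so the flags are re-read only after masking DAIF, at which point no new work can be queued before the return to userspace (and CONFIG_DEBUG_RSEQ disables the fast path entirely, as the old assembly's #ifndef did). Below is a minimal, compilable userspace sketch of that re-check ordering; mask_interrupts(), unmask_interrupts(), and read_thread_flags() are hypothetical stand-ins for local_daif_mask(), local_daif_restore(DAIF_PROCCTX), and current_thread_info()->flags.

#include <stdbool.h>
#include <stdio.h>

#define _TIF_SYSCALL_WORK	0x1UL

static unsigned long thread_flags;	/* set asynchronously, e.g. by a tracer */

static void mask_interrupts(void)   { /* stand-in for local_daif_mask() */ }
static void unmask_interrupts(void) { /* stand-in for local_daif_restore() */ }
static unsigned long read_thread_flags(void) { return thread_flags; }

static bool has_syscall_work(unsigned long flags)
{
	return flags & _TIF_SYSCALL_WORK;
}

/* Returns true if the caller may skip syscall_trace_exit(). */
static bool exit_fast_path(unsigned long flags_at_entry)
{
	if (has_syscall_work(flags_at_entry))
		return false;	/* traced entry => always trace exit */

	/*
	 * Work may have been queued since entry, so re-read the flags with
	 * "interrupts" masked: after masking, no new work can appear before
	 * the return to user.
	 */
	mask_interrupts();
	if (!has_syscall_work(read_thread_flags()))
		return true;	/* fast path: return with DAIF still masked */
	unmask_interrupts();
	return false;		/* slow path: fall through to trace exit */
}

int main(void)
{
	printf("fast path ok: %d\n", exit_fast_path(0));	/* 1 */
	thread_flags = _TIF_SYSCALL_WORK;	/* tracer attached mid-syscall */
	printf("fast path ok: %d\n", exit_fast_path(0));	/* 0 */
	return 0;
}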
