Skip to content

Commit 8cd969d

Browse files
Dave Martin authored and wildea01 committed
arm64/sve: Signal handling support
This patch implements support for saving and restoring the SVE registers around signals. A fixed-size header struct sve_context is always included in the signal frame encoding the thread's vector length at the time of signal delivery, optionally followed by a variable-layout structure encoding the SVE registers. Because of the need to preserve backwards compatibility, the FPSIMD view of the SVE registers is always dumped as a struct fpsimd_context in the usual way, in addition to any sve_context. The SVE vector registers are dumped in full, including bits 127:0 of each register which alias the corresponding FPSIMD vector registers in the hardware. To avoid any ambiguity about which alias to restore during sigreturn, the kernel always restores bits 127:0 of each SVE vector register from the fpsimd_context in the signal frame (which must be present): userspace needs to take this into account if it wants to modify the SVE vector register contents on return from a signal. FPSR and FPCR, which are used by both FPSIMD and SVE, are not included in sve_context because they are always present in fpsimd_context anyway. For signal delivery, a new helper fpsimd_signal_preserve_current_state() is added to update _both_ the FPSIMD and SVE views in the task struct, to make it easier to populate this information into the signal frame. Because of the redundancy between the two views of the state, only one is updated otherwise. Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Dave Martin <Dave.Martin@arm.com> Cc: Alex Bennée <alex.bennee@linaro.org> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Will Deacon <will.deacon@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent 79ab047 commit 8cd969d

File tree

4 files changed

+206
-19
lines changed

4 files changed

+206
-19
lines changed

arch/arm64/include/asm/fpsimd.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,7 @@ extern void fpsimd_load_state(struct fpsimd_state *state);
6363
extern void fpsimd_thread_switch(struct task_struct *next);
6464
extern void fpsimd_flush_thread(void);
6565

66+
extern void fpsimd_signal_preserve_current_state(void);
6667
extern void fpsimd_preserve_current_state(void);
6768
extern void fpsimd_restore_current_state(void);
6869
extern void fpsimd_update_current_state(struct fpsimd_state *state);

arch/arm64/kernel/fpsimd.c

Lines changed: 45 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -300,6 +300,32 @@ static void fpsimd_to_sve(struct task_struct *task)
300300
sizeof(fst->vregs[i]));
301301
}
302302

303+
/*
304+
* Transfer the SVE state in task->thread.sve_state to
305+
* task->thread.fpsimd_state.
306+
*
307+
* Task can be a non-runnable task, or current. In the latter case,
308+
* softirqs (and preemption) must be disabled.
309+
* task->thread.sve_state must point to at least sve_state_size(task)
310+
* bytes of allocated kernel memory.
311+
* task->thread.sve_state must be up to date before calling this function.
312+
*/
313+
static void sve_to_fpsimd(struct task_struct *task)
314+
{
315+
unsigned int vq;
316+
void const *sst = task->thread.sve_state;
317+
struct fpsimd_state *fst = &task->thread.fpsimd_state;
318+
unsigned int i;
319+
320+
if (!system_supports_sve())
321+
return;
322+
323+
vq = sve_vq_from_vl(task->thread.sve_vl);
324+
for (i = 0; i < 32; ++i)
325+
memcpy(&fst->vregs[i], ZREG(sst, vq, i),
326+
sizeof(fst->vregs[i]));
327+
}
328+
303329
#ifdef CONFIG_ARM64_SVE
304330

305331
/*
@@ -501,25 +527,29 @@ void fpsimd_flush_thread(void)
501527
/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'.
 *
 * Softirqs (and hence preemption) are blocked around the save so that the
 * register contents cannot be switched out from under us mid-save.
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();
	task_fpsimd_save();
	local_bh_enable();
}
522540

541+
/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 *
 * When the task is using SVE (TIF_SVE set), the authoritative saved image
 * is thread.sve_state, so the FPSIMD view must be regenerated from it here
 * before the signal code copies thread.fpsimd_state out to userspace.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}
552+
523553
/*
524554
* Load the userland FPSIMD state of 'current' from memory, but only if the
525555
* FPSIMD state already held in the registers is /not/ the most recent FPSIMD
@@ -555,7 +585,12 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
555585

556586
local_bh_disable();
557587

558-
fpsimd_load_state(state);
588+
if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
589+
current->thread.fpsimd_state = *state;
590+
fpsimd_to_sve(current);
591+
}
592+
task_fpsimd_load();
593+
559594
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
560595
struct fpsimd_state *st = &current->thread.fpsimd_state;
561596

arch/arm64/kernel/signal.c

Lines changed: 159 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@ struct rt_sigframe_user_layout {
6464

6565
unsigned long fpsimd_offset;
6666
unsigned long esr_offset;
67+
unsigned long sve_offset;
6768
unsigned long extra_offset;
6869
unsigned long end_offset;
6970
};
@@ -180,9 +181,6 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
180181
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
181182
int err;
182183

183-
/* dump the hardware registers to the fpsimd_state structure */
184-
fpsimd_preserve_current_state();
185-
186184
/* copy the FP and status/control registers */
187185
err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
188186
__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
@@ -215,17 +213,127 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
215213
__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
216214
__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
217215

216+
clear_thread_flag(TIF_SVE);
217+
218218
/* load the hardware registers from the fpsimd_state structure */
219219
if (!err)
220220
fpsimd_update_current_state(&fpsimd);
221221

222222
return err ? -EFAULT : 0;
223223
}
224224

225+
225226
struct user_ctxs {
226227
struct fpsimd_context __user *fpsimd;
228+
struct sve_context __user *sve;
227229
};
228230

231+
#ifdef CONFIG_ARM64_SVE
232+
233+
/*
 * Dump the thread's SVE record into the user signal frame at *ctx.
 * The header (magic, size, vl) is always written; the variable-size
 * register payload follows only if the thread is actually using SVE.
 * Returns 0 on success, -EFAULT on any uaccess fault.
 */
static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = current->thread.sve_vl;
	unsigned int vq = 0;

	/* vq == 0 encodes "header only, no SVE register payload" */
	if (test_thread_flag(TIF_SVE))
		vq = sve_vq_from_vl(vl);

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	/* Record size is rounded up so the next frame record stays aligned */
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct: setup_rt_frame() calls
		 * fpsimd_signal_preserve_current_state() before building the
		 * frame.
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}
264+
265+
/*
 * Restore the SVE (and FPSIMD) state from a parsed signal frame on
 * sigreturn.  Bits 127:0 of each Z register are always taken from the
 * mandatory fpsimd record, never from the SVE payload.
 * Returns 0 on success, -EFAULT on uaccess fault, -EINVAL on a
 * malformed or mismatched sve record.
 */
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err;
	unsigned int vq;
	struct fpsimd_state fpsimd;
	struct sve_context sve;

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	/* The frame's vector length must match the thread's current VL */
	if (sve.vl != current->thread.sve_vl)
		return -EINVAL;

	/* Header-only record: drop SVE state and restore FPSIMD only */
	if (sve.head.size <= sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(sve.vl);

	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	barrier();
	/* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */

	set_thread_flag(TIF_FOREIGN_FPSTATE);
	barrier();
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current);
	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SVE);

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
327+
328+
#else /* ! CONFIG_ARM64_SVE */
329+
330+
/* Turn any non-optimised out attempts to use these into a link error: */
331+
extern int preserve_sve_context(void __user *ctx);
332+
extern int restore_sve_fpsimd_context(struct user_ctxs *user);
333+
334+
#endif /* ! CONFIG_ARM64_SVE */
335+
336+
229337
static int parse_user_sigframe(struct user_ctxs *user,
230338
struct rt_sigframe __user *sf)
231339
{
@@ -238,6 +346,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
238346
char const __user *const sfp = (char const __user *)sf;
239347

240348
user->fpsimd = NULL;
349+
user->sve = NULL;
241350

242351
if (!IS_ALIGNED((unsigned long)base, 16))
243352
goto invalid;
@@ -288,6 +397,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
288397
/* ignore */
289398
break;
290399

400+
case SVE_MAGIC:
401+
if (!system_supports_sve())
402+
goto invalid;
403+
404+
if (user->sve)
405+
goto invalid;
406+
407+
if (size < sizeof(*user->sve))
408+
goto invalid;
409+
410+
user->sve = (struct sve_context __user *)head;
411+
break;
412+
291413
case EXTRA_MAGIC:
292414
if (have_extra_context)
293415
goto invalid;
@@ -364,9 +486,6 @@ static int parse_user_sigframe(struct user_ctxs *user,
364486
}
365487

366488
done:
367-
if (!user->fpsimd)
368-
goto invalid;
369-
370489
return 0;
371490

372491
invalid:
@@ -400,8 +519,19 @@ static int restore_sigframe(struct pt_regs *regs,
400519
if (err == 0)
401520
err = parse_user_sigframe(&user, sf);
402521

403-
if (err == 0)
404-
err = restore_fpsimd_context(user.fpsimd);
522+
if (err == 0) {
523+
if (!user.fpsimd)
524+
return -EINVAL;
525+
526+
if (user.sve) {
527+
if (!system_supports_sve())
528+
return -EINVAL;
529+
530+
err = restore_sve_fpsimd_context(&user);
531+
} else {
532+
err = restore_fpsimd_context(user.fpsimd);
533+
}
534+
}
405535

406536
return err;
407537
}
@@ -460,6 +590,18 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
460590
return err;
461591
}
462592

593+
if (system_supports_sve()) {
594+
unsigned int vq = 0;
595+
596+
if (test_thread_flag(TIF_SVE))
597+
vq = sve_vq_from_vl(current->thread.sve_vl);
598+
599+
err = sigframe_alloc(user, &user->sve_offset,
600+
SVE_SIG_CONTEXT_SIZE(vq));
601+
if (err)
602+
return err;
603+
}
604+
463605
return sigframe_alloc_end(user);
464606
}
465607

@@ -501,6 +643,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
501643
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
502644
}
503645

646+
/* Scalable Vector Extension state, if present */
647+
if (system_supports_sve() && err == 0 && user->sve_offset) {
648+
struct sve_context __user *sve_ctx =
649+
apply_user_offset(user, user->sve_offset);
650+
err |= preserve_sve_context(sve_ctx);
651+
}
652+
504653
if (err == 0 && user->extra_offset) {
505654
char __user *sfp = (char __user *)user->sigframe;
506655
char __user *userp =
@@ -600,6 +749,8 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
600749
struct rt_sigframe __user *frame;
601750
int err = 0;
602751

752+
fpsimd_signal_preserve_current_state();
753+
603754
if (get_sigframe(&user, ksig, regs))
604755
return 1;
605756

arch/arm64/kernel/signal32.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -239,7 +239,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
239239
* Note that this also saves V16-31, which aren't visible
240240
* in AArch32.
241241
*/
242-
fpsimd_preserve_current_state();
242+
fpsimd_signal_preserve_current_state();
243243

244244
/* Place structure header on the stack */
245245
__put_user_error(magic, &frame->magic, err);

0 commit comments

Comments
 (0)