
Commit 315562c

joergroedel authored and suryasaimadhu committed
x86/sev-es: Adjust #VC IST Stack on entering NMI handler
When an NMI hits in the #VC handler entry code before it has switched to another stack, any subsequent #VC exception in the NMI code-path will overwrite the interrupted #VC handler's stack.

Make sure this doesn't happen by explicitly adjusting the #VC IST entry in the NMI handler for the time it can cause #VC exceptions.

[ bp: Touchups, spelling fixes. ]

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-44-joro@8bytes.org
1 parent 02772fb commit 315562c

3 files changed: 81 additions, 0 deletions


arch/x86/include/asm/sev-es.h

Lines changed: 19 additions & 0 deletions

@@ -78,4 +78,23 @@ extern void vc_no_ghcb(void);
 extern void vc_boot_ghcb(void);
 extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+extern struct static_key_false sev_es_enable_key;
+extern void __sev_es_ist_enter(struct pt_regs *regs);
+extern void __sev_es_ist_exit(void);
+static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
+{
+	if (static_branch_unlikely(&sev_es_enable_key))
+		__sev_es_ist_enter(regs);
+}
+static __always_inline void sev_es_ist_exit(void)
+{
+	if (static_branch_unlikely(&sev_es_enable_key))
+		__sev_es_ist_exit();
+}
+#else
+static inline void sev_es_ist_enter(struct pt_regs *regs) { }
+static inline void sev_es_ist_exit(void) { }
+#endif
+
 #endif
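The wrappers above use the kernel's static-key (jump label) facility, so the calls in the NMI hot path cost only a patched-out branch on machines without SEV-ES. A minimal sketch of the same gating pattern, with hypothetical names (my_feature_key, __my_feature_slow_path, my_feature_detected are illustrations, not part of this patch):

/* Sketch of the static-key gating pattern; names are hypothetical. */
#include <linux/jump_label.h>
#include <linux/types.h>

DEFINE_STATIC_KEY_FALSE(my_feature_key);	/* branch compiles to a NOP by default */

void __my_feature_slow_path(void);		/* out-of-line work, only when enabled */
extern bool my_feature_detected(void);		/* hypothetical detection helper */

static __always_inline void my_feature_hook(void)
{
	/* Near-zero cost until the key is flipped at boot */
	if (static_branch_unlikely(&my_feature_key))
		__my_feature_slow_path();
}

static int __init my_feature_init(void)
{
	if (my_feature_detected())
		static_branch_enable(&my_feature_key);	/* patch the branch in */
	return 0;
}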

arch/x86/kernel/nmi.c

Lines changed: 9 additions & 0 deletions

@@ -33,6 +33,7 @@
 #include <asm/reboot.h>
 #include <asm/cache.h>
 #include <asm/nospec-branch.h>
+#include <asm/sev-es.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/nmi.h>
@@ -488,6 +489,12 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 	this_cpu_write(nmi_cr2, read_cr2());
 nmi_restart:
 
+	/*
+	 * Needs to happen before DR7 is accessed, because the hypervisor can
+	 * intercept DR7 reads/writes, turning those into #VC exceptions.
+	 */
+	sev_es_ist_enter(regs);
+
 	this_cpu_write(nmi_dr7, local_db_save());
 
 	irq_state = idtentry_enter_nmi(regs);
@@ -501,6 +508,8 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 
 	local_db_restore(this_cpu_read(nmi_dr7));
 
+	sev_es_ist_exit();
+
 	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
 		write_cr2(this_cpu_read(nmi_cr2));
 	if (this_cpu_dec_return(nmi_state))
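The placement matters: sev_es_ist_enter() sits above local_db_save() because, under SEV-ES, the hypervisor may intercept the DR7 access, making it the first point in the NMI path that can raise a #VC. A toy user-space model of that ordering constraint (illustrative only; all names are hypothetical, and the assert stands in for stack corruption):

/* Toy model of the ordering constraint; not kernel code. */
#include <assert.h>
#include <stdbool.h>

static bool vc_ist_adjusted;		/* has sev_es_ist_enter() run yet? */

static void simulated_vc_exception(void)
{
	/* A real #VC here would clobber the interrupted handler's stack */
	assert(vc_ist_adjusted);
}

static unsigned long read_dr7(void)
{
	simulated_vc_exception();	/* hypervisor intercepts the DR7 access */
	return 0;
}

static void nmi_handler(void)
{
	vc_ist_adjusted = true;		/* sev_es_ist_enter(regs) */
	(void)read_dr7();		/* local_db_save(): first possible #VC */
	vc_ist_adjusted = false;	/* sev_es_ist_exit() */
}

int main(void)
{
	nmi_handler();
	return 0;
}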

arch/x86/kernel/sev-es.c

Lines changed: 53 additions & 0 deletions

@@ -51,6 +51,7 @@ struct sev_es_runtime_data {
 };
 
 static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
+DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
 
 static void __init setup_vc_stacks(int cpu)
 {
@@ -73,6 +74,55 @@ static void __init setup_vc_stacks(int cpu)
 	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
 }
 
+static __always_inline bool on_vc_stack(unsigned long sp)
+{
+	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
+}
+
+/*
+ * This function handles the case when an NMI is raised in the #VC exception
+ * handler entry code. In this case, the IST entry for #VC must be adjusted,
+ * so that any subsequent #VC exception will not overwrite the stack contents
+ * of the interrupted #VC handler.
+ *
+ * The IST entry is adjusted unconditionally so that it can also be
+ * unconditionally adjusted back in sev_es_ist_exit(). Otherwise a nested
+ * sev_es_ist_exit() call may adjust back the IST entry too early.
+ */
+void noinstr __sev_es_ist_enter(struct pt_regs *regs)
+{
+	unsigned long old_ist, new_ist;
+
+	/* Read old IST entry */
+	old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+	/* Make room on the IST stack */
+	if (on_vc_stack(regs->sp))
+		new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
+	else
+		new_ist = old_ist - sizeof(old_ist);
+
+	/* Store old IST entry */
+	*(unsigned long *)new_ist = old_ist;
+
+	/* Set new IST entry */
+	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
+}
+
+void noinstr __sev_es_ist_exit(void)
+{
+	unsigned long ist;
+
+	/* Read IST entry */
+	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
+		return;
+
+	/* Read back old IST entry and write it to the TSS */
+	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
+}
+
 /* Needed in vc_early_forward_exception */
 void do_early_exception(struct pt_regs *regs, int trapnr);
 
@@ -277,6 +327,9 @@ void __init sev_es_init_vc_handling(void)
 	if (!sev_es_active())
 		return;
 
+	/* Enable SEV-ES special handling */
+	static_branch_enable(&sev_es_enable_key);
+
 	/* Initialize per-cpu GHCB pages */
 	for_each_possible_cpu(cpu) {
 		alloc_runtime_data(cpu);
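The enter/exit pair works like a push/pop threaded through the stack itself: each enter stores the previous IST pointer just below the carved-out region and each exit loads it back, so arbitrarily nested NMIs unwind correctly. A small user-space model of that scheme (illustrative only; a plain array stands in for the per-CPU #VC IST stack and all names are hypothetical):

/* User-space model of the nested IST push/pop scheme; not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STACK_WORDS 512
static uint64_t vc_stack[STACK_WORDS];	/* stands in for the #VC IST stack */
static uintptr_t ist;			/* stands in for tss.ist[IST_INDEX_VC] */

static uintptr_t stack_top(void)
{
	return (uintptr_t)&vc_stack[STACK_WORDS];
}

/* Mirrors __sev_es_ist_enter(): save the old pointer, point IST below it */
static void ist_enter(uintptr_t interrupted_sp, int sp_on_vc_stack)
{
	uintptr_t old = ist;
	uintptr_t new = (sp_on_vc_stack ? (interrupted_sp & ~7UL) : old) - sizeof(old);

	memcpy((void *)new, &old, sizeof(old));	/* store old IST entry on the stack */
	ist = new;				/* set new IST entry */
}

/* Mirrors __sev_es_ist_exit(): pop the saved pointer back */
static void ist_exit(void)
{
	assert(ist != stack_top());		/* unbalanced exit, like the WARN_ON() */
	memcpy(&ist, (void *)ist, sizeof(ist));
}

int main(void)
{
	uintptr_t base;

	ist = stack_top();
	base = ist;

	ist_enter(0, 0);		/* NMI hits while sp is off the #VC stack */
	ist_enter(ist - 128, 1);	/* nested NMI interrupts a #VC handler */
	ist_exit();
	ist_exit();

	assert(ist == base);		/* fully unwound: pointer restored */
	puts("IST entry restored after nested enter/exit");
	return 0;
}

The on_vc_stack() branch in the real code is what the sp_on_vc_stack case models: when the NMI lands after #VC delivery but before the handler has switched stacks, the new IST entry must be carved out below regs->sp to protect the live frame, not merely below the old IST pointer.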
