Commit 02772fb

joergroedel authored and suryasaimadhu committed
x86/sev-es: Allocate and map an IST stack for #VC handler
Allocate and map an IST stack and an additional fall-back stack for the
#VC handler. The memory for the stacks is allocated only when SEV-ES is
active.

The #VC handler needs to use an IST stack because a #VC exception can be
raised from kernel space with an unsafe stack, e.g. in the SYSCALL entry
path.

Since the #VC exception can be nested, the #VC handler switches back to
the interrupted stack when entered from kernel space. If switching back
is not possible, the fall-back stack is used.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-43-joro@8bytes.org
1 parent 885689e commit 02772fb
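
The stack-selection policy described in the commit message can be sketched as a
small, userspace-compilable C program. This is only an illustration of the idea
(continue on the interrupted kernel stack when it is known-good, otherwise use
the dedicated fall-back stack); the names, sizes and the sp_is_safe() /
choose_vc_stack() helpers below are hypothetical and are not the kernel's
actual #VC entry code.

/*
 * Hypothetical model of the policy from the commit message, not kernel code:
 * a nested #VC tries to continue on the interrupted stack and only uses the
 * fall-back stack when the interrupted stack pointer cannot be trusted.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_SIZE 4096                         /* placeholder stack size */

static char task_stack[STACK_SIZE];             /* stands in for the interrupted kernel stack */
static char vc2_stack[STACK_SIZE];              /* stands in for the per-CPU fall-back (VC2) stack */

/* Does the interrupted stack pointer lie within a known-good stack region? */
static bool sp_is_safe(uintptr_t sp)
{
        uintptr_t bottom = (uintptr_t)task_stack;
        uintptr_t top    = bottom + STACK_SIZE;

        return sp > bottom && sp <= top;
}

/* Pick the stack pointer the nested #VC handler should continue on. */
static uintptr_t choose_vc_stack(uintptr_t interrupted_sp)
{
        if (sp_is_safe(interrupted_sp))
                return interrupted_sp;                  /* switch back to the interrupted stack */

        return (uintptr_t)vc2_stack + STACK_SIZE;       /* otherwise use the fall-back stack */
}

int main(void)
{
        uintptr_t safe_sp   = (uintptr_t)&task_stack[STACK_SIZE / 2];
        uintptr_t unsafe_sp = 0x1000;                   /* e.g. an unusable SP during SYSCALL entry */

        printf("safe SP   -> %p\n", (void *)choose_vc_stack(safe_sp));
        printf("unsafe SP -> %p\n", (void *)choose_vc_stack(unsafe_sp));
        return 0;
}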

5 files changed: 63 additions and 14 deletions

arch/x86/include/asm/cpu_entry_area.h

Lines changed: 21 additions & 12 deletions
@@ -11,25 +11,29 @@
 #ifdef CONFIG_X86_64
 
 /* Macro to enforce the same ordering and stack sizes */
-#define ESTACKS_MEMBERS(guardsize)			\
-	char DF_stack_guard[guardsize];			\
-	char DF_stack[EXCEPTION_STKSZ];			\
-	char NMI_stack_guard[guardsize];		\
-	char NMI_stack[EXCEPTION_STKSZ];		\
-	char DB_stack_guard[guardsize];			\
-	char DB_stack[EXCEPTION_STKSZ];			\
-	char MCE_stack_guard[guardsize];		\
-	char MCE_stack[EXCEPTION_STKSZ];		\
-	char IST_top_guard[guardsize];			\
+#define ESTACKS_MEMBERS(guardsize, optional_stack_size)	\
+	char DF_stack_guard[guardsize];			\
+	char DF_stack[EXCEPTION_STKSZ];			\
+	char NMI_stack_guard[guardsize];		\
+	char NMI_stack[EXCEPTION_STKSZ];		\
+	char DB_stack_guard[guardsize];			\
+	char DB_stack[EXCEPTION_STKSZ];			\
+	char MCE_stack_guard[guardsize];		\
+	char MCE_stack[EXCEPTION_STKSZ];		\
+	char VC_stack_guard[guardsize];			\
+	char VC_stack[optional_stack_size];		\
+	char VC2_stack_guard[guardsize];		\
+	char VC2_stack[optional_stack_size];		\
+	char IST_top_guard[guardsize];			\
 
 /* The exception stacks' physical storage. No guard pages required */
 struct exception_stacks {
-	ESTACKS_MEMBERS(0)
+	ESTACKS_MEMBERS(0, 0)
 };
 
 /* The effective cpu entry area mapping with guard pages. */
 struct cea_exception_stacks {
-	ESTACKS_MEMBERS(PAGE_SIZE)
+	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
 };
 
 /*
@@ -40,6 +44,8 @@ enum exception_stack_ordering {
 	ESTACK_NMI,
 	ESTACK_DB,
 	ESTACK_MCE,
+	ESTACK_VC,
+	ESTACK_VC2,
 	N_EXCEPTION_STACKS
 };
 
@@ -139,4 +145,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu)
 #define __this_cpu_ist_top_va(name)				\
 	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
 
+#define __this_cpu_ist_bottom_va(name)				\
+	CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
+
 #endif
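
A minimal, standalone sketch of what the new second macro parameter buys
(placeholder sizes, not the real kernel headers): struct exception_stacks
passes 0, so the VC/VC2 slots consume no physical storage there, while struct
cea_exception_stacks reserves full EXCEPTION_STKSZ-sized virtual slots that
sev-es.c later backs with per-CPU pages only when SEV-ES is active.

/*
 * Standalone illustration with placeholder sizes; mirrors the macro shape
 * above but is not the kernel's cpu_entry_area.h.
 */
#include <stdio.h>

#define PAGE_SIZE       4096    /* placeholder */
#define EXCEPTION_STKSZ 4096    /* placeholder */

#define ESTACKS_MEMBERS(guardsize, optional_stack_size) \
        char DF_stack_guard[guardsize];                 \
        char DF_stack[EXCEPTION_STKSZ];                 \
        char NMI_stack_guard[guardsize];                \
        char NMI_stack[EXCEPTION_STKSZ];                \
        char DB_stack_guard[guardsize];                 \
        char DB_stack[EXCEPTION_STKSZ];                 \
        char MCE_stack_guard[guardsize];                \
        char MCE_stack[EXCEPTION_STKSZ];                \
        char VC_stack_guard[guardsize];                 \
        char VC_stack[optional_stack_size];             \
        char VC2_stack_guard[guardsize];                \
        char VC2_stack[optional_stack_size];            \
        char IST_top_guard[guardsize];

/* Physical storage: zero-size guards and zero-size VC/VC2 slots */
struct exception_stacks {
        ESTACKS_MEMBERS(0, 0)
};

/* Virtual layout: guard pages plus full-size VC/VC2 slots */
struct cea_exception_stacks {
        ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};

int main(void)
{
        printf("exception_stacks:     %zu bytes\n", sizeof(struct exception_stacks));
        printf("cea_exception_stacks: %zu bytes\n", sizeof(struct cea_exception_stacks));
        return 0;
}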

arch/x86/include/asm/page_64_types.h

Lines changed: 1 addition & 0 deletions
@@ -28,6 +28,7 @@
 #define IST_INDEX_NMI		1
 #define IST_INDEX_DB		2
 #define IST_INDEX_MCE		3
+#define IST_INDEX_VC		4
 
 /*
  * Set __PAGE_OFFSET to the most negative possible address +

arch/x86/kernel/cpu/common.c

Lines changed: 2 additions & 0 deletions
@@ -1829,6 +1829,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
 	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
 	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
 	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
+	/* Only mapped when SEV-ES is active */
+	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
 }
 
 #else /* CONFIG_X86_64 */
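
For orientation, a tiny userspace model (assumed placeholder size, not the
kernel's CEA_ESTACK_* macros) of how the address written into the IST slot
above relates to the address that setup_vc_stacks() in sev-es.c maps: the TSS
gets the top of the VC stack slot, the mapping starts at the bottom of the
same slot, and the two differ by exactly EXCEPTION_STKSZ because x86 stacks
grow downward.

/*
 * Userspace model, placeholder size only: top and bottom of one VC stack slot.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXCEPTION_STKSZ 4096UL                  /* placeholder size */

static char vc_stack_slot[EXCEPTION_STKSZ];     /* stands in for the VC_stack slot in cea_exception_stacks */

int main(void)
{
        uintptr_t bottom = (uintptr_t)vc_stack_slot;    /* where the backing page gets mapped */
        uintptr_t top    = bottom + EXCEPTION_STKSZ;    /* what the TSS IST entry holds */

        assert(top == bottom + EXCEPTION_STKSZ);
        printf("VC stack slot: bottom=%p top=%p\n", (void *)bottom, (void *)top);
        return 0;
}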

arch/x86/kernel/dumpstack_64.c

Lines changed: 6 additions & 2 deletions
@@ -24,11 +24,13 @@ static const char * const exception_stack_names[] = {
 	[ ESTACK_NMI	]	= "NMI",
 	[ ESTACK_DB	]	= "#DB",
 	[ ESTACK_MCE	]	= "#MC",
+	[ ESTACK_VC	]	= "#VC",
+	[ ESTACK_VC2	]	= "#VC2",
 };
 
 const char *stack_type_name(enum stack_type type)
 {
-	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
 
 	if (type == STACK_TYPE_IRQ)
 		return "IRQ";
@@ -79,6 +81,8 @@ struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
 	EPAGERANGE(NMI),
 	EPAGERANGE(DB),
 	EPAGERANGE(MCE),
+	EPAGERANGE(VC),
+	EPAGERANGE(VC2),
 };
 
 static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
@@ -88,7 +92,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
 	struct pt_regs *regs;
 	unsigned int k;
 
-	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
 
 	begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
 	/*

arch/x86/kernel/sev-es.c

Lines changed: 33 additions & 0 deletions
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/sev-es.h>
 #include <asm/insn-eval.h>
 #include <asm/fpu/internal.h>
@@ -37,10 +38,41 @@ static struct ghcb __initdata *boot_ghcb;
 /* #VC handler runtime per-CPU data */
 struct sev_es_runtime_data {
 	struct ghcb ghcb_page;
+
+	/* Physical storage for the per-CPU IST stack of the #VC handler */
+	char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
+
+	/*
+	 * Physical storage for the per-CPU fall-back stack of the #VC handler.
+	 * The fall-back stack is used when it is not safe to switch back to the
+	 * interrupted stack in the #VC entry code.
+	 */
+	char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
 };
 
 static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
 
+static void __init setup_vc_stacks(int cpu)
+{
+	struct sev_es_runtime_data *data;
+	struct cpu_entry_area *cea;
+	unsigned long vaddr;
+	phys_addr_t pa;
+
+	data = per_cpu(runtime_data, cpu);
+	cea  = get_cpu_entry_area(cpu);
+
+	/* Map #VC IST stack */
+	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC);
+	pa    = __pa(data->ist_stack);
+	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
+
+	/* Map VC fall-back stack */
+	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2);
+	pa    = __pa(data->fallback_stack);
+	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
+}
+
 /* Needed in vc_early_forward_exception */
 void do_early_exception(struct pt_regs *regs, int trapnr);
 
@@ -249,6 +281,7 @@ void __init sev_es_init_vc_handling(void)
 	for_each_possible_cpu(cpu) {
 		alloc_runtime_data(cpu);
 		init_ghcb(cpu);
+		setup_vc_stacks(cpu);
 	}
 }
 
