arch/x86/boot/compressed/sev.c (10 additions, 0 deletions)
@@ -103,6 +103,16 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
return ES_OK;
}

static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
{
return ES_OK;
}

static bool fault_in_kernel_space(unsigned long address)
{
return false;
}

#undef __init
#undef __pa
#define __init
arch/x86/include/asm/cpu_entry_area.h (0 additions, 4 deletions)
@@ -130,10 +130,6 @@ struct cpu_entry_area {
};

#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)

/* Total size includes the readonly IDT mapping page as well: */
#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
arch/x86/include/asm/kasan.h (3 additions, 0 deletions)
@@ -28,9 +28,12 @@
#ifdef CONFIG_KASAN
void __init kasan_early_init(void);
void __init kasan_init(void);
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
#else
static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
int nid) { }
#endif

#endif
arch/x86/include/asm/pgtable_areas.h (7 additions, 1 deletion)
@@ -11,6 +11,12 @@

#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)

#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
#ifdef CONFIG_X86_32
#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + \
(CPU_ENTRY_AREA_SIZE * NR_CPUS) - \
CPU_ENTRY_AREA_BASE)
#else
#define CPU_ENTRY_AREA_MAP_SIZE P4D_SIZE
#endif

#endif /* _ASM_X86_PGTABLE_AREAS_H */
arch/x86/kernel/hw_breakpoint.c (1 addition, 1 deletion)
@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)

/* CPU entry area is always used for CPU entry */
if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
CPU_ENTRY_AREA_TOTAL_SIZE))
CPU_ENTRY_AREA_MAP_SIZE))
return true;

/*
arch/x86/kernel/sev-shared.c (44 additions, 9 deletions)
@@ -589,14 +589,36 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
unsigned long address,
bool write)
{
if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
ctxt->fi.vector = X86_TRAP_PF;
ctxt->fi.error_code = X86_PF_USER;
ctxt->fi.cr2 = address;
if (write)
ctxt->fi.error_code |= X86_PF_WRITE;

return ES_EXCEPTION;
}

return ES_OK;
}

static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
void *src, char *buf,
unsigned int data_size,
unsigned int count,
bool backwards)
{
int i, b = backwards ? -1 : 1;
enum es_result ret = ES_OK;
unsigned long address = (unsigned long)src;
enum es_result ret;

ret = vc_insn_string_check(ctxt, address, false);
if (ret != ES_OK)
return ret;

for (i = 0; i < count; i++) {
void *s = src + (i * data_size * b);
@@ -617,7 +639,12 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
bool backwards)
{
int i, s = backwards ? -1 : 1;
enum es_result ret = ES_OK;
unsigned long address = (unsigned long)dst;
enum es_result ret;

ret = vc_insn_string_check(ctxt, address, true);
if (ret != ES_OK)
return ret;

for (i = 0; i < count; i++) {
void *d = dst + (i * data_size * s);
@@ -653,6 +680,9 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
struct insn *insn = &ctxt->insn;
size_t size;
u64 port;

*exitinfo = 0;

switch (insn->opcode.bytes[0]) {
@@ -661,49 +691,51 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0x6d:
*exitinfo |= IOIO_TYPE_INS;
*exitinfo |= IOIO_SEG_ES;
*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
port = ctxt->regs->dx & 0xffff;
break;

/* OUTS opcodes */
case 0x6e:
case 0x6f:
*exitinfo |= IOIO_TYPE_OUTS;
*exitinfo |= IOIO_SEG_DS;
*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
port = ctxt->regs->dx & 0xffff;
break;

/* IN immediate opcodes */
case 0xe4:
case 0xe5:
*exitinfo |= IOIO_TYPE_IN;
*exitinfo |= (u8)insn->immediate.value << 16;
port = (u8)insn->immediate.value & 0xffff;
break;

/* OUT immediate opcodes */
case 0xe6:
case 0xe7:
*exitinfo |= IOIO_TYPE_OUT;
*exitinfo |= (u8)insn->immediate.value << 16;
port = (u8)insn->immediate.value & 0xffff;
break;

/* IN register opcodes */
case 0xec:
case 0xed:
*exitinfo |= IOIO_TYPE_IN;
*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
port = ctxt->regs->dx & 0xffff;
break;

/* OUT register opcodes */
case 0xee:
case 0xef:
*exitinfo |= IOIO_TYPE_OUT;
*exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
port = ctxt->regs->dx & 0xffff;
break;

default:
return ES_DECODE_FAILED;
}

*exitinfo |= port << 16;

switch (insn->opcode.bytes[0]) {
case 0x6c:
case 0x6e:
@@ -713,12 +745,15 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0xee:
/* Single byte opcodes */
*exitinfo |= IOIO_DATA_8;
size = 1;
break;
default:
/* Length determined by instruction parsing */
*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
: IOIO_DATA_32;
size = (insn->opnd_bytes == 2) ? 2 : 4;
}

switch (insn->addr_bytes) {
case 2:
*exitinfo |= IOIO_ADDR_16;
@@ -734,7 +769,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
if (insn_has_rep_prefix(insn))
*exitinfo |= IOIO_REP;

return ES_OK;
return vc_ioio_check(ctxt, (u16)port, size);
}

static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
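As an aside (not part of the patch): the check added in vc_insn_string_check() above refuses to emulate INS/OUTS from user mode when the memory operand points into kernel space. Below is a minimal sketch of that decision; the hyp_* names and the TASK_SIZE_MAX value are illustrative stand-ins for the kernel's fault_in_kernel_space().

#include <stdbool.h>
#include <stdint.h>

/* Illustrative x86-64 user address limit (4-level paging). */
#define HYP_TASK_SIZE_MAX 0x00007ffffffff000ULL

/* Stand-in for the kernel's fault_in_kernel_space(). */
static bool hyp_fault_in_kernel_space(uint64_t address)
{
	return address >= HYP_TASK_SIZE_MAX;
}

/*
 * Mirrors vc_insn_string_check(): a string I/O instruction issued from
 * user mode must not be emulated against a kernel address; the #VC
 * handler reports a #PF to the guest instead of touching the memory.
 */
static bool hyp_reject_string_io(bool user_mode, uint64_t address)
{
	return user_mode && hyp_fault_in_kernel_space(address);
}

int main(void)
{
	/* User-mode OUTS with its source pointer in kernel space: rejected. */
	return hyp_reject_string_io(true, 0xffffffff81000000ULL) ? 0 : 1;
}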
arch/x86/kernel/sev.c (30 additions, 0 deletions)
@@ -493,6 +493,33 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
return ES_OK;
}

static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
{
BUG_ON(size > 4);

if (user_mode(ctxt->regs)) {
struct thread_struct *t = &current->thread;
struct io_bitmap *iobm = t->io_bitmap;
size_t idx;

if (!iobm)
goto fault;

for (idx = port; idx < port + size; ++idx) {
if (test_bit(idx, iobm->bitmap))
goto fault;
}
}

return ES_OK;

fault:
ctxt->fi.vector = X86_TRAP_GP;
ctxt->fi.error_code = 0;

return ES_EXCEPTION;
}

/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"

@@ -1533,6 +1560,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
return ES_DECODE_FAILED;
}

if (user_mode(ctxt->regs))
return ES_UNSUPPORTED;

switch (mmio) {
case MMIO_WRITE:
memcpy(ghcb->shared_buffer, reg_data, bytes);
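A hypothetical userspace view of the vc_ioio_check() change above, assuming a SEV-ES guest running this patch and a root shell: a set bit in the task's I/O permission bitmap means the port was not granted, so only ports enabled via ioperm() are still emulated, and anything else now gets a #GP injected by the #VC handler instead of being forwarded to the hypervisor. (The vc_handle_mmio() hunk directly above needs no example: user-mode MMIO is simply refused.)

/* Hypothetical test program; requires root for ioperm(). */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/* Grant ports 0x70-0x71; this clears their bits in the I/O bitmap. */
	if (ioperm(0x70, 2, 1)) {
		perror("ioperm");
		return 1;
	}

	outb(0x0a, 0x70);			/* permitted: bits are clear */
	unsigned char v = inb(0x71);		/* permitted as well */
	printf("CMOS register 0x0a = 0x%02x\n", v);

	/*
	 * Port 0x80 was never granted, so its bitmap bit is still set:
	 * with this patch the #VC handler raises #GP for the access.
	 */
	outb(0x00, 0x80);
	return 0;
}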
arch/x86/mm/cpu_entry_area.c (54 additions, 4 deletions)
@@ -5,26 +5,72 @@
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>
#include <linux/prandom.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>
#include <asm/setup.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

#ifdef CONFIG_X86_32
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
return per_cpu(_cea_offset, cpu);
}

static __init void init_cea_offsets(void)
{
unsigned int max_cea;
unsigned int i, j;

if (!kaslr_enabled()) {
for_each_possible_cpu(i)
per_cpu(_cea_offset, i) = i;
return;
}

max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;

/* O(sodding terrible) */
for_each_possible_cpu(i) {
unsigned int cea;

again:
cea = prandom_u32_max(max_cea);

for_each_possible_cpu(j) {
if (cea_offset(j) == cea)
goto again;

if (i == j)
break;
}

per_cpu(_cea_offset, i) = cea;
}
}
#else /* !X86_64 */
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
return cpu;
}
static inline void init_cea_offsets(void) { }
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

return (struct cpu_entry_area *) va;
@@ -152,6 +198,9 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
pgprot_t tss_prot = PAGE_KERNEL;
#endif

kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
early_cpu_to_node(cpu));

cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

cea_map_percpu_pages(&cea->entry_stack_page,
@@ -205,7 +254,6 @@ static __init void setup_cpu_entry_area_ptes(void)

/* The +1 is for the readonly IDT: */
BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

start = CPU_ENTRY_AREA_BASE;
@@ -221,6 +269,8 @@ void __init setup_cpu_entry_areas(void)
{
unsigned int cpu;

init_cea_offsets();

setup_cpu_entry_area_ptes();

for_each_possible_cpu(cpu)
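For reference, init_cea_offsets() above is plain rejection sampling: each CPU draws a random slot and retries until the slot differs from every slot already handed out. A standalone sketch of the same idea follows; the hyp_* names and sizes are illustrative, rand() stands in for prandom_u32_max(), and the collision scan only looks at CPUs assigned so far.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define HYP_NR_CPUS	8	/* illustrative */
#define HYP_MAX_CEA	64	/* illustrative stand-in for max_cea */

static unsigned int hyp_cea_offset[HYP_NR_CPUS];

/* Draw a distinct random slot for every CPU, retrying on collisions. */
static void hyp_init_cea_offsets(void)
{
	for (unsigned int i = 0; i < HYP_NR_CPUS; i++) {
		unsigned int cea;
again:
		cea = (unsigned int)rand() % HYP_MAX_CEA;
		for (unsigned int j = 0; j < i; j++) {
			if (hyp_cea_offset[j] == cea)
				goto again;
		}
		hyp_cea_offset[i] = cea;
	}
}

int main(void)
{
	srand((unsigned int)time(NULL));
	hyp_init_cea_offsets();
	for (unsigned int i = 0; i < HYP_NR_CPUS; i++)
		printf("cpu %u -> slot %u\n", i, hyp_cea_offset[i]);
	return 0;
}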
arch/x86/mm/kasan_init_64.c (12 additions, 3 deletions)
@@ -316,6 +316,18 @@ void __init kasan_early_init(void)
kasan_map_early_shadow(init_top_pgt);
}

void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
{
unsigned long shadow_start, shadow_end;

shadow_start = (unsigned long)kasan_mem_to_shadow(va);
shadow_start = round_down(shadow_start, PAGE_SIZE);
shadow_end = (unsigned long)kasan_mem_to_shadow(va + size);
shadow_end = round_up(shadow_end, PAGE_SIZE);

kasan_populate_shadow(shadow_start, shadow_end, nid);
}

void __init kasan_init(void)
{
int i;
@@ -393,9 +405,6 @@ void __init kasan_init(void)
kasan_mem_to_shadow((void *)VMALLOC_END + 1),
shadow_cpu_entry_begin);

kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
(unsigned long)shadow_cpu_entry_end, 0);

kasan_populate_early_shadow(shadow_cpu_entry_end,
kasan_mem_to_shadow((void *)__START_KERNEL_map));
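The new kasan_populate_shadow_for_vaddr() helper only has to translate the target virtual range into its shadow range and round that range out to whole pages before populating it. A self-contained sketch of the address arithmetic, assuming generic KASAN (one shadow byte covers eight bytes of memory) and an illustrative shadow offset:

#include <stdint.h>
#include <stdio.h>

#define HYP_PAGE_SIZE		4096ULL
#define HYP_SHADOW_SCALE_SHIFT	3			/* generic KASAN: 8 bytes per shadow byte */
#define HYP_SHADOW_OFFSET	0xdffffc0000000000ULL	/* illustrative x86-64 offset */

/* Stand-in for kasan_mem_to_shadow(). */
static uint64_t hyp_mem_to_shadow(uint64_t addr)
{
	return (addr >> HYP_SHADOW_SCALE_SHIFT) + HYP_SHADOW_OFFSET;
}

/*
 * Mirrors kasan_populate_shadow_for_vaddr(): shadow memory is mapped
 * with page granularity, so round the shadow range outward.
 */
static void hyp_shadow_range(uint64_t va, uint64_t size,
			     uint64_t *start, uint64_t *end)
{
	*start = hyp_mem_to_shadow(va) & ~(HYP_PAGE_SIZE - 1);
	*end = (hyp_mem_to_shadow(va + size) + HYP_PAGE_SIZE - 1) &
	       ~(HYP_PAGE_SIZE - 1);
}

int main(void)
{
	uint64_t s, e;

	/* Example: one randomly placed cpu_entry_area slot (addresses illustrative). */
	hyp_shadow_range(0xfffffe0000001000ULL, 0x3b000ULL, &s, &e);
	printf("shadow range: [%#llx, %#llx)\n",
	       (unsigned long long)s, (unsigned long long)e);
	return 0;
}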
