Skip to content

Commit

Permalink
x86: account for shadow stack in exception-from-stub recovery
Browse files Browse the repository at this point in the history
Dealing with exceptions raised from within emulation stubs involves
discarding the return address (it is replaced by exception-related
information). Such discarding of course also requires removing the
corresponding entry from the shadow stack.

Also amend the comment in fixup_exception_return() to further clarify
why the use of ptr[1] can't be an out-of-bounds access.

While touching do_invalid_op(), also add a missing fall-through
annotation.

This is CVE-2023-46841 / XSA-451.

Fixes: 209fb99 ("x86/extable: Adjust extable handling to be shadow stack compatible")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
  • Loading branch information
jbeulich committed Feb 27, 2024
1 parent 28154a2 commit 91f5f7a
Show file tree
Hide file tree
Showing 3 changed files with 71 additions and 14 deletions.
20 changes: 12 additions & 8 deletions xen/arch/x86/extable.c
Original file line number Diff line number Diff line change
Expand Up @@ -86,13 +86,16 @@ search_one_extable(const struct exception_table_entry *first,
}

unsigned long
search_exception_table(const struct cpu_user_regs *regs)
search_exception_table(const struct cpu_user_regs *regs, unsigned long *stub_ra)
{
const struct virtual_region *region = find_text_region(regs->rip);
unsigned long stub = this_cpu(stubs.addr);

if ( region && region->ex )
{
*stub_ra = 0;
return search_one_extable(region->ex, region->ex_end, regs->rip);
}

/*
* Emulation stubs (which are per-CPU) are constructed with a RET at the
Expand All @@ -115,13 +118,13 @@ search_exception_table(const struct cpu_user_regs *regs)
regs->rsp > (unsigned long)regs &&
regs->rsp < (unsigned long)get_cpu_info() )
{
unsigned long retptr = *(unsigned long *)regs->rsp;
unsigned long retaddr = *(unsigned long *)regs->rsp, fixup;

region = find_text_region(retptr);
retptr = region && region->ex
? search_one_extable(region->ex, region->ex_end, retptr)
: 0;
if ( retptr )
region = find_text_region(retaddr);
fixup = region && region->ex
? search_one_extable(region->ex, region->ex_end, retaddr)
: 0;
if ( fixup )
{
/*
* Put trap number and error code on the stack (in place of the
Expand All @@ -133,7 +136,8 @@ search_exception_table(const struct cpu_user_regs *regs)
};

*(unsigned long *)regs->rsp = token.raw;
return retptr;
*stub_ra = retaddr;
return fixup;
}
}

Expand Down
3 changes: 2 additions & 1 deletion xen/arch/x86/include/asm/uaccess.h
Original file line number Diff line number Diff line change
Expand Up @@ -421,7 +421,8 @@ union stub_exception_token {
unsigned long raw;
};

extern unsigned long search_exception_table(const struct cpu_user_regs *regs);
extern unsigned long search_exception_table(const struct cpu_user_regs *regs,
unsigned long *stub_ra);
extern void sort_exception_tables(void);
extern void sort_exception_table(struct exception_table_entry *start,
const struct exception_table_entry *stop);
Expand Down
62 changes: 57 additions & 5 deletions xen/arch/x86/traps.c
Original file line number Diff line number Diff line change
Expand Up @@ -833,7 +833,7 @@ void asmlinkage do_unhandled_trap(struct cpu_user_regs *regs)
}

static void fixup_exception_return(struct cpu_user_regs *regs,
unsigned long fixup)
unsigned long fixup, unsigned long stub_ra)
{
if ( IS_ENABLED(CONFIG_XEN_SHSTK) )
{
Expand All @@ -850,7 +850,8 @@ static void fixup_exception_return(struct cpu_user_regs *regs,
/*
* Search for %rip. The shstk currently looks like this:
*
* ... [Likely pointed to by SSP]
* tok [Supervisor token, == &tok | BUSY, only with FRED inactive]
* ... [Pointed to by SSP for most exceptions, empty in IST cases]
* %cs [== regs->cs]
* %rip [== regs->rip]
* SSP [Likely points to 3 slots higher, above %cs]
Expand All @@ -868,7 +869,56 @@ static void fixup_exception_return(struct cpu_user_regs *regs,
*/
if ( ptr[0] == regs->rip && ptr[1] == regs->cs )
{
unsigned long primary_shstk =
(ssp & ~(STACK_SIZE - 1)) +
(PRIMARY_SHSTK_SLOT + 1) * PAGE_SIZE - 8;

wrss(fixup, ptr);

if ( !stub_ra )
goto shstk_done;

/*
* Stub recovery ought to happen only when the outer context
* was on the main shadow stack. We need to also "pop" the
* stub's return address from the interrupted context's shadow
* stack. That is,
* - if we're still on the main stack, we need to move the
* entire stack (up to and including the exception frame)
* up by one slot, incrementing the original SSP in the
* exception frame,
* - if we're on an IST stack, we need to increment the
* original SSP.
*/
BUG_ON((ptr[-1] ^ primary_shstk) >> PAGE_SHIFT);

if ( (ssp ^ primary_shstk) >> PAGE_SHIFT )
{
/*
* We're on an IST stack. First make sure the two return
* addresses actually match. Then increment the interrupted
* context's SSP.
*/
BUG_ON(stub_ra != *(unsigned long*)ptr[-1]);
wrss(ptr[-1] + 8, &ptr[-1]);
goto shstk_done;
}

/* Make sure the two return addresses actually match. */
BUG_ON(stub_ra != ptr[2]);

/* Move exception frame, updating SSP there. */
wrss(ptr[1], &ptr[2]); /* %cs */
wrss(ptr[0], &ptr[1]); /* %rip */
wrss(ptr[-1] + 8, &ptr[0]); /* SSP */

/* Move all newer entries. */
while ( --ptr != _p(ssp) )
wrss(ptr[-1], &ptr[0]);

/* Finally account for our own stack having shifted up. */
asm volatile ( "incsspd %0" :: "r" (2) );

goto shstk_done;
}
}
Expand All @@ -889,7 +939,8 @@ static void fixup_exception_return(struct cpu_user_regs *regs,

static bool extable_fixup(struct cpu_user_regs *regs, bool print)
{
unsigned long fixup = search_exception_table(regs);
unsigned long stub_ra = 0;
unsigned long fixup = search_exception_table(regs, &stub_ra);

if ( unlikely(fixup == 0) )
return false;
Expand All @@ -903,7 +954,7 @@ static bool extable_fixup(struct cpu_user_regs *regs, bool print)
vector_name(regs->entry_vector), regs->error_code,
_p(regs->rip), _p(regs->rip), _p(fixup));

fixup_exception_return(regs, fixup);
fixup_exception_return(regs, fixup, stub_ra);
this_cpu(last_extable_addr) = regs->rip;

return true;
Expand Down Expand Up @@ -1166,7 +1217,8 @@ void asmlinkage do_invalid_op(struct cpu_user_regs *regs)
{
case BUGFRAME_run_fn:
case BUGFRAME_warn:
fixup_exception_return(regs, (unsigned long)eip);
fixup_exception_return(regs, (unsigned long)eip, 0);
fallthrough;
case BUGFRAME_bug:
case BUGFRAME_assert:
return;
Expand Down

0 comments on commit 91f5f7a

Please sign in to comment.