x86/cpu: Rename srso_(.*)_alias to srso_alias_\1
commit 42be649 upstream.

For a more consistent namespace.

  [ bp: Fixup names in the doc too. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121148.976236447@infradead.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Peter Zijlstra authored and gregkh committed Aug 23, 2023
1 parent 5c51015 commit ee621dd
Showing 4 changed files with 22 additions and 22 deletions.
4 changes: 2 additions & 2 deletions Documentation/admin-guide/hw-vuln/srso.rst
@@ -124,8 +124,8 @@ sequence.
 To ensure the safety of this mitigation, the kernel must ensure that the
 safe return sequence is itself free from attacker interference. In Zen3
 and Zen4, this is accomplished by creating a BTB alias between the
-untraining function srso_untrain_ret_alias() and the safe return
-function srso_safe_ret_alias() which results in evicting a potentially
+untraining function srso_alias_untrain_ret() and the safe return
+function srso_alias_safe_ret() which results in evicting a potentially
 poisoned BTB entry and using that safe one for all function returns.
 
 In older Zen1 and Zen2, this is accomplished using a reinterpretation
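The doc hunk above turns on an address trick: for the BTB alias to exist on Zen3/4, the virtual addresses of the untraining function and the safe return function must differ only in bits 2, 8, 14 and 20. A minimal user-space sketch of that relationship, in plain C rather than kernel code; the base address 0xffffffff82000000 is a hypothetical stand-in, only its 2M alignment matters:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Bits 2, 8, 14 and 20: the positions in which the two thunk
 * addresses must differ for them to alias in the Zen3/4 BTB. */
#define SRSO_ALIAS_BITS ((UINT64_C(1) << 2) | (UINT64_C(1) << 8) | \
                         (UINT64_C(1) << 14) | (UINT64_C(1) << 20))

int main(void)
{
        /* srso_alias_untrain_ret() is 2M aligned, so bits 0-20 are clear. */
        uint64_t untrain = UINT64_C(0xffffffff82000000);    /* hypothetical */
        assert((untrain & ((UINT64_C(1) << 21) - 1)) == 0);

        /* srso_alias_safe_ret() lives in the same 2M page with bits
         * 2, 8, 14 and 20 set. */
        uint64_t safe = untrain | SRSO_ALIAS_BITS;

        /* The pair differs in exactly those four bits. */
        printf("xor = %#" PRIx64 "\n", untrain ^ safe);     /* 0x104104 */
        return 0;
}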
6 changes: 3 additions & 3 deletions arch/x86/include/asm/nospec-branch.h
@@ -296,7 +296,7 @@
 
 #ifdef CONFIG_CPU_SRSO
 	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-		      "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+		      "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
 #endif
 .endm

@@ -312,7 +312,7 @@
 
 #ifdef CONFIG_CPU_SRSO
 	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-		      "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+		      "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
 #endif
 .endm

@@ -349,7 +349,7 @@ extern void srso_alias_return_thunk(void);
 
 extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
-extern void srso_untrain_ret_alias(void);
+extern void srso_alias_untrain_ret(void);
 
 extern void entry_ibpb(void);
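At boot, the two ALTERNATIVE_2 sites above are patched so that a single UNTRAIN_RET slot calls whichever untrain routine matches the CPU: srso_alias_untrain_ret() when X86_FEATURE_SRSO_ALIAS is set (Zen3/4), srso_untrain_ret() when only X86_FEATURE_SRSO is set (Zen1/2), and NOPs otherwise. A conceptual model of that dispatch in plain C; cpu_has_* and untrain_ret_site() are made-up names for illustration, not kernel interfaces, and real alternatives rewrite instruction bytes rather than branching:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the feature flags tested by ALTERNATIVE_2. */
static bool cpu_has_srso_alias;  /* X86_FEATURE_SRSO_ALIAS */
static bool cpu_has_srso;        /* X86_FEATURE_SRSO */

static void srso_alias_untrain_ret(void) { puts("call srso_alias_untrain_ret"); }
static void srso_untrain_ret(void)       { puts("call srso_untrain_ret"); }

/* What one patched UNTRAIN_RET site effectively does; if both
 * feature bits were set, the later alternative would win. */
static void untrain_ret_site(void)
{
        if (cpu_has_srso_alias)
                srso_alias_untrain_ret();
        else if (cpu_has_srso)
                srso_untrain_ret();
        /* with neither feature set, the site stays as NOPs */
}

int main(void)
{
        cpu_has_srso_alias = true;      /* pretend this is a Zen3/4 part */
        untrain_ret_site();
        return 0;
}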
8 changes: 4 additions & 4 deletions arch/x86/kernel/vmlinux.lds.S
@@ -149,10 +149,10 @@ SECTIONS
 
 #ifdef CONFIG_CPU_SRSO
 		/*
-		 * See the comment above srso_untrain_ret_alias()'s
+		 * See the comment above srso_alias_untrain_ret()'s
 		 * definition.
 		 */
-		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
 		*(.text.__x86.rethunk_safe)
 #endif
 	ALIGN_ENTRY_TEXT_END
@@ -538,8 +538,8 @@ INIT_PER_CPU(irq_stack_backing_store);
  * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
-	(ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
+	(ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
 "SRSO function pair won't alias");
 #endif

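The ASSERT above computes the XOR of the two thunk addresses as (A | B) - (A & B). The identity holds because A | B is the disjoint sum of A ^ B (bits set in exactly one operand) and A & B (bits set in both), so subtracting A & B leaves exactly A ^ B. A self-contained check in plain C; the address value is a hypothetical stand-in:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical pair mirroring srso_alias_untrain_ret() and
         * srso_alias_safe_ret(): same 2M page, four differing bits. */
        uint64_t a = UINT64_C(0xffffffff82000000);
        uint64_t b = a | (UINT64_C(1) << 2) | (UINT64_C(1) << 8) |
                         (UINT64_C(1) << 14) | (UINT64_C(1) << 20);

        /* (A | B) - (A & B) == A ^ B, for any A and B. */
        assert(((a | b) - (a & b)) == (a ^ b));

        /* ...and for this pair the XOR is exactly the required mask,
         * so the "SRSO function pair won't alias" assertion passes. */
        assert((a ^ b) == ((UINT64_C(1) << 2) | (UINT64_C(1) << 8) |
                           (UINT64_C(1) << 14) | (UINT64_C(1) << 20)));
        return 0;
}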
26 changes: 13 additions & 13 deletions arch/x86/lib/retpoline.S
@@ -133,56 +133,56 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 #ifdef CONFIG_RETHUNK
 
 /*
- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
  * special addresses:
  *
- * - srso_untrain_ret_alias() is 2M aligned
- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
+ * - srso_alias_untrain_ret() is 2M aligned
+ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
  *   and 20 in its virtual address are set (while those bits in the
- *   srso_untrain_ret_alias() function are cleared).
+ *   srso_alias_untrain_ret() function are cleared).
  *
  * This guarantees that those two addresses will alias in the branch
  * target buffer of Zen3/4 generations, leading to any potential
  * poisoned entries at that BTB slot to get evicted.
  *
- * As a result, srso_safe_ret_alias() becomes a safe return.
+ * As a result, srso_alias_safe_ret() becomes a safe return.
  */
 #ifdef CONFIG_CPU_SRSO
 	.section .text.__x86.rethunk_untrain
 
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	ASM_NOP2
 	lfence
 	jmp srso_alias_return_thunk
-SYM_FUNC_END(srso_untrain_ret_alias)
-__EXPORT_THUNK(srso_untrain_ret_alias)
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
 
 	.section .text.__x86.rethunk_safe
 #else
 /* dummy definition for alternatives */
-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_untrain_ret_alias)
+SYM_FUNC_END(srso_alias_untrain_ret)
 #endif
 
-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	add $8, %_ASM_SP
 	UNWIND_HINT_FUNC
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
-SYM_FUNC_END(srso_safe_ret_alias)
+SYM_FUNC_END(srso_alias_safe_ret)
 
 	.section .text.__x86.return_thunk
 
 SYM_CODE_START(srso_alias_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
-	call srso_safe_ret_alias
+	call srso_alias_safe_ret
 	ud2
 SYM_CODE_END(srso_alias_return_thunk)
