Skip to content
Permalink
Browse files
arm64: Create a list of SYM_CODE functions, check return PC against list
SYM_CODE functions don't follow the usual calling conventions. Check if the
return PC in a stack frame falls in any of these. If it does, consider the
stack trace unreliable.

Define a special section for unreliable functions
=================================================

Define a SYM_CODE_END() macro for arm64 that adds the function address
range to a new section called "sym_code_functions".

Linker file
===========

Include the "sym_code_functions" section under read-only data in
vmlinux.lds.S.

Initialization
==============

Define an early_initcall() to create a sym_code_functions[] array from
the linker data.

Unwinder check
==============

Add a reliability check in unwind_is_reliable() that compares a return
PC with sym_code_functions[]. If there is a match, then return failure.

Signed-off-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
  • Loading branch information
madvenka786 authored and intel-lab-lkp committed Aug 12, 2021
1 parent b948b91 commit fcc398e417dd1c8521e069b516322c5e5f615444
Show file tree
Hide file tree
Showing 4 changed files with 76 additions and 0 deletions.
@@ -68,4 +68,16 @@
SYM_FUNC_END_ALIAS(x); \
SYM_FUNC_END_ALIAS(__pi_##x)

/*
 * Record the address range of each SYM_CODE function in a struct code_range
 * in a special section.
 *
 * SYM_CODE functions don't follow the usual calling conventions, so the
 * unwinder cannot rely on their frame records. The local label 99 marks
 * the end of the function; the (start, end) pair is emitted as two quads
 * into the "sym_code_functions" section, which the unwinder scans at boot
 * to build its blacklist of unreliable return PCs.
 */
#define SYM_CODE_END(name) \
	SYM_END(name, SYM_T_NONE) ;\
99:	;\
	.pushsection "sym_code_functions", "aw" ;\
	.quad name ;\
	.quad 99b ;\
	.popsection

#endif
@@ -21,5 +21,6 @@ extern char __exittext_begin[], __exittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
extern char __sym_code_functions_start[], __sym_code_functions_end[];

#endif /* __ASM_SECTIONS_H */
@@ -18,6 +18,31 @@
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/* Half-open address range [start, end) covered by one SYM_CODE function. */
struct code_range {
	unsigned long start;
	unsigned long end;
};

static struct code_range *sym_code_functions;
static int num_sym_code_functions;

/*
 * Point sym_code_functions[] at the array of struct code_range records
 * that SYM_CODE_END() emitted into the "sym_code_functions" linker
 * section, and publish the element count.
 *
 * Made static: it is invoked only through the early_initcall() below,
 * and a non-static definition without a prototype trips
 * -Wmissing-prototypes.
 */
static int __init init_sym_code_functions(void)
{
	size_t size = (unsigned long)__sym_code_functions_end -
		      (unsigned long)__sym_code_functions_start;

	sym_code_functions = (struct code_range *)__sym_code_functions_start;
	/*
	 * Publish num_sym_code_functions only after sym_code_functions is
	 * set up, so that a reader that observes a non-zero count is
	 * guaranteed to also observe the array pointer.
	 */
	smp_mb();
	num_sym_code_functions = size / sizeof(struct code_range);

	return 0;
}
early_initcall(init_sym_code_functions);

/*
* AArch64 PCS assigns the frame pointer to x29.
*
@@ -185,13 +210,41 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
*/
/*
 * Decide whether the frame's return PC can be trusted for a reliable
 * stack trace. Returns false if the PC is not a kernel text address or
 * falls inside any SYM_CODE function recorded in sym_code_functions[].
 */
static bool notrace unwind_is_reliable(struct stackframe *frame)
{
	const struct code_range *range;
	unsigned long pc;
	int i;

	/*
	 * If the PC is not a known kernel text address, then we cannot
	 * be sure that a subsequent unwind will be reliable, as we
	 * don't know that the code follows our unwind requirements.
	 */
	if (!__kernel_text_address(frame->pc))
		return false;

	/*
	 * Check the return PC against sym_code_functions[]. If there is a
	 * match, then consider the stack frame unreliable.
	 *
	 * As SYM_CODE functions don't follow the usual calling conventions,
	 * we assume by default that any SYM_CODE function cannot be unwound
	 * reliably.
	 *
	 * Note that this includes:
	 *
	 * - Exception handlers and entry assembly
	 * - Trampoline assembly (e.g., ftrace, kprobes)
	 * - Hypervisor-related assembly
	 * - Hibernation-related assembly
	 * - CPU start-stop, suspend-resume assembly
	 * - Kernel relocation assembly
	 */
	pc = frame->pc;
	/* Linear scan; ranges are half-open: [start, end). */
	for (i = 0; i < num_sym_code_functions; i++) {
		range = &sym_code_functions[i];
		if (pc >= range->start && pc < range->end)
			return false;
	}
	return true;
}

@@ -111,6 +111,14 @@ jiffies = jiffies_64;
#define TRAMP_TEXT
#endif

/*
 * Collect the (start, end) address-range records that SYM_CODE_END()
 * emits into the "sym_code_functions" input section, bracketed by
 * start/end symbols so init_sym_code_functions() can locate the array.
 * KEEP() prevents --gc-sections from discarding the otherwise
 * unreferenced records.
 */
#define SYM_CODE_FUNCTIONS \
	. = ALIGN(16); \
	.symcode : AT(ADDR(.symcode) - LOAD_OFFSET) { \
		__sym_code_functions_start = .; \
		KEEP(*(sym_code_functions)) \
		__sym_code_functions_end = .; \
	}

/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from _stext to _edata, must be a round multiple of the PE/COFF
@@ -196,6 +204,8 @@ SECTIONS
swapper_pg_dir = .;
. += PAGE_SIZE;

SYM_CODE_FUNCTIONS

. = ALIGN(SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;

0 comments on commit fcc398e

Please sign in to comment.