Skip to content
Permalink
Browse files

kernel: rename NANO_ESF

This is now called z_arch_esf_t, conforming to our naming
convention.

This needs to remain a typedef due to how our offset generation
header mechanism works.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
  (commit metadata)
andrewboie committed Jul 16, 2019
1 parent c9a4bd4 commit 96571a8c40b34387086e6adfb2a07e872feecd26
Showing with 298 additions and 298 deletions.
  1. +1 −1 arch/arc/core/fatal.c
  2. +1 −1 arch/arc/core/fault.c
  3. +1 −1 arch/arc/include/kernel_arch_func.h
  4. +4 −4 arch/arm/core/fatal.c
  5. +16 −16 arch/arm/core/fault.c
  6. +1 −1 arch/arm/include/kernel_arch_func.h
  7. +39 −39 arch/nios2/core/exception.S
  8. +2 −2 arch/nios2/core/fatal.c
  9. +19 −19 arch/nios2/core/offsets/offsets.c
  10. +1 −1 arch/nios2/include/kernel_arch_func.h
  11. +2 −2 arch/riscv32/core/fatal.c
  12. +86 −86 arch/riscv32/core/isr.S
  13. +23 −23 arch/riscv32/core/offsets/offsets.c
  14. +39 −39 arch/riscv32/core/swap.S
  15. +1 −1 arch/riscv32/include/kernel_arch_func.h
  16. +3 −3 arch/x86/core/ia32/excstub.S
  17. +12 −12 arch/x86/core/ia32/fatal.c
  18. +1 −1 arch/x86/core/ia32/float.c
  19. +14 −14 arch/x86/core/offsets/offsets.c
  20. +2 −2 arch/x86/include/ia32/exception.h
  21. +2 −2 arch/x86_64/core/x86_64.c
  22. +1 −1 arch/xtensa/core/fatal.c
  23. +1 −1 arch/xtensa/core/xtensa-asm2.c
  24. +1 −1 arch/xtensa/include/kernel_arch_func.h
  25. +1 −1 include/arch/arc/v2/exc.h
  26. +1 −1 include/arch/arm/cortex_m/exc.h
  27. +3 −3 include/arch/nios2/arch.h
  28. +1 −1 include/arch/posix/arch.h
  29. +1 −1 include/arch/riscv32/exp.h
  30. +1 −1 include/arch/x86/ia32/arch.h
  31. +1 −1 include/arch/x86_64/arch.h
  32. +1 −1 include/arch/xtensa/exc.h
  33. +2 −2 include/fatal.h
  34. +2 −2 kernel/fatal.c
  35. +1 −1 scripts/checkpatch/typedefsfile
  36. +1 −1 subsys/testsuite/ztest/include/ztest.h
  37. +2 −2 tests/arch/x86/static_idt/src/main.c
  38. +1 −1 tests/kernel/fatal/src/main.c
  39. +1 −1 tests/kernel/mem_protect/mem_protect/src/common.c
  40. +1 −1 tests/kernel/mem_protect/protection/src/main.c
  41. +1 −1 tests/kernel/mem_protect/stackprot/src/main.c
  42. +1 −1 tests/kernel/mem_protect/userspace/src/main.c
  43. +1 −1 tests/kernel/pipe/pipe/src/test_pipe.c
  44. +1 −1 tests/kernel/threads/dynamic_thread/src/main.c
@@ -18,7 +18,7 @@
#include <arch/cpu.h>
#include <logging/log_ctrl.h>

void z_arc_fatal_error(unsigned int reason, const NANO_ESF *esf)
void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{
if (reason == K_ERR_CPU_EXCEPTION) {
z_fatal_print("Faulting instruction address = 0x%lx",
@@ -363,7 +363,7 @@ static void dump_exception_info(u32_t vector, u32_t cause, u32_t parameter)
* invokes the user provided routine k_sys_fatal_error_handler() which is
* responsible for implementing the error handling policy.
*/
void _Fault(NANO_ESF *esf)
void _Fault(z_arch_esf_t *esf)
{
u32_t vector, cause, parameter;
u32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);
@@ -61,7 +61,7 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,


extern void z_arch_switch(void *switch_to, void **switched_from);
extern void z_arc_fatal_error(unsigned int reason, const NANO_ESF *esf);
extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
#endif /* _ASMLANGUAGE */

#ifdef __cplusplus
@@ -19,7 +19,7 @@
#include <kernel_structs.h>
#include <logging/log_ctrl.h>

static void esf_dump(const NANO_ESF *esf)
static void esf_dump(const z_arch_esf_t *esf)
{
z_fatal_print("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x",
esf->basic.a1, esf->basic.a2, esf->basic.a3);
@@ -41,7 +41,7 @@ static void esf_dump(const NANO_ESF *esf)
esf->basic.pc);
}

void z_arm_fatal_error(unsigned int reason, const NANO_ESF *esf)
void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{

if (esf != NULL) {
@@ -50,15 +50,15 @@ void z_arm_fatal_error(unsigned int reason, const NANO_ESF *esf)
z_fatal_error(reason, esf);
}

void z_do_kernel_oops(const NANO_ESF *esf)
void z_do_kernel_oops(const z_arch_esf_t *esf)
{
z_arm_fatal_error(esf->basic.r0, esf);
}

FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
{
u32_t *ssf_contents = ssf_ptr;
NANO_ESF oops_esf = { 0 };
z_arch_esf_t oops_esf = { 0 };

/* TODO: Copy the rest of the register set out of ssf_ptr */
oops_esf.basic.pc = ssf_contents[3];
@@ -136,7 +136,7 @@
*/

#if (CONFIG_FAULT_DUMP == 1)
static void FaultShow(const NANO_ESF *esf, int fault)
static void FaultShow(const z_arch_esf_t *esf, int fault)
{
PR_EXC("Fault! EXC #%d", fault);

@@ -155,7 +155,7 @@ static void FaultShow(const NANO_ESF *esf, int fault)
*
* For Dump level 0, no information needs to be generated.
*/
static void FaultShow(const NANO_ESF *esf, int fault)
static void FaultShow(const z_arch_esf_t *esf, int fault)
{
(void)esf;
(void)fault;
@@ -175,7 +175,7 @@ static const struct z_exc_handle exceptions[] = {
*
* @return true if error is recoverable, otherwise return false.
*/
static bool memory_fault_recoverable(NANO_ESF *esf)
static bool memory_fault_recoverable(z_arch_esf_t *esf)
{
#ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@@ -210,7 +210,7 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr,
*
* @return error code to identify the fatal error reason
*/
static u32_t MpuFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
static u32_t MpuFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
u32_t mmfar = -EINVAL;
@@ -333,7 +333,7 @@ static u32_t MpuFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
*
* @return N/A
*/
static int BusFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
static int BusFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;

@@ -487,7 +487,7 @@ static int BusFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
*
* @return error code to identify the fatal error reason
*/
static u32_t UsageFault(const NANO_ESF *esf)
static u32_t UsageFault(const z_arch_esf_t *esf)
{
u32_t reason = K_ERR_CPU_EXCEPTION;

@@ -543,7 +543,7 @@ static u32_t UsageFault(const NANO_ESF *esf)
*
* @return N/A
*/
static void SecureFault(const NANO_ESF *esf)
static void SecureFault(const z_arch_esf_t *esf)
{
PR_FAULT_INFO("***** SECURE FAULT *****");

@@ -582,7 +582,7 @@ static void SecureFault(const NANO_ESF *esf)
*
* @return N/A
*/
static void DebugMonitor(const NANO_ESF *esf)
static void DebugMonitor(const z_arch_esf_t *esf)
{
ARG_UNUSED(esf);

@@ -602,7 +602,7 @@ static void DebugMonitor(const NANO_ESF *esf)
*
* @return error code to identify the fatal error reason
*/
static u32_t HardFault(NANO_ESF *esf, bool *recoverable)
static u32_t HardFault(z_arch_esf_t *esf, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;

@@ -644,7 +644,7 @@ static u32_t HardFault(NANO_ESF *esf, bool *recoverable)
*
* @return N/A
*/
static void ReservedException(const NANO_ESF *esf, int fault)
static void ReservedException(const z_arch_esf_t *esf, int fault)
{
ARG_UNUSED(esf);

@@ -654,7 +654,7 @@ static void ReservedException(const NANO_ESF *esf, int fault)
}

/* Handler function for ARM fault conditions. */
static u32_t FaultHandle(NANO_ESF *esf, int fault, bool *recoverable)
static u32_t FaultHandle(z_arch_esf_t *esf, int fault, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;

@@ -708,7 +708,7 @@ static u32_t FaultHandle(NANO_ESF *esf, int fault, bool *recoverable)
*
* @param secure_esf Pointer to the secure stack frame.
*/
static void SecureStackDump(const NANO_ESF *secure_esf)
static void SecureStackDump(const z_arch_esf_t *secure_esf)
{
/*
* In case a Non-Secure exception interrupted the Secure
@@ -733,7 +733,7 @@ static void SecureStackDump(const NANO_ESF *secure_esf)
* Non-Secure exception entry.
*/
top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
secure_esf = (const NANO_ESF *)top_of_sec_stack;
secure_esf = (const z_arch_esf_t *)top_of_sec_stack;
sec_ret_addr = secure_esf->basic.pc;
} else {
/* Exception during Non-Secure function call.
@@ -780,7 +780,7 @@ static void SecureStackDump(const NANO_ESF *secure_esf)
* Note: exc_return argument shall only be used by the Fault handler if we are
* running a Secure Firmware.
*/
void _Fault(NANO_ESF *esf, u32_t exc_return)
void _Fault(z_arch_esf_t *esf, u32_t exc_return)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
@@ -815,13 +815,13 @@ void _Fault(NANO_ESF *esf, u32_t exc_return)
* and supply it to the fault handing function.
*/
if (exc_return & EXC_RETURN_MODE_THREAD) {
esf = (NANO_ESF *)__TZ_get_PSP_NS();
esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
if ((SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) == 0) {
PR_EXC("RETTOBASE does not match EXC_RETURN");
goto _exit_fatal;
}
} else {
esf = (NANO_ESF *)__TZ_get_MSP_NS();
esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
if ((SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) != 0) {
PR_EXC("RETTOBASE does not match EXC_RETURN");
goto _exit_fatal;
@@ -146,7 +146,7 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
u32_t stack_end,
u32_t stack_start);

extern void z_arm_fatal_error(unsigned int reason, const NANO_ESF *esf);
extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);

#endif /* _ASMLANGUAGE */

@@ -34,35 +34,35 @@ GTEXT(_offload_routine)
*/
SECTION_FUNC(exception.entry, _exception)
/* Reserve thread stack space for saving context */
subi sp, sp, __NANO_ESF_SIZEOF
subi sp, sp, __z_arch_esf_t_SIZEOF

/* Preserve all caller-saved registers onto the thread's stack */
stw ra, __NANO_ESF_ra_OFFSET(sp)
stw r1, __NANO_ESF_r1_OFFSET(sp)
stw r2, __NANO_ESF_r2_OFFSET(sp)
stw r3, __NANO_ESF_r3_OFFSET(sp)
stw r4, __NANO_ESF_r4_OFFSET(sp)
stw r5, __NANO_ESF_r5_OFFSET(sp)
stw r6, __NANO_ESF_r6_OFFSET(sp)
stw r7, __NANO_ESF_r7_OFFSET(sp)
stw r8, __NANO_ESF_r8_OFFSET(sp)
stw r9, __NANO_ESF_r9_OFFSET(sp)
stw r10, __NANO_ESF_r10_OFFSET(sp)
stw r11, __NANO_ESF_r11_OFFSET(sp)
stw r12, __NANO_ESF_r12_OFFSET(sp)
stw r13, __NANO_ESF_r13_OFFSET(sp)
stw r14, __NANO_ESF_r14_OFFSET(sp)
stw r15, __NANO_ESF_r15_OFFSET(sp)
stw ra, __z_arch_esf_t_ra_OFFSET(sp)
stw r1, __z_arch_esf_t_r1_OFFSET(sp)
stw r2, __z_arch_esf_t_r2_OFFSET(sp)
stw r3, __z_arch_esf_t_r3_OFFSET(sp)
stw r4, __z_arch_esf_t_r4_OFFSET(sp)
stw r5, __z_arch_esf_t_r5_OFFSET(sp)
stw r6, __z_arch_esf_t_r6_OFFSET(sp)
stw r7, __z_arch_esf_t_r7_OFFSET(sp)
stw r8, __z_arch_esf_t_r8_OFFSET(sp)
stw r9, __z_arch_esf_t_r9_OFFSET(sp)
stw r10, __z_arch_esf_t_r10_OFFSET(sp)
stw r11, __z_arch_esf_t_r11_OFFSET(sp)
stw r12, __z_arch_esf_t_r12_OFFSET(sp)
stw r13, __z_arch_esf_t_r13_OFFSET(sp)
stw r14, __z_arch_esf_t_r14_OFFSET(sp)
stw r15, __z_arch_esf_t_r15_OFFSET(sp)

/* Store value of estatus control register */
rdctl et, estatus
stw et, __NANO_ESF_estatus_OFFSET(sp)
stw et, __z_arch_esf_t_estatus_OFFSET(sp)

/* ea-4 is the address of the instruction when the exception happened,
* put this in the stack frame as well
*/
addi r15, ea, -4
stw r15, __NANO_ESF_instr_OFFSET(sp)
stw r15, __z_arch_esf_t_instr_OFFSET(sp)

/* Figure out whether we are here because of an interrupt or an
* exception. If an interrupt, switch stacks and enter IRQ handling
@@ -156,7 +156,7 @@ not_interrupt:
*
* We earlier put ea - 4 in the stack frame, replace it with just ea
*/
stw ea, __NANO_ESF_instr_OFFSET(sp)
stw ea, __z_arch_esf_t_instr_OFFSET(sp)

#ifdef CONFIG_IRQ_OFFLOAD
/* Check the contents of _offload_routine. If non-NULL, jump into
@@ -192,35 +192,35 @@ _exception_exit:
* and return to the interrupted context */

/* Return address from the exception */
ldw ea, __NANO_ESF_instr_OFFSET(sp)
ldw ea, __z_arch_esf_t_instr_OFFSET(sp)

/* Restore estatus
* XXX is this right??? */
ldw r5, __NANO_ESF_estatus_OFFSET(sp)
ldw r5, __z_arch_esf_t_estatus_OFFSET(sp)
wrctl estatus, r5

/* Restore caller-saved registers */
ldw ra, __NANO_ESF_ra_OFFSET(sp)
ldw r1, __NANO_ESF_r1_OFFSET(sp)
ldw r2, __NANO_ESF_r2_OFFSET(sp)
ldw r3, __NANO_ESF_r3_OFFSET(sp)
ldw r4, __NANO_ESF_r4_OFFSET(sp)
ldw r5, __NANO_ESF_r5_OFFSET(sp)
ldw r6, __NANO_ESF_r6_OFFSET(sp)
ldw r7, __NANO_ESF_r7_OFFSET(sp)
ldw r8, __NANO_ESF_r8_OFFSET(sp)
ldw r9, __NANO_ESF_r9_OFFSET(sp)
ldw r10, __NANO_ESF_r10_OFFSET(sp)
ldw r11, __NANO_ESF_r11_OFFSET(sp)
ldw r12, __NANO_ESF_r12_OFFSET(sp)
ldw r13, __NANO_ESF_r13_OFFSET(sp)
ldw r14, __NANO_ESF_r14_OFFSET(sp)
ldw r15, __NANO_ESF_r15_OFFSET(sp)
ldw ra, __z_arch_esf_t_ra_OFFSET(sp)
ldw r1, __z_arch_esf_t_r1_OFFSET(sp)
ldw r2, __z_arch_esf_t_r2_OFFSET(sp)
ldw r3, __z_arch_esf_t_r3_OFFSET(sp)
ldw r4, __z_arch_esf_t_r4_OFFSET(sp)
ldw r5, __z_arch_esf_t_r5_OFFSET(sp)
ldw r6, __z_arch_esf_t_r6_OFFSET(sp)
ldw r7, __z_arch_esf_t_r7_OFFSET(sp)
ldw r8, __z_arch_esf_t_r8_OFFSET(sp)
ldw r9, __z_arch_esf_t_r9_OFFSET(sp)
ldw r10, __z_arch_esf_t_r10_OFFSET(sp)
ldw r11, __z_arch_esf_t_r11_OFFSET(sp)
ldw r12, __z_arch_esf_t_r12_OFFSET(sp)
ldw r13, __z_arch_esf_t_r13_OFFSET(sp)
ldw r14, __z_arch_esf_t_r14_OFFSET(sp)
ldw r15, __z_arch_esf_t_r15_OFFSET(sp)

/* Put the stack pointer back where it was when we entered
* exception state
*/
addi sp, sp, __NANO_ESF_SIZEOF
addi sp, sp, __z_arch_esf_t_SIZEOF

/* All done, copy estatus into status and transfer to ea */
eret
@@ -11,7 +11,7 @@
#include <logging/log_ctrl.h>

FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
const NANO_ESF *esf)
const z_arch_esf_t *esf)
{
if (esf != NULL) {
/* Subtract 4 from EA since we added 4 earlier so that the
@@ -99,7 +99,7 @@ static char *cause_str(u32_t cause_code)
}
#endif

FUNC_NORETURN void _Fault(const NANO_ESF *esf)
FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
{
#if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
/* Unfortunately, completely unavailable on Nios II/e cores */
@@ -43,25 +43,25 @@ GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_OFFSET_SYM(_callee_saved_t, key);
GEN_OFFSET_SYM(_callee_saved_t, retval);

GEN_OFFSET_SYM(NANO_ESF, ra);
GEN_OFFSET_SYM(NANO_ESF, r1);
GEN_OFFSET_SYM(NANO_ESF, r2);
GEN_OFFSET_SYM(NANO_ESF, r3);
GEN_OFFSET_SYM(NANO_ESF, r4);
GEN_OFFSET_SYM(NANO_ESF, r5);
GEN_OFFSET_SYM(NANO_ESF, r6);
GEN_OFFSET_SYM(NANO_ESF, r7);
GEN_OFFSET_SYM(NANO_ESF, r8);
GEN_OFFSET_SYM(NANO_ESF, r9);
GEN_OFFSET_SYM(NANO_ESF, r10);
GEN_OFFSET_SYM(NANO_ESF, r11);
GEN_OFFSET_SYM(NANO_ESF, r12);
GEN_OFFSET_SYM(NANO_ESF, r13);
GEN_OFFSET_SYM(NANO_ESF, r14);
GEN_OFFSET_SYM(NANO_ESF, r15);
GEN_OFFSET_SYM(NANO_ESF, estatus);
GEN_OFFSET_SYM(NANO_ESF, instr);
GEN_ABSOLUTE_SYM(__NANO_ESF_SIZEOF, sizeof(NANO_ESF));
GEN_OFFSET_SYM(z_arch_esf_t, ra);
GEN_OFFSET_SYM(z_arch_esf_t, r1);
GEN_OFFSET_SYM(z_arch_esf_t, r2);
GEN_OFFSET_SYM(z_arch_esf_t, r3);
GEN_OFFSET_SYM(z_arch_esf_t, r4);
GEN_OFFSET_SYM(z_arch_esf_t, r5);
GEN_OFFSET_SYM(z_arch_esf_t, r6);
GEN_OFFSET_SYM(z_arch_esf_t, r7);
GEN_OFFSET_SYM(z_arch_esf_t, r8);
GEN_OFFSET_SYM(z_arch_esf_t, r9);
GEN_OFFSET_SYM(z_arch_esf_t, r10);
GEN_OFFSET_SYM(z_arch_esf_t, r11);
GEN_OFFSET_SYM(z_arch_esf_t, r12);
GEN_OFFSET_SYM(z_arch_esf_t, r13);
GEN_OFFSET_SYM(z_arch_esf_t, r14);
GEN_OFFSET_SYM(z_arch_esf_t, r15);
GEN_OFFSET_SYM(z_arch_esf_t, estatus);
GEN_OFFSET_SYM(z_arch_esf_t, instr);
GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));

/*
* size of the struct k_thread structure sans save area for floating

0 comments on commit 96571a8

Please sign in to comment.
You can’t perform that action at this time.