Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 13 additions & 2 deletions arch/riscv32/core/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,20 @@ zephyr_sources(
fatal.c
irq_manage.c
irq_offload.c
isr.S
prep_c.c
reset.S
swap.S
thread_entry_wrapper.S
thread.c
)

# SMP-only sources
zephyr_sources_ifdef(CONFIG_SMP curr_cpu.c)
zephyr_sources_ifdef(CONFIG_SMP start_cpu.c)

# Use SMP ISR if SMP is configured
zephyr_sources_ifndef(CONFIG_SMP isr.S)
zephyr_sources_ifdef(CONFIG_SMP isr_smp.S)

# Use swap or switch depending on the USE_SWITCH config
zephyr_sources_ifndef(CONFIG_USE_SWITCH swap.S)
zephyr_sources_ifdef(CONFIG_USE_SWITCH switch.S)
20 changes: 20 additions & 0 deletions arch/riscv32/core/curr_cpu.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
/*
* Copyright (c) 2018 SiFive Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <kernel.h>
#include <kernel_structs.h>

/* Return the per-CPU (_cpu_t) record for the hart executing this call.
 *
 * NOTE(review): plain "inline" at file scope without a matching extern
 * declaration may not emit an out-of-line definition in C99/C11 --
 * confirm a header provides one, or make this "static inline" /
 * non-inline so callers in other translation units link.
 */
inline _cpu_t *_arch_curr_cpu(void)
{
	/* On hart init, the mscratch csr is set to the pointer for the
	 * _kernel.cpus[] struct for that hart */
	u32_t mscratch;

	/* Read the machine-mode scratch CSR into a general register */
	__asm__ volatile("csrr %0, mscratch" : "=r" (mscratch));

	return (_cpu_t *) mscratch;
}

19 changes: 19 additions & 0 deletions arch/riscv32/core/fatal.c
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
break;
}

#ifndef CONFIG_SMP
printk("Current thread ID = %p\n"
"Faulting instruction address = 0x%x\n"
" ra: 0x%x gp: 0x%x tp: 0x%x t0: 0x%x\n"
Expand All @@ -105,6 +106,24 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
esf->t5, esf->t6, esf->a0, esf->a1,
esf->a2, esf->a3, esf->a4, esf->a5,
esf->a6, esf->a7);
#else
printk("Hart: %d\n"
"Current thread ID = %p\n"
"Faulting instruction address = 0x%x\n"
" ra: 0x%x gp: 0x%x tp: 0x%x t0: 0x%x\n"
" t1: 0x%x t2: 0x%x t3: 0x%x t4: 0x%x\n"
" t5: 0x%x t6: 0x%x a0: 0x%x a1: 0x%x\n"
" a2: 0x%x a3: 0x%x a4: 0x%x a5: 0x%x\n"
" a6: 0x%x a7: 0x%x\n",
_current_cpu->id,
k_current_get(),
(esf->mepc == 0xdeadbaad) ? 0xdeadbaad : esf->mepc,
esf->ra, esf->gp, esf->tp, esf->t0,
esf->t1, esf->t2, esf->t3, esf->t4,
esf->t5, esf->t6, esf->a0, esf->a1,
esf->a2, esf->a3, esf->a4, esf->a5,
esf->a6, esf->a7);
#endif

_SysFatalErrorHandler(reason, esf);
/* spin forever */
Expand Down
286 changes: 286 additions & 0 deletions arch/riscv32/core/isr_smp.S
Original file line number Diff line number Diff line change
@@ -0,0 +1,286 @@
/*
 * Copyright (c) 2018 SiFive Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <toolchain.h>
#include <linker/sections.h>
#include <kernel_structs.h>
#include <offsets_short.h>

/* imports */
GDATA(_sw_isr_table)
GTEXT(__soc_is_irq)
GTEXT(__soc_handle_irq)
GTEXT(_get_next_switch_handle)
GTEXT(_Fault)

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
GTEXT(__soc_save_context)
GTEXT(__soc_restore_context)
#endif

#ifdef CONFIG_TRACING
GTEXT(z_sys_trace_thread_switched_in)
GTEXT(z_sys_trace_isr_enter)
#endif

#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(_offload_routine)
#endif

#ifdef CONFIG_TIMESLICING
GTEXT(_update_time_slice_before_swap)
#endif

#ifdef CONFIG_EXECUTION_BENCHMARKING
GTEXT(read_timer_start_of_isr)
GTEXT(read_timer_end_of_isr)
#endif

/* exports */
GTEXT(__irq_wrapper)

/*
 * SMP interrupt/exception entry point.
 *
 * Saves the full register context on the interrupted thread's stack,
 * switches to the per-CPU interrupt stack, dispatches to a fault
 * handler, syscall handler, or registered ISR, then asks the scheduler
 * for the next thread and restores its context.
 */
SECTION_FUNC(exception.entry, __irq_wrapper)
	/* Allocate space on thread stack to save registers */
	addi sp, sp, -__NANO_ESF_SIZEOF

	/*
	 * Save caller and callee-saved registers onto the stack.
	 * Callee-saved (s0-s11) registers are included because the
	 * outgoing thread may be switched out and resumed from this
	 * frame by a different hart.
	 */
	sw ra, __NANO_ESF_ra_OFFSET(sp)
	sw gp, __NANO_ESF_gp_OFFSET(sp)
	sw tp, __NANO_ESF_tp_OFFSET(sp)
	sw t0, __NANO_ESF_t0_OFFSET(sp)
	sw t1, __NANO_ESF_t1_OFFSET(sp)
	sw t2, __NANO_ESF_t2_OFFSET(sp)
	sw t3, __NANO_ESF_t3_OFFSET(sp)
	sw t4, __NANO_ESF_t4_OFFSET(sp)
	sw t5, __NANO_ESF_t5_OFFSET(sp)
	sw t6, __NANO_ESF_t6_OFFSET(sp)
	sw a0, __NANO_ESF_a0_OFFSET(sp)
	sw a1, __NANO_ESF_a1_OFFSET(sp)
	sw a2, __NANO_ESF_a2_OFFSET(sp)
	sw a3, __NANO_ESF_a3_OFFSET(sp)
	sw a4, __NANO_ESF_a4_OFFSET(sp)
	sw a5, __NANO_ESF_a5_OFFSET(sp)
	sw a6, __NANO_ESF_a6_OFFSET(sp)
	sw a7, __NANO_ESF_a7_OFFSET(sp)
	sw s0, __NANO_ESF_s0_OFFSET(sp)
	sw s1, __NANO_ESF_s1_OFFSET(sp)
	sw s2, __NANO_ESF_s2_OFFSET(sp)
	sw s3, __NANO_ESF_s3_OFFSET(sp)
	sw s4, __NANO_ESF_s4_OFFSET(sp)
	sw s5, __NANO_ESF_s5_OFFSET(sp)
	sw s6, __NANO_ESF_s6_OFFSET(sp)
	sw s7, __NANO_ESF_s7_OFFSET(sp)
	sw s8, __NANO_ESF_s8_OFFSET(sp)
	sw s9, __NANO_ESF_s9_OFFSET(sp)
	sw s10, __NANO_ESF_s10_OFFSET(sp)
	sw s11, __NANO_ESF_s11_OFFSET(sp)

#ifdef CONFIG_EXECUTION_BENCHMARKING
	call read_timer_start_of_isr
#endif
	/* Save MEPC register (return address of the interrupted code) */
	csrr t0, mepc
	sw t0, __NANO_ESF_mepc_OFFSET(sp)

	/* Save SOC-specific MSTATUS register */
	csrr t0, SOC_MSTATUS_REG
	sw t0, __NANO_ESF_mstatus_OFFSET(sp)

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Handle context saving at SOC level. */
	jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

switch_to_interrupt_stack:
	/* Get cpu context: mscratch holds this hart's _kernel.cpus[] entry */
	csrr t0, mscratch

	/* Increment nested interrupt count */
	lw t1, _cpu_offset_to_nested(t0)
	addi t2, t1, 1
	sw t2, _cpu_offset_to_nested(t0)

	/* Save thread stack pointer to temp register t2 */
	mv t2, sp

	/* If the nested count was nonzero, we're already on the
	 * interrupt stack */
	bnez t1, on_interrupt_stack

	/* Switch to this CPU's interrupt stack */
	lw sp, _cpu_offset_to_irq_stack(t0)

on_interrupt_stack:
	/*
	 * Save thread stack pointer on interrupt stack
	 * In RISC-V, stack pointer needs to be 16-byte aligned
	 */
	addi sp, sp, -16
	sw t2, 0x00(sp)

	/*
	 * Check if exception is the result of an interrupt or not.
	 * (SOC dependent). Following the RISC-V architecture spec, the MSB
	 * of the mcause register is used to indicate whether an exception
	 * is the result of an interrupt or an exception/fault. But for some
	 * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate
	 * interrupt. Hence, check for interrupt/exception via the __soc_is_irq
	 * function (that needs to be implemented by each SOC). The result is
	 * returned via register a0 (1: interrupt, 0 exception)
	 */
	jal ra, __soc_is_irq
	bnez a0, is_interrupt

	/*
	 * If the exception is the result of an ECALL, check whether to
	 * perform a context-switch or an IRQ offload. Otherwise call _Fault
	 * to report the exception.
	 */
	csrr t0, mcause
	li t2, SOC_MCAUSE_EXP_MASK
	and t0, t0, t2

	/*
	 * If mcause == SOC_MCAUSE_ECALL_EXP, handle system call,
	 * otherwise handle fault
	 */
	li t1, SOC_MCAUSE_ECALL_EXP
	beq t0, t1, is_syscall

	/*
	 * Call _Fault to handle exception.
	 * Load the stored stack frame off of the interrupt stack and pass it
	 * to _Fault (via register a0).
	 *
	 * If _Fault returns, set return address to leave_interrupt_stack
	 * to restore stack.
	 */
	lw a0, 0x00(sp)
	la ra, leave_interrupt_stack
	tail _Fault

is_syscall:
	/*
	 * A syscall is the result of an ecall instruction, in which case the
	 * MEPC will contain the address of the ecall instruction.
	 * Increment saved MEPC by 4 to prevent triggering the same ecall
	 * again upon exiting the ISR.
	 *
	 * It's safe to always increment by 4, even with compressed
	 * instructions, because the ecall instruction is always 4 bytes.
	 */
	lw t0, __NANO_ESF_mepc_OFFSET(sp)
	addi t0, t0, 4
	sw t0, __NANO_ESF_mepc_OFFSET(sp)

	j leave_interrupt_stack

is_interrupt:
#ifdef CONFIG_TRACING
	call z_sys_trace_isr_enter
#endif

	/* Get IRQ causing interrupt */
	csrr a0, mcause
	li t0, SOC_MCAUSE_EXP_MASK
	and a0, a0, t0

	/*
	 * Clear pending IRQ generating the interrupt at SOC level
	 * Pass IRQ number to __soc_handle_irq via register a0
	 */
	jal ra, __soc_handle_irq

	/*
	 * Load corresponding registered function in _sw_isr_table.
	 * (table is 8-bytes wide, we should shift index by 3)
	 */
	la t0, _sw_isr_table
	slli a0, a0, 3
	add t0, t0, a0

	/* Load argument in a0 register */
	lw a0, 0x00(t0)

	/* Load ISR function address in register t1 */
	lw t1, 0x04(t0)

	/* Call ISR function */
	jalr ra, t1

#ifdef CONFIG_EXECUTION_BENCHMARKING
	call read_timer_end_of_isr
#endif

leave_interrupt_stack:
	/* Get cpu context */
	csrr t0, mscratch

	/* Decrement nested count */
	lw t1, _cpu_offset_to_nested(t0)
	addi t1, t1, -1
	sw t1, _cpu_offset_to_nested(t0)

	/* Pop saved thread stack pointer off of the interrupt stack and into
	 * the argument to _get_next_switch_handle */
	lw a0, 0x00(sp)
	addi sp, sp, 16

	/* Ask the scheduler for the next thread to run */
	call _get_next_switch_handle

	/* Switch to the new thread's stack (its saved ESF is on top) */
	mv sp, a0

restore_next_thread:
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/*
	 * Handle context restoring at SOC level.
	 *
	 * BUGFIX: this is the restore path, so the SOC restore hook must
	 * run here; the original code called __soc_save_context a second
	 * time and never invoked __soc_restore_context at all, leaving
	 * SOC-specific state from the incoming thread unrestored.
	 */
	jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

	/* Restore MEPC register */
	lw t0, __NANO_ESF_mepc_OFFSET(sp)
	csrw mepc, t0

	/* Restore SOC-specific MSTATUS register */
	lw t0, __NANO_ESF_mstatus_OFFSET(sp)
	csrw SOC_MSTATUS_REG, t0

	/* Load saved registers */
	lw ra, __NANO_ESF_ra_OFFSET(sp)
	lw gp, __NANO_ESF_gp_OFFSET(sp)
	lw tp, __NANO_ESF_tp_OFFSET(sp)
	lw t0, __NANO_ESF_t0_OFFSET(sp)
	lw t1, __NANO_ESF_t1_OFFSET(sp)
	lw t2, __NANO_ESF_t2_OFFSET(sp)
	lw t3, __NANO_ESF_t3_OFFSET(sp)
	lw t4, __NANO_ESF_t4_OFFSET(sp)
	lw t5, __NANO_ESF_t5_OFFSET(sp)
	lw t6, __NANO_ESF_t6_OFFSET(sp)
	lw a0, __NANO_ESF_a0_OFFSET(sp)
	lw a1, __NANO_ESF_a1_OFFSET(sp)
	lw a2, __NANO_ESF_a2_OFFSET(sp)
	lw a3, __NANO_ESF_a3_OFFSET(sp)
	lw a4, __NANO_ESF_a4_OFFSET(sp)
	lw a5, __NANO_ESF_a5_OFFSET(sp)
	lw a6, __NANO_ESF_a6_OFFSET(sp)
	lw a7, __NANO_ESF_a7_OFFSET(sp)
	lw s0, __NANO_ESF_s0_OFFSET(sp)
	lw s1, __NANO_ESF_s1_OFFSET(sp)
	lw s2, __NANO_ESF_s2_OFFSET(sp)
	lw s3, __NANO_ESF_s3_OFFSET(sp)
	lw s4, __NANO_ESF_s4_OFFSET(sp)
	lw s5, __NANO_ESF_s5_OFFSET(sp)
	lw s6, __NANO_ESF_s6_OFFSET(sp)
	lw s7, __NANO_ESF_s7_OFFSET(sp)
	lw s8, __NANO_ESF_s8_OFFSET(sp)
	lw s9, __NANO_ESF_s9_OFFSET(sp)
	lw s10, __NANO_ESF_s10_OFFSET(sp)
	lw s11, __NANO_ESF_s11_OFFSET(sp)

	/* Free stack space */
	addi sp, sp, __NANO_ESF_SIZEOF

	/* Return from interrupt */
	SOC_ERET

14 changes: 14 additions & 0 deletions arch/riscv32/core/offsets/offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,20 @@ GEN_OFFSET_SYM(NANO_ESF, a4);
GEN_OFFSET_SYM(NANO_ESF, a5);
GEN_OFFSET_SYM(NANO_ESF, a6);
GEN_OFFSET_SYM(NANO_ESF, a7);
/* Offsets for the callee-saved registers (s0-s11), which are part of
 * the exception stack frame only for the switch-based context-switch
 * path (CONFIG_USE_SWITCH).
 *
 * NOTE(review): isr_smp.S references these s0-s11 offsets whenever
 * CONFIG_SMP is enabled -- presumably CONFIG_SMP implies
 * CONFIG_USE_SWITCH; confirm, otherwise an SMP build without
 * USE_SWITCH fails on undefined offset symbols.
 */
#ifdef CONFIG_USE_SWITCH
GEN_OFFSET_SYM(NANO_ESF, s0);
GEN_OFFSET_SYM(NANO_ESF, s1);
GEN_OFFSET_SYM(NANO_ESF, s2);
GEN_OFFSET_SYM(NANO_ESF, s3);
GEN_OFFSET_SYM(NANO_ESF, s4);
GEN_OFFSET_SYM(NANO_ESF, s5);
GEN_OFFSET_SYM(NANO_ESF, s6);
GEN_OFFSET_SYM(NANO_ESF, s7);
GEN_OFFSET_SYM(NANO_ESF, s8);
GEN_OFFSET_SYM(NANO_ESF, s9);
GEN_OFFSET_SYM(NANO_ESF, s10);
GEN_OFFSET_SYM(NANO_ESF, s11);
#endif

GEN_OFFSET_SYM(NANO_ESF, mepc);
GEN_OFFSET_SYM(NANO_ESF, mstatus);
Expand Down
Loading