[libcpu] add hw-thread_self
Signed-off-by: Shell <smokewood@qq.com>
polarvid committed May 13, 2024
1 parent 74b60b7 commit e6a7cd3
Showing 9 changed files with 144 additions and 45 deletions.

include/rtdef.h (3 additions, 1 deletion)
@@ -731,8 +731,10 @@ struct rt_cpu
     struct rt_thread *current_thread;
 
     rt_uint8_t irq_switch_flag:1;
-    rt_uint8_t critical_switch_flag:1;
     rt_uint8_t sched_lock_flag:1;
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_uint8_t critical_switch_flag:1;
+#endif /* ARCH_USING_HW_THREAD_SELF */
 
     rt_uint8_t current_priority;
     rt_list_t priority_table[RT_THREAD_PRIORITY_MAX];

include/rtsched.h (1 addition, 0 deletions)
@@ -55,6 +55,7 @@ struct rt_sched_thread_ctx
     rt_uint8_t stat;                    /**< thread status */
     rt_uint8_t sched_flag_locked:1;     /**< calling thread have the scheduler locked */
     rt_uint8_t sched_flag_ttmr_set:1;   /**< thread timer is start */
+    rt_uint8_t critical_switch_flag:1;  /**< critical switch pending */
 
 #ifdef RT_USING_SMP
     rt_uint8_t bind_cpu;                /**< thread is bind to cpu */
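
Taken together with the rtdef.h hunk above, this relocates the pending-reschedule bit: with ARCH_USING_HW_THREAD_SELF enabled it lives in the per-thread scheduling context rather than in struct rt_cpu, so it is reachable from the thread pointer alone. A minimal sketch of the two access paths, using the real RT_SCHED_CTX() accessor and the field names from these headers (the helper function itself is hypothetical):

    #include <rtthread.h>
    #include <rtsched.h>

    /* Hypothetical helper: record that a reschedule is pending while the
     * scheduler is locked on this CPU. */
    static void mark_resched_pending(struct rt_thread *curthr, struct rt_cpu *pcpu)
    {
    #ifdef ARCH_USING_HW_THREAD_SELF
        (void)pcpu; /* unused: the per-thread bit needs no per-CPU lookup */
        RT_SCHED_CTX(curthr).critical_switch_flag = 1;
    #else
        /* per-CPU bit: only valid while the caller cannot migrate */
        pcpu->critical_switch_flag = 1;
    #endif
    }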

libcpu/Kconfig (6 additions, 0 deletions)
@@ -14,6 +14,8 @@ if ARCH_ARMV8 && ARCH_CPU_64BIT
         default y
     config ARCH_USING_GENERIC_CPUID
         bool "Using generic cpuid implemenation"
+        select ARCH_USING_HW_THREAD_SELF
+        default y if RT_USING_OFW
         default n
 endmenu
 endif
@@ -270,3 +272,7 @@ config ARCH_HOST_SIMULATOR
 config ARCH_CPU_STACK_GROWS_UPWARD
     bool
     default n
+
+config ARCH_USING_HW_THREAD_SELF
+    bool
+    default n
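
ARCH_USING_HW_THREAD_SELF is a promptless symbol, so it cannot be toggled from menuconfig; it is only switched on via `select`, as ARCH_USING_GENERIC_CPUID does above. After configuration the choice surfaces as a macro in the generated rtconfig.h, which is how the C sources below pick a code path. A minimal sketch of that guard pattern:

    #include <rtconfig.h> /* generated by the build; carries the Kconfig results */

    #ifdef ARCH_USING_HW_THREAD_SELF
    /* hardware-assisted path: the current thread sits in a system register */
    #else
    /* portable fallback: the current thread comes from the per-CPU block */
    #endif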

libcpu/aarch64/common/context_gcc.S (16 additions, 4 deletions)
@@ -27,15 +27,27 @@ rt_thread_switch_interrupt_flag: .zero 8
 #endif
 
 .text
+
+#ifdef ARCH_USING_GENERIC_CPUID
+.globl rt_hw_cpu_id_set
+#else /* !ARCH_USING_GENERIC_CPUID */
 .weak rt_hw_cpu_id_set
+#endif /* ARCH_USING_GENERIC_CPUID */
 .type rt_hw_cpu_id_set, @function
 rt_hw_cpu_id_set:
-    mrs x0, mpidr_el1           /* MPIDR_EL1: Multi-Processor Affinity Register */
+#ifndef RT_USING_OFW
+    mrs x0, mpidr_el1           /* MPIDR_EL1: Multi-Processor Affinity Register */
 #ifdef ARCH_ARM_CORTEX_A55
-    lsr x0, x0, #8
+    lsr x0, x0, #8
 #endif
+    and x0, x0, #15
+#endif /* !RT_USING_OFW */
+
+#ifdef ARCH_USING_GENERIC_CPUID
+    msr tpidrro_el0, x0
+#else
+    msr tpidr_el1, x0
+#endif
-    and x0, x0, #15
-    msr tpidr_el1, x0
     ret
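
The net effect of this hunk is a repartition of the AArch64 software thread-ID registers: with ARCH_USING_GENERIC_CPUID the CPU id now goes to TPIDRRO_EL0 (readable from EL0, writable only from EL1 and above), which frees TPIDR_EL1 to carry the current thread pointer. A sketch of the resulting read side in C, mirroring the accessors this commit adds in cpu.c and cpuport.h below (the function names here are illustrative):

    /* Sketch, assuming AArch64 and GCC inline asm. */
    static inline long read_cpu_id(void)
    {
        long id;
        __asm__ volatile ("mrs %0, tpidrro_el0" : "=r"(id));   /* CPU id */
        return id;
    }

    static inline void *read_thread_self(void)
    {
        void *thread;
        __asm__ volatile ("mrs %0, tpidr_el1" : "=r"(thread)); /* thread pointer */
        return thread;
    }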

libcpu/aarch64/common/cpu.c (23 additions, 0 deletions)
@@ -231,6 +231,29 @@ int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_o
 
 #endif /*RT_USING_SMP*/
 
+/**
+ * Generic hw-cpu-id
+ */
+#ifdef ARCH_USING_GENERIC_CPUID
+#if RT_CPUS_NR > 1
+
+int rt_hw_cpu_id(void)
+{
+    long cpuid;
+    __asm__ volatile("mrs %0, tpidrro_el0":"=r"(cpuid));
+    return cpuid;
+}
+
+#else
+
+int rt_hw_cpu_id(void)
+{
+    return 0;
+}
+
+#endif /* RT_CPUS_NR > 1 */
+#endif /* ARCH_USING_GENERIC_CPUID */
+
 /**
  * @addtogroup ARM CPU
  */
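
rt_hw_cpu_id() was previously an inline in cpuport.h that read TPIDR_EL1 (see the next file); it is now an out-of-line function reading TPIDRRO_EL0. A usage sketch, assuming the caller first pins itself to the CPU; the counters array is hypothetical:

    #include <rtthread.h>
    #include <rthw.h> /* declares rt_hw_cpu_id() */

    static rt_uint32_t event_counters[RT_CPUS_NR]; /* hypothetical per-CPU data */

    void count_event_on_this_cpu(void)
    {
        /* disable preemption so the CPU id cannot change under us */
        rt_enter_critical();
        event_counters[rt_hw_cpu_id()]++;
        rt_exit_critical();
    }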

libcpu/aarch64/common/cpuport.h (14 additions, 25 deletions)
@@ -27,31 +27,6 @@ typedef struct
     rt_uint32_t value;
 } rt_hw_spinlock_t;
 
-/**
- * Generic hw-cpu-id
- */
-#ifdef ARCH_USING_GENERIC_CPUID
-
-#if RT_CPUS_NR > 0
-
-rt_inline int rt_hw_cpu_id(void)
-{
-    long cpuid;
-    __asm__ volatile("mrs %0, tpidr_el1":"=r"(cpuid));
-    return cpuid;
-}
-
-#else
-
-rt_inline int rt_hw_cpu_id(void)
-{
-    return 0;
-}
-
-#endif /* RT_CPUS_NR > 1 */
-
-#endif /* ARCH_USING_GENERIC_CPUID */
-
 #endif /* RT_USING_SMP */
 
 #define rt_hw_barrier(cmd, ...) \
@@ -106,5 +81,19 @@ rt_inline int __rt_ffs(int value)
 }
 
 #endif /* RT_USING_CPU_FFS */
+#ifdef ARCH_USING_HW_THREAD_SELF
+rt_inline struct rt_thread *rt_hw_thread_self(void)
+{
+    struct rt_thread *thread;
+    __asm__ volatile ("mrs %0, tpidr_el1":"=r"(thread));
+
+    return thread;
+}
+
+rt_inline void rt_hw_thread_set_self(struct rt_thread *thread)
+{
+    __asm__ volatile ("msr tpidr_el1, %0"::"r"(thread));
+}
+#endif /* ARCH_USING_HW_THREAD_SELF */
 
 #endif /*CPUPORT_H__*/
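
A quick way to exercise the new accessors when bringing up a port: once threads are running, the scheduler's bookkeeping and the hardware register must always agree. A sketch, assuming cpuport.h is reachable through the usual rthw.h include:

    #include <rtthread.h>
    #include <rthw.h>

    void check_hw_thread_self(void)
    {
    #ifdef ARCH_USING_HW_THREAD_SELF
        /* rt_hw_thread_self() reads TPIDR_EL1; rt_thread_self() goes through
         * the kernel. In thread context both must name the same thread. */
        RT_ASSERT(rt_hw_thread_self() == rt_thread_self());
    #endif
    }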

libcpu/aarch64/cortex-a/entry_point.S (6 additions, 4 deletions)
@@ -95,6 +95,8 @@ _start:
     /* Save cpu stack */
     get_phy stack_top, .boot_cpu_stack_top
     /* Save cpu id temp */
+    msr tpidrro_el0, xzr
+    /* Save thread self */
     msr tpidr_el1, xzr
 
     bl init_cpu_el
@@ -149,11 +151,11 @@ _secondary_cpu_entry:
 
     /* Get cpu id success */
     sub x0, x2, #1
-    msr tpidr_el1, x0           /* Save cpu id global */
-#else
-    bl rt_hw_cpu_id_set
-    mrs x0, tpidr_el1
+
+    /* Save cpu id global */
 #endif /* RT_USING_OFW */
+    bl rt_hw_cpu_id_set
+    bl rt_hw_cpu_id
 
     /* Set current cpu's stack top */
     sub x0, x0, #1

src/scheduler_mp.c (68 additions, 10 deletions)
@@ -99,6 +99,14 @@ static struct rt_spinlock _mp_scheduler_lock;
         rt_hw_local_irq_enable(level); \
     } while (0)
 
+#ifdef ARCH_USING_HW_THREAD_SELF
+#define CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag)
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+#define CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag)
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
+
 static rt_uint32_t rt_thread_ready_priority_group;
 #if RT_THREAD_PRIORITY_MAX > 32
 /* Maximum priority level, 256 */
@@ -749,15 +757,15 @@ rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
         /* leaving critical region of global context since we can't schedule */
         SCHEDULER_CONTEXT_UNLOCK(pcpu);
 
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
         error = -RT_ESCHEDLOCKED;
 
         SCHEDULER_EXIT_CRITICAL(current_thread);
     }
     else
     {
         /* flush critical switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
 
         /* pick the highest runnable thread, and pass the control to it */
         to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
@@ -828,7 +836,7 @@ void rt_schedule(void)
     /* whether caller had locked the local scheduler already */
     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
     {
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
 
         SCHEDULER_EXIT_CRITICAL(current_thread);
 
@@ -837,7 +845,7 @@
     else
     {
         /* flush critical switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
         pcpu->irq_switch_flag = 0;
 
@@ -912,13 +920,13 @@ void rt_scheduler_do_irq_switch(void *context)
     /* whether caller had locked the local scheduler already */
     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
     {
-        pcpu->critical_switch_flag = 1;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 1;
         SCHEDULER_EXIT_CRITICAL(current_thread);
     }
     else if (rt_atomic_load(&(pcpu->irq_nest)) == 0)
     {
         /* flush critical & irq switch flag since a scheduling is done */
-        pcpu->critical_switch_flag = 0;
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
         pcpu->irq_switch_flag = 0;
 
         SCHEDULER_CONTEXT_LOCK(pcpu);
@@ -1056,6 +1064,9 @@ void rt_sched_post_ctx_switch(struct rt_thread *thread)
     }
     /* safe to access since irq is masked out */
     pcpu->current_thread = thread;
+#ifdef ARCH_USING_HW_THREAD_SELF
+    rt_hw_thread_set_self(thread);
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 
 #ifdef RT_DEBUGING_CRITICAL
@@ -1101,9 +1112,11 @@ RTM_EXPORT(rt_exit_critical_safe);
  */
 rt_base_t rt_enter_critical(void)
 {
-    rt_base_t level;
     rt_base_t critical_level;
     struct rt_thread *current_thread;
+
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_base_t level;
     struct rt_cpu *pcpu;
 
     /* disable interrupt */
@@ -1125,6 +1138,20 @@ rt_base_t rt_enter_critical(void)
     /* enable interrupt */
     rt_hw_local_irq_enable(level);
 
+#else /* !ARCH_USING_HW_THREAD_SELF */
+
+    current_thread = rt_hw_thread_self();
+    if (!current_thread)
+    {
+        /* scheduler unavailable */
+        return -RT_EINVAL;
+    }
+
+    /* critical for local cpu */
+    RT_SCHED_CTX(current_thread).critical_lock_nest++;
+    critical_level = RT_SCHED_CTX(current_thread).critical_lock_nest;
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
     return critical_level;
 }
 RTM_EXPORT(rt_enter_critical);
@@ -1134,9 +1161,11 @@ RTM_EXPORT(rt_enter_critical);
  */
 void rt_exit_critical(void)
 {
-    rt_base_t level;
     struct rt_thread *current_thread;
     rt_bool_t need_resched;
+
+#ifndef ARCH_USING_HW_THREAD_SELF
+    rt_base_t level;
     struct rt_cpu *pcpu;
 
     /* disable interrupt */
@@ -1157,8 +1186,8 @@ void rt_exit_critical(void)
     if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
     {
         /* is there any scheduling request unfinished? */
-        need_resched = pcpu->critical_switch_flag;
-        pcpu->critical_switch_flag = 0;
+        need_resched = CRITICAL_SWITCH_FLAG(pcpu, current_thread);
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
 
         /* enable interrupt */
         rt_hw_local_irq_enable(level);
@@ -1174,6 +1203,35 @@ void rt_exit_critical(void)
         /* enable interrupt */
         rt_hw_local_irq_enable(level);
     }
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+
+    current_thread = rt_hw_thread_self();
+    if (!current_thread)
+    {
+        return;
+    }
+
+    /* the necessary memory barrier is done on irq_(dis|en)able */
+    RT_SCHED_CTX(current_thread).critical_lock_nest--;
+
+    /* may need a rescheduling */
+    if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
+    {
+        /* is there any scheduling request unfinished? */
+        need_resched = CRITICAL_SWITCH_FLAG(pcpu, current_thread);
+        CRITICAL_SWITCH_FLAG(pcpu, current_thread) = 0;
+
+        if (need_resched)
+            rt_schedule();
+    }
+    else
+    {
+        /* each exit_critical is strictly corresponding to an enter_critical */
+        RT_ASSERT(RT_SCHED_CTX(current_thread).critical_lock_nest > 0);
+    }
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 RTM_EXPORT(rt_exit_critical);
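
One behavioral detail carried over to the new path: rt_enter_critical() reports -RT_EINVAL when there is no current thread yet (here detected by rt_hw_thread_self() returning RT_NULL instead of an IRQ-masked per-CPU lookup), and rt_exit_critical() returns quietly in the same window. A sketch of the usual pairing, with a defensive check for code that may run before the scheduler starts:

    #include <rtthread.h>

    void touch_cpu_local_state(void)
    {
        rt_base_t critical_level = rt_enter_critical();

        /* ... work that must not be preempted on this CPU ... */

        if (critical_level > 0) /* entered successfully; nest level is >= 1 */
            rt_exit_critical();
    }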

src/thread.c (7 additions, 1 deletion)
@@ -356,12 +356,18 @@ RTM_EXPORT(rt_thread_init);
 rt_thread_t rt_thread_self(void)
 {
 #ifdef RT_USING_SMP
-    rt_base_t lock;
     rt_thread_t self;
 
+#ifdef ARCH_USING_HW_THREAD_SELF
+    self = rt_hw_thread_self();
+#else /* !ARCH_USING_HW_THREAD_SELF */
+    rt_base_t lock;
+
     lock = rt_hw_local_irq_disable();
     self = rt_cpu_self()->current_thread;
     rt_hw_local_irq_enable(lock);
+
+#endif /* ARCH_USING_HW_THREAD_SELF */
     return self;
 
 #else /* !RT_USING_SMP */
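
The payoff of the series: on the ARCH_USING_HW_THREAD_SELF path rt_thread_self() collapses to a single system-register read, with no IRQ mask/unmask round trip. For example (a sketch; parent.name is the embedded rt_object name field in RT-Thread 5.x):

    #include <rtthread.h>

    static void log_current_thread(void)
    {
        rt_thread_t self = rt_thread_self(); /* one MRS on AArch64 */
        rt_kprintf("running in: %s\n", self ? self->parent.name : "n/a");
    }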
