scheduler: add cpu object for UP scheduler

Also, maintain the rt_current_thread in cpu object on UP scheduler.

polarvid committed May 22, 2024
1 parent da61998 commit 5a819be
Showing 6 changed files with 74 additions and 41 deletions.
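
Before the per-file diffs, a quick illustration of what the change enables: UP builds now expose the same `rt_cpu` accessors that SMP builds already had, so kernel code can query the current CPU object without `#ifdef RT_USING_SMP` guards. A minimal sketch assuming a post-commit RT-Thread build; `show_current_thread` is a hypothetical helper, not part of the commit:

#include <rtthread.h>

/* Hypothetical helper, not part of this commit: demonstrates that
 * rt_thread_self() and the cpu object now agree on both UP and SMP. */
static void show_current_thread(void)
{
    struct rt_cpu *pcpu = rt_cpu_self();     /* per-CPU object (static on UP) */
    rt_thread_t    self = rt_thread_self();  /* preferred public accessor */

    /* Both paths observe the same thread pointer. */
    RT_ASSERT(pcpu->current_thread == self);
    rt_kprintf("current thread: %p\n", (void *)self);
}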
11 changes: 10 additions & 1 deletion include/rtdef.h
@@ -756,10 +756,19 @@ struct rt_cpu
     struct rt_cpu_usage_stats cpu_stat;
 #endif
 };
-typedef struct rt_cpu *rt_cpu_t;
 
+#else /* !RT_USING_SMP */
+struct rt_cpu
+{
+    struct rt_thread *current_thread;
+};
+
 #endif /* RT_USING_SMP */
 
+typedef struct rt_cpu *rt_cpu_t;
+/* Note: defined as an API call to reject writes to this variable from application code */
+#define rt_current_thread rt_thread_self()
+
 struct rt_thread;
 
 #ifdef RT_USING_SMART
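
The new `rt_current_thread` macro expands to a function call, which is exactly why the diff's comment says it rejects writes: reads still compile, assignments do not. A small sketch (the commented-out line is hypothetical application code):

#include <rtthread.h>

static void macro_is_read_only(void)
{
    /* OK: rt_current_thread expands to rt_thread_self(), an rvalue */
    rt_thread_t t = rt_current_thread;
    (void)t;

    /* No longer compiles: a function call result is not an lvalue.
     * rt_current_thread = RT_NULL;  // error: lvalue required */
}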
9 changes: 6 additions & 3 deletions include/rtthread.h
@@ -668,6 +668,12 @@ rt_err_t rt_device_control(rt_device_t dev, int cmd, void *arg);
 void rt_interrupt_enter(void);
 void rt_interrupt_leave(void);
 
+/**
+ * CPU object
+ */
+struct rt_cpu *rt_cpu_self(void);
+struct rt_cpu *rt_cpu_index(int index);
+
 #ifdef RT_USING_SMP
 
 /*
@@ -678,9 +684,6 @@ rt_base_t rt_cpus_lock(void);
 void rt_cpus_unlock(rt_base_t level);
 void rt_cpus_lock_status_restore(struct rt_thread *thread);
 
-struct rt_cpu *rt_cpu_self(void);
-struct rt_cpu *rt_cpu_index(int index);
-
 #ifdef RT_USING_DEBUG
 rt_base_t rt_cpu_get_id(void);
 #else /* !RT_USING_DEBUG */
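
With the declarations moved outside the `RT_USING_SMP` block, callers can enumerate CPU objects portably. A sketch assuming `RT_CPUS_NR` (which mainline rtdef.h defines as 1 on UP builds) is available; `dump_cpus` is a hypothetical helper:

#include <rtthread.h>

/* Hypothetical helper: walks the cpu objects on either build. */
static void dump_cpus(void)
{
    for (int i = 0; i < RT_CPUS_NR; i++)
    {
        struct rt_cpu *pcpu = rt_cpu_index(i);
        if (pcpu == RT_NULL)
            break;  /* index out of range */
        rt_kprintf("cpu%d current thread: %p\n",
                   i, (void *)pcpu->current_thread);
    }
}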
26 changes: 26 additions & 0 deletions src/cpu_up.c
@@ -6,10 +6,14 @@
  * Change Logs:
  * Date           Author       Notes
  * 2024-04-19     Shell        Fixup UP irq spinlock
+ * 2024-05-22     Shell        Add UP cpu object and
+ *                             maintain the rt_current_thread inside it
  */
 #include <rthw.h>
 #include <rtthread.h>
 
+static struct rt_cpu _cpu;
+
 /**
  * @brief Initialize a static spinlock object.
  *
@@ -80,3 +84,25 @@ void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
     rt_exit_critical_safe(critical_level);
     rt_hw_interrupt_enable(level);
 }
+
+/**
+ * @brief This function will return the current cpu object.
+ *
+ * @return Return a pointer to the current cpu object.
+ */
+struct rt_cpu *rt_cpu_self(void)
+{
+    return &_cpu;
+}
+
+/**
+ * @brief This function will return the cpu object corresponding to index.
+ *
+ * @param index is the index of the target cpu object.
+ *
+ * @return Return a pointer to the cpu object corresponding to index.
+ */
+struct rt_cpu *rt_cpu_index(int index)
+{
+    return index == 0 ? &_cpu : RT_NULL;
+}
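
The UP implementation pins everything to a single static object, so the accessors have simple, testable semantics. A hypothetical sanity check, not part of the commit:

#include <rtthread.h>

static void cpu_up_sanity(void)
{
    /* On UP there is exactly one cpu object... */
    RT_ASSERT(rt_cpu_index(0) == rt_cpu_self());
    /* ...and any other index is rejected with RT_NULL. */
    RT_ASSERT(rt_cpu_index(1) == RT_NULL);
}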
17 changes: 0 additions & 17 deletions src/scheduler_mp.c
@@ -1331,22 +1331,5 @@ rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
     return RT_EOK;
 }
 
-rt_thread_t rt_sched_thread_self(void)
-{
-#ifdef ARCH_USING_HW_THREAD_SELF
-    return rt_hw_thread_self();
-
-#else /* !ARCH_USING_HW_THREAD_SELF */
-    rt_thread_t self;
-    rt_base_t lock;
-
-    lock = rt_hw_local_irq_disable();
-    self = rt_cpu_self()->current_thread;
-    rt_hw_local_irq_enable(lock);
-
-    return self;
-#endif /* ARCH_USING_HW_THREAD_SELF */
-}
-
 /**@}*/
 /**@endcond*/
35 changes: 16 additions & 19 deletions src/scheduler_up.c
@@ -48,7 +48,6 @@ rt_uint8_t rt_thread_ready_table[32];
 
 extern volatile rt_uint8_t rt_interrupt_nest;
 static rt_int16_t rt_scheduler_lock_nest;
-struct rt_thread *rt_current_thread = RT_NULL;
 rt_uint8_t rt_current_priority;
 
 #if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
@@ -175,7 +174,7 @@ void rt_system_scheduler_start(void)
 
     to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
 
-    rt_current_thread = to_thread;
+    rt_cpu_self()->current_thread = to_thread;
 
     rt_sched_remove_thread(to_thread);
     RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;
@@ -203,6 +202,8 @@ void rt_schedule(void)
     rt_base_t level;
     struct rt_thread *to_thread;
     struct rt_thread *from_thread;
+    /* use a local variable to avoid an unnecessary function call */
+    struct rt_thread *curr_thread = rt_thread_self();
 
     /* disable interrupt */
     level = rt_hw_interrupt_disable();
@@ -219,28 +220,29 @@
 
         to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
 
-        if ((RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
+        if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
         {
-            if (RT_SCHED_PRIV(rt_current_thread).current_priority < highest_ready_priority)
+            if (RT_SCHED_PRIV(curr_thread).current_priority < highest_ready_priority)
             {
-                to_thread = rt_current_thread;
+                to_thread = curr_thread;
             }
-            else if (RT_SCHED_PRIV(rt_current_thread).current_priority == highest_ready_priority && (RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
+            else if (RT_SCHED_PRIV(curr_thread).current_priority == highest_ready_priority
+                     && (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
             {
-                to_thread = rt_current_thread;
+                to_thread = curr_thread;
             }
             else
             {
                 need_insert_from_thread = 1;
             }
         }
 
-        if (to_thread != rt_current_thread)
+        if (to_thread != curr_thread)
         {
             /* if the destination thread is not the same as current thread */
             rt_current_priority = (rt_uint8_t)highest_ready_priority;
-            from_thread         = rt_current_thread;
-            rt_current_thread   = to_thread;
+            from_thread         = curr_thread;
+            rt_cpu_self()->current_thread = to_thread;
 
             RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
 
@@ -282,11 +284,11 @@ void rt_schedule(void)
 #ifdef RT_USING_SIGNALS
             /* check stat of thread for signal */
             level = rt_hw_interrupt_disable();
-            if (RT_SCHED_CTX(rt_current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
+            if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
             {
                 extern void rt_thread_handle_sig(rt_bool_t clean_state);
 
-                RT_SCHED_CTX(rt_current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
+                RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
 
                 rt_hw_interrupt_enable(level);
 
@@ -310,8 +312,8 @@
         }
         else
        {
-            rt_sched_remove_thread(rt_current_thread);
-            RT_SCHED_CTX(rt_current_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(rt_current_thread).stat & ~RT_THREAD_STAT_MASK);
+            rt_sched_remove_thread(curr_thread);
+            RT_SCHED_CTX(curr_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(curr_thread).stat & ~RT_THREAD_STAT_MASK);
        }
    }
 }
@@ -564,10 +566,5 @@ rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
     return -RT_EINVAL;
 }
 
-rt_thread_t rt_sched_thread_self(void)
-{
-    return rt_current_thread;
-}
-
 /**@}*/
 /**@endcond*/
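
The refactor leaves the UP preemption rule itself unchanged; it is merely re-expressed through the cached `curr_thread`. Restated as a standalone predicate (a sketch, not RT-Thread source; a lower numeric priority value means higher priority):

#include <rtthread.h>

/* Sketch of the decision rt_schedule() makes above: should the running
 * thread keep the CPU instead of switching to the highest-ready thread? */
static int keep_running(rt_uint8_t curr_prio, rt_uint8_t highest_ready,
                        rt_bool_t curr_is_running, rt_bool_t yielded)
{
    if (!curr_is_running)
        return 0;              /* current thread is blocked: must switch */
    if (curr_prio < highest_ready)
        return 1;              /* strictly higher priority: keep running */
    if (curr_prio == highest_ready && !yielded)
        return 1;              /* equal priority and no explicit yield */
    return 0;                  /* otherwise preempt and re-queue */
}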
17 changes: 16 additions & 1 deletion src/thread.c
@@ -355,7 +355,22 @@ RTM_EXPORT(rt_thread_init);
  */
 rt_thread_t rt_thread_self(void)
 {
-    return rt_sched_thread_self();
+#ifndef RT_USING_SMP
+    return rt_cpu_self()->current_thread;
+
+#elif defined (ARCH_USING_HW_THREAD_SELF)
+    return rt_hw_thread_self();
+
+#else /* !ARCH_USING_HW_THREAD_SELF */
+    rt_thread_t self;
+    rt_base_t lock;
+
+    lock = rt_hw_local_irq_disable();
+    self = rt_cpu_self()->current_thread;
+    rt_hw_local_irq_enable(lock);
+
+    return self;
+#endif /* ARCH_USING_HW_THREAD_SELF */
 }
 RTM_EXPORT(rt_thread_self);
 
