
Commit

Fix double locking of cpu_queue caused by sched_enqueue not disabling interrupts when called outside interrupt context (#201)
fslongjin committed Mar 13, 2023
1 parent 33270d0 commit c2e757d
Showing 3 changed files with 26 additions and 21 deletions.
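
The fix applies the standard irqsave/irqrestore discipline: a lock that may also be taken from interrupt context must be acquired with local interrupts disabled, and the caller's previous interrupt state must be restored on release. Below is a minimal, self-contained sketch of that discipline in Rust; RawSpinlock, local_irq_save, and local_irq_restore are illustrative stand-ins for this sketch only, not DragonOS's actual types or helpers.

use core::sync::atomic::{AtomicBool, Ordering};

/// Illustrative spinlock; not DragonOS's real implementation.
pub struct RawSpinlock(AtomicBool);

impl RawSpinlock {
    pub const fn new() -> Self {
        Self(AtomicBool::new(false))
    }

    /// Plain lock: spins until acquired. If an interrupt handler on the
    /// same CPU calls lock() while the interrupted code holds the lock,
    /// the handler spins forever. That is the double lock this commit fixes.
    pub fn lock(&self) {
        while self
            .0
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
    }

    /// Save the caller's interrupt state into `rflags` and disable local
    /// interrupts before taking the lock, so no interrupt handler can
    /// re-enter the critical section on this CPU.
    pub fn lock_irqsave(&self, rflags: &mut u64) {
        *rflags = local_irq_save();
        self.lock();
    }

    pub fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }

    /// Release the lock, then restore the caller's exact interrupt state,
    /// which may itself have been "interrupts disabled".
    pub fn unlock_irqrestore(&self, rflags: &u64) {
        self.unlock();
        local_irq_restore(*rflags);
    }
}

// Hypothetical stand-ins for the architecture layer. On x86_64 a real
// kernel would read RFLAGS and execute cli here, and restore RFLAGS below.
fn local_irq_save() -> u64 {
    0 // placeholder for the saved RFLAGS value
}

fn local_irq_restore(_rflags: u64) {
    // placeholder: restore RFLAGS (and with it the interrupt-enable bit)
}

Passing rflags out by reference, as the diff does, lets nested critical sections each restore exactly the interrupt state they observed on entry.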
12 changes: 7 additions & 5 deletions kernel/src/sched/cfs.rs
@@ -66,30 +66,32 @@ impl CFSQueue {

    /// @brief Add a pcb to the run queue
    pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
-       self.lock.lock();
+       let mut rflags = 0u64;
+       self.lock.lock_irqsave(&mut rflags);

        // The IDLE process is never put on the queue
        if pcb.pid == 0 {
-           self.lock.unlock();
+           self.lock.unlock_irqrestore(&rflags);
            return;
        }
        self.queue.push(pcb);
        self.sort();
-       self.lock.unlock();
+       self.lock.unlock_irqrestore(&rflags);
    }

    /// @brief Pop a pcb from the scheduling queue; if the queue is empty, return the IDLE process's pcb
    pub fn dequeue(&mut self) -> &'static mut process_control_block {
        let res: &'static mut process_control_block;
-       self.lock.lock();
+       let mut rflags = 0u64;
+       self.lock.lock_irqsave(&mut rflags);
        if self.queue.len() > 0 {
            // The queue is not empty: pop the next pcb to run
            res = self.queue.pop().unwrap();
        } else {
            // The queue is empty: fall back to the IDLE process's pcb
            res = unsafe { self.idle_pcb.as_mut().unwrap() };
        }
-       self.lock.unlock();
+       self.lock.unlock_irqrestore(&rflags);
        return res;
    }
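
Why both enqueue and dequeue need the irqsave variants: per the commit title, sched_enqueue can run outside interrupt context with interrupts still enabled. A plausible interleaving (reconstructed for illustration, not taken from the commit): the task takes the per-CPU queue lock with a plain lock(); before it reaches unlock(), a timer interrupt fires on the same core; the scheduling path inside the handler then tries to take the same cpu_queue lock and spins forever, deadlocking the CPU against itself. With lock_irqsave the interrupt cannot be delivered inside the critical section, and unlock_irqrestore re-enables interrupts only if the caller had them enabled in the first place.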

11 changes: 5 additions & 6 deletions kernel/src/sched/core.rs
@@ -11,7 +11,7 @@ use crate::{
        process_control_block, pt_regs, EINVAL, EPERM, MAX_CPU_NUM, PF_NEED_MIGRATE, PROC_RUNNING,
        SCHED_FIFO, SCHED_NORMAL, SCHED_RR,
    },
-   process::process::process_cpu,
+   process::process::process_cpu
};

use super::cfs::{sched_cfs_init, SchedulerCFS, __get_cfs_scheduler};
@@ -34,7 +34,7 @@ pub fn get_cpu_loads(cpu_id: u32) -> u32 {
    let cfs_scheduler = __get_cfs_scheduler();
    let rt_scheduler = __get_rt_scheduler();
    let len_cfs = cfs_scheduler.get_cfs_queue_len(cpu_id);
-   let len_rt = rt_scheduler.get_rt_queue_len(cpu_id);
+   let len_rt = rt_scheduler.rt_queue_len(cpu_id);
    // let load_rt = rt_scheduler.get_load_list_len(cpu_id);
    // kdebug!("this cpu_id {} is load rt {}", cpu_id, load_rt);

@@ -111,14 +111,13 @@ pub extern "C" fn sched_enqueue(pcb: &'static mut process_control_block, mut res
    }
    let cfs_scheduler = __get_cfs_scheduler();
    let rt_scheduler = __get_rt_scheduler();
-   // TODO: processes with low pids are not migrated; this check needs rework. For now it only exists to debug rt processes that have finished initializing
-   // if pcb.pid > 4 && pcb.policy!=0{
-   if pcb.pid > 4 {

+   // Every process except IDLE takes part in load balancing
+   if pcb.pid > 0 {
        loads_balance(pcb);
    }
-   compiler_fence(core::sync::atomic::Ordering::SeqCst);

+   compiler_fence(core::sync::atomic::Ordering::SeqCst);
    if (pcb.flags & (PF_NEED_MIGRATE as u64)) != 0 {
        // kdebug!("migrating pcb:{:?}", pcb);
        pcb.flags &= !(PF_NEED_MIGRATE as u64);
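
A behavioral change rides along in sched_enqueue: load balancing now covers every process except IDLE (pid > 0) rather than only pids above 4, dropping the debugging-era special case the deleted TODO described. The compiler_fence merely moves across a blank line; in both versions it sits between loads_balance and the PF_NEED_MIGRATE test, preventing the compiler from reordering the flag check across the balancing step.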
24 changes: 14 additions & 10 deletions kernel/src/sched/rt.rs
@@ -52,41 +52,44 @@ impl RTQueue {
    }
    /// @brief Add a pcb to the run queue
    pub fn enqueue(&mut self, pcb: &'static mut process_control_block) {
-       self.lock.lock();
+       let mut rflags = 0u64;
+       self.lock.lock_irqsave(&mut rflags);

        // The IDLE process is never put on the queue
        if pcb.pid == 0 {
-           self.lock.unlock();
+           self.lock.unlock_irqrestore(&rflags);
            return;
        }
        self.queue.push_back(pcb);
-       self.lock.unlock();
+       self.lock.unlock_irqrestore(&rflags);
    }

    /// @brief Take a pcb from the head of the scheduling queue; if the queue is empty, return None
    pub fn dequeue(&mut self) -> Option<&'static mut process_control_block> {
        let res: Option<&'static mut process_control_block>;
-       self.lock.lock();
+       let mut rflags = 0u64;
+       self.lock.lock_irqsave(&mut rflags);
        if self.queue.len() > 0 {
            // The queue is not empty: pop the next pcb to run
            res = Some(self.queue.pop_front().unwrap());
        } else {
            // The queue is empty: return None
            res = None;
        }
-       self.lock.unlock();
+       self.lock.unlock_irqrestore(&rflags);
        return res;
    }
    pub fn enqueue_front(&mut self, pcb: &'static mut process_control_block) {
-       self.lock.lock();
+       let mut rflags = 0u64;
+       self.lock.lock_irqsave(&mut rflags);

        // The IDLE process is never put on the queue
        if pcb.pid == 0 {
-           self.lock.unlock();
+           self.lock.unlock_irqrestore(&rflags);
            return;
        }
        self.queue.push_front(pcb);
-       self.lock.unlock();
+       self.lock.unlock_irqrestore(&rflags);
    }
    pub fn get_rt_queue_size(&mut self) -> usize {
        return self.queue.len();
@@ -143,15 +146,16 @@ impl SchedulerRT {
None
}

-   pub fn get_rt_queue_len(&mut self, cpu_id: u32) -> usize {
+   pub fn rt_queue_len(&mut self, cpu_id: u32) -> usize {
let mut sum = 0;
for prio in 0..SchedulerRT::MAX_RT_PRIO {
sum += self.cpu_queue[cpu_id as usize][prio as usize].get_rt_queue_size();
}
return sum as usize;
}

-   pub fn get_load_list_len(&mut self, cpu_id: u32) -> usize {
+   #[inline]
+   pub fn load_list_len(&mut self, cpu_id: u32) -> usize {
return self.load_list[cpu_id as usize].len();
}
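
A side note on the accessor changes: dropping the get_ prefix (get_rt_queue_len to rt_queue_len, get_load_list_len to load_list_len) matches the Rust API guidelines, which reserve get_ for a few special cases and prefer plain noun names for getters. The added #[inline] is a reasonable hint for a one-line delegating accessor.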

