Skip to content

Commit 810305b

Browse files
conghuic23 and wenlingz
authored and committed
hv: sched: disable interrupt when grab schedule spinlock
After moving softirq to following interrupt path, softirq handler might break in the schedule spinlock context and try to grab the lock again, then deadlock. Disable interrupt with schedule spinlock context. For the IRQ disable/restore operations: CPU_INT_ALL_DISABLE(&rflag) CPU_INT_ALL_RESTORE(rflag) each takes 50~60 cycles. renaming: get_schedule_lock -> obtain_schedule_lock Tracked-On: #3813 Signed-off-by: Conghui Chen <conghui.chen@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 15c6a3e commit 810305b

File tree

2 files changed

+23
-16
lines changed

2 files changed

+23
-16
lines changed

hypervisor/common/schedule.c

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -39,16 +39,16 @@ static inline void set_thread_status(struct thread_object *obj, enum thread_obje
3939
obj->status = status;
4040
}
4141

42-
void get_schedule_lock(uint16_t pcpu_id)
42+
void obtain_schedule_lock(uint16_t pcpu_id, uint64_t *rflag)
4343
{
4444
struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
45-
spinlock_obtain(&ctl->scheduler_lock);
45+
spinlock_irqsave_obtain(&ctl->scheduler_lock, rflag);
4646
}
4747

48-
void release_schedule_lock(uint16_t pcpu_id)
48+
void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag)
4949
{
5050
struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
51-
spinlock_release(&ctl->scheduler_lock);
51+
spinlock_irqrestore_release(&ctl->scheduler_lock, rflag);
5252
}
5353

5454
static struct acrn_scheduler *get_scheduler(uint16_t pcpu_id)
@@ -91,13 +91,15 @@ void deinit_sched(uint16_t pcpu_id)
9191
void init_thread_data(struct thread_object *obj)
9292
{
9393
struct acrn_scheduler *scheduler = get_scheduler(obj->pcpu_id);
94-
get_schedule_lock(obj->pcpu_id);
94+
uint64_t rflag;
95+
96+
obtain_schedule_lock(obj->pcpu_id, &rflag);
9597
if (scheduler->init_data != NULL) {
9698
scheduler->init_data(obj);
9799
}
98100
/* initial as BLOCKED status, so we can wake it up to run */
99101
set_thread_status(obj, THREAD_STS_BLOCKED);
100-
release_schedule_lock(obj->pcpu_id);
102+
release_schedule_lock(obj->pcpu_id, rflag);
101103
}
102104

103105
void deinit_thread_data(struct thread_object *obj)
@@ -151,8 +153,9 @@ void schedule(void)
151153
struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
152154
struct thread_object *next = &per_cpu(idle, pcpu_id);
153155
struct thread_object *prev = ctl->curr_obj;
156+
uint64_t rflag;
154157

155-
get_schedule_lock(pcpu_id);
158+
obtain_schedule_lock(pcpu_id, &rflag);
156159
if (ctl->scheduler->pick_next != NULL) {
157160
next = ctl->scheduler->pick_next(ctl);
158161
}
@@ -164,7 +167,7 @@ void schedule(void)
164167
}
165168
set_thread_status(next, THREAD_STS_RUNNING);
166169
ctl->curr_obj = next;
167-
release_schedule_lock(pcpu_id);
170+
release_schedule_lock(pcpu_id, rflag);
168171

169172
/* If we picked different sched object, switch context */
170173
if (prev != next) {
@@ -184,8 +187,9 @@ void sleep_thread(struct thread_object *obj)
184187
{
185188
uint16_t pcpu_id = obj->pcpu_id;
186189
struct acrn_scheduler *scheduler = get_scheduler(pcpu_id);
190+
uint64_t rflag;
187191

188-
get_schedule_lock(pcpu_id);
192+
obtain_schedule_lock(pcpu_id, &rflag);
189193
if (scheduler->sleep != NULL) {
190194
scheduler->sleep(obj);
191195
}
@@ -197,15 +201,16 @@ void sleep_thread(struct thread_object *obj)
197201
}
198202
}
199203
set_thread_status(obj, THREAD_STS_BLOCKED);
200-
release_schedule_lock(pcpu_id);
204+
release_schedule_lock(pcpu_id, rflag);
201205
}
202206

203207
void wake_thread(struct thread_object *obj)
204208
{
205209
uint16_t pcpu_id = obj->pcpu_id;
206210
struct acrn_scheduler *scheduler;
211+
uint64_t rflag;
207212

208-
get_schedule_lock(pcpu_id);
213+
obtain_schedule_lock(pcpu_id, &rflag);
209214
if (is_blocked(obj)) {
210215
scheduler = get_scheduler(pcpu_id);
211216
if (scheduler->wake != NULL) {
@@ -214,16 +219,18 @@ void wake_thread(struct thread_object *obj)
214219
set_thread_status(obj, THREAD_STS_RUNNABLE);
215220
make_reschedule_request(pcpu_id, DEL_MODE_IPI);
216221
}
217-
release_schedule_lock(pcpu_id);
222+
release_schedule_lock(pcpu_id, rflag);
218223
}
219224

220225
void run_thread(struct thread_object *obj)
221226
{
227+
uint64_t rflag;
228+
222229
init_thread_data(obj);
223-
get_schedule_lock(obj->pcpu_id);
230+
obtain_schedule_lock(obj->pcpu_id, &rflag);
224231
get_cpu_var(sched_ctl).curr_obj = obj;
225232
set_thread_status(obj, THREAD_STS_RUNNING);
226-
release_schedule_lock(obj->pcpu_id);
233+
release_schedule_lock(obj->pcpu_id, rflag);
227234

228235
if (obj->thread_entry != NULL) {
229236
obj->thread_entry(obj);

hypervisor/include/common/schedule.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -88,8 +88,8 @@ struct thread_object *sched_get_current(uint16_t pcpu_id);
8888

8989
void init_sched(uint16_t pcpu_id);
9090
void deinit_sched(uint16_t pcpu_id);
91-
void get_schedule_lock(uint16_t pcpu_id);
92-
void release_schedule_lock(uint16_t pcpu_id);
91+
void obtain_schedule_lock(uint16_t pcpu_id, uint64_t *rflag);
92+
void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag);
9393

9494
void init_thread_data(struct thread_object *obj);
9595
void deinit_thread_data(struct thread_object *obj);

0 commit comments

Comments (0)