@@ -39,16 +39,16 @@ static inline void set_thread_status(struct thread_object *obj, enum thread_obje
 	obj->status = status;
 }
 
-void get_schedule_lock(uint16_t pcpu_id)
+void obtain_schedule_lock(uint16_t pcpu_id, uint64_t *rflag)
 {
 	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
-	spinlock_obtain(&ctl->scheduler_lock);
+	spinlock_irqsave_obtain(&ctl->scheduler_lock, rflag);
 }
 
-void release_schedule_lock(uint16_t pcpu_id)
+void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag)
 {
 	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
-	spinlock_release(&ctl->scheduler_lock);
+	spinlock_irqrestore_release(&ctl->scheduler_lock, rflag);
 }
 
 static struct acrn_scheduler *get_scheduler(uint16_t pcpu_id)
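The hunk above replaces the plain spinlock_obtain()/spinlock_release() pair with the IRQ-save/restore variants and renames the acquire wrapper to obtain_schedule_lock(), which now hands the saved RFLAGS back to the caller. Every caller therefore keeps a local rflag and passes the same value back on release. A minimal sketch of the resulting calling pattern, assuming only the signatures shown in this hunk (the helper below is illustrative and not part of the patch):

/*
 * Illustrative caller of the new lock API. Based on the names in the hunk,
 * obtain_schedule_lock() is assumed to save RFLAGS through the pointer and
 * mask interrupts while the per-CPU scheduler lock is held, and
 * release_schedule_lock() to drop the lock and restore the saved RFLAGS.
 */
static void example_touch_sched_state(uint16_t pcpu_id)
{
	uint64_t rflag;

	obtain_schedule_lock(pcpu_id, &rflag);
	/* ... manipulate per_cpu(sched_ctl, pcpu_id) state here ... */
	release_schedule_lock(pcpu_id, rflag);
}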
@@ -91,13 +91,15 @@ void deinit_sched(uint16_t pcpu_id)
 void init_thread_data(struct thread_object *obj)
 {
 	struct acrn_scheduler *scheduler = get_scheduler(obj->pcpu_id);
-	get_schedule_lock(obj->pcpu_id);
+	uint64_t rflag;
+
+	obtain_schedule_lock(obj->pcpu_id, &rflag);
 	if (scheduler->init_data != NULL) {
 		scheduler->init_data(obj);
 	}
 	/* initial as BLOCKED status, so we can wake it up to run */
 	set_thread_status(obj, THREAD_STS_BLOCKED);
-	release_schedule_lock(obj->pcpu_id);
+	release_schedule_lock(obj->pcpu_id, rflag);
 }
 
 void deinit_thread_data(struct thread_object *obj)
@@ -151,8 +153,9 @@ void schedule(void)
 	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
 	struct thread_object *next = &per_cpu(idle, pcpu_id);
 	struct thread_object *prev = ctl->curr_obj;
+	uint64_t rflag;
 
-	get_schedule_lock(pcpu_id);
+	obtain_schedule_lock(pcpu_id, &rflag);
 	if (ctl->scheduler->pick_next != NULL) {
 		next = ctl->scheduler->pick_next(ctl);
 	}
@@ -164,7 +167,7 @@ void schedule(void)
 	}
 	set_thread_status(next, THREAD_STS_RUNNING);
 	ctl->curr_obj = next;
-	release_schedule_lock(pcpu_id);
+	release_schedule_lock(pcpu_id, rflag);
 
 	/* If we picked different sched object, switch context */
 	if (prev != next) {
@@ -184,8 +187,9 @@ void sleep_thread(struct thread_object *obj)
 {
 	uint16_t pcpu_id = obj->pcpu_id;
 	struct acrn_scheduler *scheduler = get_scheduler(pcpu_id);
+	uint64_t rflag;
 
-	get_schedule_lock(pcpu_id);
+	obtain_schedule_lock(pcpu_id, &rflag);
 	if (scheduler->sleep != NULL) {
 		scheduler->sleep(obj);
 	}
@@ -197,15 +201,16 @@ void sleep_thread(struct thread_object *obj)
 		}
 	}
 	set_thread_status(obj, THREAD_STS_BLOCKED);
-	release_schedule_lock(pcpu_id);
+	release_schedule_lock(pcpu_id, rflag);
 }
 
 void wake_thread(struct thread_object *obj)
 {
 	uint16_t pcpu_id = obj->pcpu_id;
 	struct acrn_scheduler *scheduler;
+	uint64_t rflag;
 
-	get_schedule_lock(pcpu_id);
+	obtain_schedule_lock(pcpu_id, &rflag);
 	if (is_blocked(obj)) {
 		scheduler = get_scheduler(pcpu_id);
 		if (scheduler->wake != NULL) {
@@ -214,16 +219,18 @@ void wake_thread(struct thread_object *obj)
 		set_thread_status(obj, THREAD_STS_RUNNABLE);
 		make_reschedule_request(pcpu_id, DEL_MODE_IPI);
 	}
-	release_schedule_lock(pcpu_id);
+	release_schedule_lock(pcpu_id, rflag);
 }
 
 void run_thread(struct thread_object *obj)
 {
+	uint64_t rflag;
+
 	init_thread_data(obj);
-	get_schedule_lock(obj->pcpu_id);
+	obtain_schedule_lock(obj->pcpu_id, &rflag);
 	get_cpu_var(sched_ctl).curr_obj = obj;
 	set_thread_status(obj, THREAD_STS_RUNNING);
-	release_schedule_lock(obj->pcpu_id);
+	release_schedule_lock(obj->pcpu_id, rflag);
 
 	if (obj->thread_entry != NULL) {
 		obj->thread_entry(obj);