Commit ff0703d

JasonChenCJ authored and wenlingz committed

scheduler: make scheduling based on struct sched_object

Use struct sched_object as the main interface of scheduling, then make the
scheduler an independent module with respect to vcpu:

- add struct sched_object as one field in struct vcpu
- define sched_object.thread for the switch_to thread
- define sched_object.prepare_switch_out/in, run before switch_to
- move context_switch_out/context_switch_in into vcpu.c as
  vcpu.sched_obj.prepare_switch_out/in
- make default_idle the global idle.thread for the idle thread
- make vcpu_thread the vcpu.sched_obj.thread for each vcpu thread
- simplify switch_to based on sched_object

Tracked-On: #1842
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 8aae0df commit ff0703d

File tree

6 files changed: +116 / -104 lines changed


hypervisor/arch/x86/guest/vcpu.c

Lines changed: 31 additions & 0 deletions
@@ -600,6 +600,34 @@ void resume_vcpu(struct acrn_vcpu *vcpu)
 	release_schedule_lock(vcpu->pcpu_id);
 }
 
+static void context_switch_out(struct sched_object *prev)
+{
+	struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, sched_obj);
+
+	/* cancel event(int, gp, nmi and exception) injection */
+	cancel_event_injection(vcpu);
+
+	atomic_store32(&vcpu->running, 0U);
+	/* do prev vcpu context switch out */
+	/* For now, we don't need to invalid ept.
+	 * But if we have more than one vcpu on one pcpu,
+	 * we need add ept invalid operation here.
+	 */
+}
+
+static void context_switch_in(struct sched_object *next)
+{
+	struct acrn_vcpu *vcpu = list_entry(next, struct acrn_vcpu, sched_obj);
+
+	atomic_store32(&vcpu->running, 1U);
+	/* FIXME:
+	 * Now, we don't need to load new vcpu VMCS because
+	 * we only do switch between vcpu loop and idle loop.
+	 * If we have more than one vcpu on on pcpu, need to
+	 * add VMCS load operation here.
+	 */
+}
+
 void schedule_vcpu(struct acrn_vcpu *vcpu)
 {
 	vcpu->state = VCPU_RUNNING;
@@ -625,6 +653,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 	set_pcpu_used(pcpu_id);
 
 	INIT_LIST_HEAD(&vcpu->sched_obj.run_list);
+	vcpu->sched_obj.thread = vcpu_thread;
+	vcpu->sched_obj.prepare_switch_out = context_switch_out;
+	vcpu->sched_obj.prepare_switch_in = context_switch_in;
 
 	return ret;
 }
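Both new callbacks recover the owning vcpu from the embedded sched_obj member with list_entry(). As a reminder of how that pattern works, here is a minimal stand-alone sketch of the usual container_of-style idiom; my_list_entry and the stub types are illustrative only, and ACRN's own list_entry macro may differ in detail:

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the hypervisor types. */
struct sched_object { int dummy; };
struct acrn_vcpu {
	int vcpu_id;
	struct sched_object sched_obj;	/* embedded scheduling handle */
};

/* The usual container_of idiom: subtract the member offset from the
 * member pointer to get back to the enclosing structure. */
#define my_list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct acrn_vcpu v = { .vcpu_id = 3 };
	struct sched_object *obj = &v.sched_obj;

	/* Recover the vcpu from its embedded sched_object, as
	 * context_switch_out()/context_switch_in() do above. */
	struct acrn_vcpu *vcpu = my_list_entry(obj, struct acrn_vcpu, sched_obj);

	printf("vcpu_id = %d\n", vcpu->vcpu_id);
	return 0;
}
```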

hypervisor/arch/x86/init.c

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ static void enter_guest_mode(uint16_t pcpu_id)
 	}
 #endif
 
-	default_idle();
+	switch_to_idle(default_idle);
 
 	/* Control should not come here */
 	cpu_dead();

hypervisor/common/hv_main.c

Lines changed: 20 additions & 1 deletion
@@ -17,8 +17,9 @@ static void run_vcpu_pre_work(struct acrn_vcpu *vcpu)
 	}
 }
 
-void vcpu_thread(struct acrn_vcpu *vcpu)
+void vcpu_thread(struct sched_object *obj)
 {
+	struct acrn_vcpu *vcpu = list_entry(obj, struct acrn_vcpu, sched_obj);
 	uint32_t basic_exit_reason = 0U;
 	int32_t ret = 0;
 
@@ -88,3 +89,21 @@ void vcpu_thread(struct acrn_vcpu *vcpu)
 		profiling_post_vmexit_handler(vcpu);
 	} while (1);
 }
+
+void default_idle(__unused struct sched_object *obj)
+{
+	uint16_t pcpu_id = get_cpu_id();
+
+	while (1) {
+		if (need_reschedule(pcpu_id) != 0) {
+			schedule();
+		} else if (need_offline(pcpu_id) != 0) {
+			cpu_dead();
+		} else {
+			CPU_IRQ_ENABLE();
+			handle_complete_ioreq(pcpu_id);
+			cpu_do_idle();
+			CPU_IRQ_DISABLE();
+		}
+	}
+}
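default_idle() polls need_reschedule(), which the schedule.c hunks below implement as a locked test-and-clear of the NEED_RESCHEDULE bit in the per-cpu flags word (bitmap_test_and_clear_lock). A user-space sketch of that flag protocol, substituting C11 atomics for the hypervisor's bitmap helpers and treating NEED_RESCHEDULE as a bit index; all _demo names are illustrative:

```c
#include <stdatomic.h>
#include <stdio.h>

#define NEED_RESCHEDULE (1U)

static atomic_ulong sched_flags;	/* stand-in for sched_context.flags */

/* Requester side: set the flag bit (another CPU or an interrupt path). */
static void make_reschedule_request_demo(void)
{
	atomic_fetch_or(&sched_flags, 1UL << NEED_RESCHEDULE);
}

/* Idle-loop side: test and clear in one atomic step, analogous to
 * bitmap_test_and_clear_lock() in the hypervisor. */
static int need_reschedule_demo(void)
{
	unsigned long mask = 1UL << NEED_RESCHEDULE;
	unsigned long old = atomic_fetch_and(&sched_flags, ~mask);

	return (old & mask) != 0UL;
}

int main(void)
{
	make_reschedule_request_demo();
	printf("first poll:  %d\n", need_reschedule_demo());	/* 1: request consumed */
	printf("second poll: %d\n", need_reschedule_demo());	/* 0: already cleared */
	return 0;
}
```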

hypervisor/common/schedule.c

Lines changed: 53 additions & 97 deletions
@@ -8,6 +8,7 @@
 #include <schedule.h>
 
 static uint64_t pcpu_used_bitmap;
+static struct sched_object idle;
 
 void init_scheduler(void)
 {
@@ -21,7 +22,7 @@ void init_scheduler(void)
 		spinlock_init(&ctx->scheduler_lock);
 		INIT_LIST_HEAD(&ctx->runqueue);
 		ctx->flags = 0UL;
-		ctx->curr_vcpu = NULL;
+		ctx->curr_obj = NULL;
 	}
 }
 
@@ -94,18 +95,6 @@ static struct sched_object *get_next_sched_obj(uint16_t pcpu_id)
 	return obj;
 }
 
-static struct acrn_vcpu *select_next_vcpu(uint16_t pcpu_id)
-{
-	struct acrn_vcpu *vcpu = NULL;
-	struct sched_object *obj = get_next_sched_obj(pcpu_id);
-
-	if (obj != NULL) {
-		vcpu = list_entry(obj, struct acrn_vcpu, sched_obj);
-	}
-
-	return vcpu;
-}
-
 void make_reschedule_request(uint16_t pcpu_id)
 {
 	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
@@ -123,43 +112,6 @@ int32_t need_reschedule(uint16_t pcpu_id)
 	return bitmap_test_and_clear_lock(NEED_RESCHEDULE, &ctx->flags);
 }
 
-static void context_switch_out(struct acrn_vcpu *vcpu)
-{
-	/* if it's idle thread, no action for switch out */
-	if (vcpu == NULL) {
-		return;
-	}
-
-	/* cancel event(int, gp, nmi and exception) injection */
-	cancel_event_injection(vcpu);
-
-	atomic_store32(&vcpu->running, 0U);
-	/* do prev vcpu context switch out */
-	/* For now, we don't need to invalid ept.
-	 * But if we have more than one vcpu on one pcpu,
-	 * we need add ept invalid operation here.
-	 */
-}
-
-static void context_switch_in(struct acrn_vcpu *vcpu)
-{
-	/* update current_vcpu */
-	get_cpu_var(sched_ctx).curr_vcpu = vcpu;
-
-	/* if it's idle thread, no action for switch out */
-	if (vcpu == NULL) {
-		return;
-	}
-
-	atomic_store32(&vcpu->running, 1U);
-	/* FIXME:
-	 * Now, we don't need to load new vcpu VMCS because
-	 * we only do switch between vcpu loop and idle loop.
-	 * If we have more than one vcpu on on pcpu, need to
-	 * add VMCS load operation here.
-	 */
-}
-
 void make_pcpu_offline(uint16_t pcpu_id)
 {
 	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
@@ -177,80 +129,84 @@ int32_t need_offline(uint16_t pcpu_id)
 	return bitmap_test_and_clear_lock(NEED_OFFLINE, &ctx->flags);
 }
 
-void default_idle(void)
+static void switch_to_asm(struct sched_object *next, uint64_t cur_sp)
 {
-	uint16_t pcpu_id = get_cpu_id();
-
-	while (1) {
-		if (need_reschedule(pcpu_id) != 0) {
-			schedule();
-		} else if (need_offline(pcpu_id) != 0) {
-			cpu_dead();
-		} else {
-			CPU_IRQ_ENABLE();
-			handle_complete_ioreq(pcpu_id);
-			cpu_do_idle();
-			CPU_IRQ_DISABLE();
-		}
-	}
+	asm volatile ("movq %2, %%rsp\n"
+			"movq %0, %%rdi\n"
+			"call 22f\n"
+			"11: \n"
+			"pause\n"
+			"jmp 11b\n"
+			"22:\n"
+			"mov %1, (%%rsp)\n"
+			"ret\n"
+			:
+			: "c"(next), "a"(next->thread), "r"(cur_sp)
+			: "memory");
 }
 
-static void switch_to(struct acrn_vcpu *curr)
+static void switch_to(struct sched_object *next)
 {
 	/*
 	 * reset stack pointer here. Otherwise, schedule
 	 * is recursive call and stack will overflow finally.
 	 */
 	uint64_t cur_sp = (uint64_t)&get_cpu_var(stack)[CONFIG_STACK_SIZE];
 
-	if (curr == NULL) {
-		asm volatile ("movq %1, %%rsp\n"
-				"movq $0, %%rdi\n"
-				"call 22f\n"
-				"11: \n"
-				"pause\n"
-				"jmp 11b\n"
-				"22:\n"
-				"mov %0, (%%rsp)\n"
-				"ret\n"
-				:
-				: "a"(default_idle), "r"(cur_sp)
-				: "memory");
-	} else {
-		asm volatile ("movq %2, %%rsp\n"
-				"movq %0, %%rdi\n"
-				"call 44f\n"
-				"33: \n"
-				"pause\n"
-				"jmp 33b\n"
-				"44:\n"
-				"mov %1, (%%rsp)\n"
-				"ret\n"
-				:
-				: "c"(curr), "a"(vcpu_thread), "r"(cur_sp)
-				: "memory");
+	switch_to_asm(next, cur_sp);
+}
+
+static void prepare_switch(struct sched_object *prev, struct sched_object *next)
+{
+	if ((prev != NULL) && (prev->prepare_switch_out != NULL)) {
+		prev->prepare_switch_out(prev);
+	}
+
+	/* update current object */
+	get_cpu_var(sched_ctx).curr_obj = next;
+
+	if ((next != NULL) && (next->prepare_switch_in != NULL)) {
+		next->prepare_switch_in(next);
 	}
 }
 
 void schedule(void)
 {
 	uint16_t pcpu_id = get_cpu_id();
-	struct acrn_vcpu *next = NULL;
-	struct acrn_vcpu *prev = per_cpu(sched_ctx, pcpu_id).curr_vcpu;
+	struct sched_object *next = NULL;
+	struct sched_object *prev = per_cpu(sched_ctx, pcpu_id).curr_obj;
 
 	get_schedule_lock(pcpu_id);
-	next = select_next_vcpu(pcpu_id);
+	next = get_next_sched_obj(pcpu_id);
 
 	if (prev == next) {
 		release_schedule_lock(pcpu_id);
 		return;
 	}
 
-	context_switch_out(prev);
-	context_switch_in(next);
+	prepare_switch(prev, next);
 	release_schedule_lock(pcpu_id);
 
+	if (next == NULL) {
+		next = &idle;
+	}
+
 	switch_to(next);
 
 	ASSERT(false, "Shouldn't go here");
 }
+
+void switch_to_idle(run_thread_t idle_thread)
+{
+	uint16_t pcpu_id = get_cpu_id();
+
+	if (pcpu_id == BOOT_CPU_ID) {
+		idle.thread = idle_thread;
+		idle.prepare_switch_out = NULL;
+		idle.prepare_switch_in = NULL;
+	}
+
+	if (idle_thread != NULL) {
+		idle_thread(&idle);
+	}
+}
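The unified switch_to_asm() never returns: it resets rsp to the top of the per-cpu stack, puts the sched_object into rdi (the first SysV argument register), then overwrites the return address pushed by "call" with next->thread and executes "ret", so the thread starts on the freshly reset stack. The following is a user-space re-creation of the same call/ret trick, not the hypervisor code itself; it assumes x86-64 SysV and GCC/Clang inline asm, and demo_thread, fresh_stack, and the exit(0) are illustrative additions:

```c
#include <stdio.h>
#include <stdlib.h>

struct sched_object;
typedef void (*run_thread_t)(struct sched_object *obj);

struct sched_object {
	run_thread_t thread;
};

/* A private stack for the "switched-to" thread. */
static unsigned char fresh_stack[64 * 1024];

static void demo_thread(struct sched_object *obj)
{
	(void)obj;
	puts("thread entered on a freshly reset stack");
	exit(0);	/* must never return: the caller's stack frame is gone */
}

/* Same trick as switch_to_asm(): reset rsp, pass the object in rdi,
 * replace the pushed return address with obj->thread, then "ret" into it.
 * The 11: pause loop is only a landing pad and is never reached. */
static void switch_to_demo(struct sched_object *next, unsigned long cur_sp)
{
	asm volatile ("movq %2, %%rsp\n"
			"movq %0, %%rdi\n"
			"call 22f\n"
			"11: \n"
			"pause\n"
			"jmp 11b\n"
			"22:\n"
			"mov %1, (%%rsp)\n"
			"ret\n"
			:
			: "c"(next), "a"(next->thread), "r"(cur_sp)
			: "memory");
}

int main(void)
{
	struct sched_object obj = { .thread = demo_thread };
	unsigned long top = (unsigned long)&fresh_stack[sizeof(fresh_stack)];

	/* 16-byte align, minus 8 so rsp looks as it would right after a call. */
	switch_to_demo(&obj, (top & ~0xfUL) - 8UL);
	return 0;
}
```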

hypervisor/include/arch/x86/guest/vcpu.h

Lines changed: 3 additions & 0 deletions
@@ -299,6 +299,9 @@ vcpu_vlapic(struct acrn_vcpu *vcpu)
 	return &(vcpu->arch.vlapic);
 }
 
+void default_idle(struct sched_object *obj);
+void vcpu_thread(struct sched_object *obj);
+
 /* External Interfaces */
 
 /**

hypervisor/include/common/schedule.h

Lines changed: 8 additions & 5 deletions
@@ -10,19 +10,26 @@
 #define NEED_RESCHEDULE (1U)
 #define NEED_OFFLINE (2U)
 
+struct sched_object;
+typedef void (*run_thread_t)(struct sched_object *obj);
+typedef void (*prepare_switch_t)(struct sched_object *obj);
 struct sched_object {
 	struct list_head run_list;
+	run_thread_t thread;
+	prepare_switch_t prepare_switch_out;
+	prepare_switch_t prepare_switch_in;
 };
 
 struct sched_context {
 	spinlock_t runqueue_lock;
 	struct list_head runqueue;
 	uint64_t flags;
-	struct acrn_vcpu *curr_vcpu;
+	struct sched_object *curr_obj;
 	spinlock_t scheduler_lock;
 };
 
 void init_scheduler(void);
+void switch_to_idle(run_thread_t idle_thread);
 void get_schedule_lock(uint16_t pcpu_id);
 void release_schedule_lock(uint16_t pcpu_id);
 
@@ -33,15 +40,11 @@ void free_pcpu(uint16_t pcpu_id);
 void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
 void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
 
-void default_idle(void);
-
 void make_reschedule_request(uint16_t pcpu_id);
 int32_t need_reschedule(uint16_t pcpu_id);
 void make_pcpu_offline(uint16_t pcpu_id);
 int32_t need_offline(uint16_t pcpu_id);
 
 void schedule(void);
-
-void vcpu_thread(struct acrn_vcpu *vcpu);
 #endif /* SCHEDULE_H */
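With these typedefs, the scheduler core only ever touches struct sched_object: anything that embeds one and fills in its callbacks can be scheduled, vcpu or not. Below is a stand-alone toy model of that contract; the worker_* functions and the simplified struct (without run_list, spinlocks, or the per-cpu context) are illustrative only, not ACRN code:

```c
#include <stdio.h>

/* Toy version of the interface above: the core sees only sched_object. */
struct sched_object;
typedef void (*run_thread_t)(struct sched_object *obj);
typedef void (*prepare_switch_t)(struct sched_object *obj);

struct sched_object {
	run_thread_t thread;
	prepare_switch_t prepare_switch_out;
	prepare_switch_t prepare_switch_in;
};

/* Generic core, mirroring prepare_switch() in schedule.c (the real one
 * also records the new object in sched_ctx.curr_obj). */
static void prepare_switch(struct sched_object *prev, struct sched_object *next)
{
	if ((prev != NULL) && (prev->prepare_switch_out != NULL)) {
		prev->prepare_switch_out(prev);
	}

	if ((next != NULL) && (next->prepare_switch_in != NULL)) {
		next->prepare_switch_in(next);
	}
}

/* A consumer that is not a vcpu at all. */
static void worker_in(struct sched_object *obj)  { (void)obj; puts("worker: switch in"); }
static void worker_out(struct sched_object *obj) { (void)obj; puts("worker: switch out"); }
static void worker_run(struct sched_object *obj) { (void)obj; puts("worker: running"); }

int main(void)
{
	struct sched_object worker = {
		.thread = worker_run,
		.prepare_switch_out = worker_out,
		.prepare_switch_in = worker_in,
	};

	prepare_switch(NULL, &worker);	/* idle -> worker */
	worker.thread(&worker);
	prepare_switch(&worker, NULL);	/* worker -> idle */
	return 0;
}
```

This is the decoupling the commit message describes: the vcpu-specific work (event-injection cancellation, the running flag) lives in vcpu.c behind the prepare_switch_out/in hooks, while schedule.c stays generic.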
