
Commit b39526f

lifeix authored and acrnsi committed
hv: schedule: vCPU schedule state setting doesn't need to be atomic
vCPU schedule state changes are made under the schedule lock, so the accesses do not need to be atomic.

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 8af334c commit b39526f
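
The change relies on the standard lock-protected-state pattern: when every writer of a field holds the same lock, the lock already serializes the stores, so a plain assignment suffices and atomic helpers add nothing. Below is a minimal user-space sketch of the idea, with a pthread mutex standing in for the per-pcpu schedule lock and hypothetical toy_* names; this is an illustration, not ACRN code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a vCPU and the schedule lock that guards its state. */
struct toy_vcpu {
	pthread_mutex_t sched_lock;
	bool running;	/* only ever written with sched_lock held */
};

static void toy_switch_in(struct toy_vcpu *v)
{
	pthread_mutex_lock(&v->sched_lock);
	v->running = true;	/* plain store; the lock orders all writers */
	pthread_mutex_unlock(&v->sched_lock);
}

static void toy_switch_out(struct toy_vcpu *v)
{
	pthread_mutex_lock(&v->sched_lock);
	v->running = false;
	pthread_mutex_unlock(&v->sched_lock);
}

int main(void)
{
	struct toy_vcpu v = { PTHREAD_MUTEX_INITIALIZER, false };

	toy_switch_in(&v);
	printf("running = %d\n", (int)v.running);	/* prints 1 */
	toy_switch_out(&v);
	printf("running = %d\n", (int)v.running);	/* prints 0 */
	return 0;
}

Build with cc -pthread. The point is only that no atomic_store32()-style helper appears anywhere, yet the writers cannot race because they all take the same lock.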

File tree: 2 files changed (+7, −8 lines)


hypervisor/arch/x86/guest/vcpu.c

Lines changed: 6 additions & 7 deletions
@@ -13,7 +13,6 @@
 #include <cpu_caps.h>
 #include <per_cpu.h>
 #include <init.h>
-#include <atomic.h>
 #include <vm.h>
 #include <vmcs.h>
 #include <mmu.h>
@@ -416,7 +415,7 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn_vcpu_handle)
 	*rtn_vcpu_handle = vcpu;
 
 	vcpu->launched = false;
-	vcpu->running = 0U;
+	vcpu->running = false;
 	vcpu->arch.nr_sipi = 0U;
 	vcpu->state = VCPU_INIT;
 
@@ -587,7 +586,7 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
 	vcpu->state = VCPU_INIT;
 
 	vcpu->launched = false;
-	vcpu->running = 0U;
+	vcpu->running = false;
 	vcpu->arch.nr_sipi = 0U;
 
 	vcpu->arch.exception_info.exception = VECTOR_INVALID;
@@ -619,7 +618,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 	vcpu->prev_state = vcpu->state;
 	vcpu->state = new_state;
 
-	if (atomic_load32(&vcpu->running) == 1U) {
+	if (vcpu->running) {
 		remove_from_cpu_runqueue(&vcpu->sched_obj);
 
 		if (is_lapic_pt_enabled(vcpu)) {
@@ -631,7 +630,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 	release_schedule_lock(vcpu->pcpu_id);
 
 	if (vcpu->pcpu_id != pcpu_id) {
-		while (atomic_load32(&vcpu->running) == 1U) {
+		while (vcpu->running) {
 			asm_pause();
 		}
 	}
@@ -659,7 +658,7 @@ static void context_switch_out(struct sched_object *prev)
 {
 	struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, sched_obj);
 
-	atomic_store32(&vcpu->running, 0U);
+	vcpu->running = false;
 	/* do prev vcpu context switch out */
 	/* For now, we don't need to invalid ept.
 	 * But if we have more than one vcpu on one pcpu,
@@ -671,7 +670,7 @@ static void context_switch_in(struct sched_object *next)
 {
 	struct acrn_vcpu *vcpu = list_entry(next, struct acrn_vcpu, sched_obj);
 
-	atomic_store32(&vcpu->running, 1U);
+	vcpu->running = true;
 	/* FIXME:
 	 * Now, we don't need to load new vcpu VMCS because
 	 * we only do switch between vcpu loop and idle loop.
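
One subtlety in the pause_vcpu() hunks above: after releasing the schedule lock, the caller keeps re-reading vcpu->running in a busy-wait loop, spinning with asm_pause() until the remote pCPU's context_switch_out() clears the flag. Below is a hedged sketch of that spin-wait shape with a hypothetical wait_until_clear() helper (not an ACRN function); it assumes a volatile-qualified flag so the compiler reloads it on every iteration (ACRN relies on its own primitives for this), and uses _mm_pause(), the compiler intrinsic for the x86 PAUSE hint that asm_pause() likewise emits.

#include <immintrin.h>	/* _mm_pause(): x86 PAUSE spin-wait hint */
#include <stdbool.h>

/* Hypothetical helper: spin until another CPU clears *flag.
 * volatile forces a fresh load on each iteration; PAUSE tells the
 * core this is a spin-wait loop, reducing power use and easing
 * pressure on the sibling hyper-thread while we wait. */
static void wait_until_clear(volatile const bool *flag)
{
	while (*flag) {
		_mm_pause();
	}
}

On x86, an aligned bool load or store is a single instruction, so dropping atomic_load32()/atomic_store32() here cannot tear the value; the schedule lock (and this wait handshake) supplies the ordering.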

hypervisor/include/arch/x86/guest/vcpu.h

Lines changed: 1 addition & 1 deletion
@@ -358,7 +358,7 @@ struct acrn_vcpu {
 
 	struct sched_object sched_obj;
 	bool launched; /* Whether the vcpu is launched on target pcpu */
-	uint32_t running; /* vcpu is picked up and run? */
+	bool running; /* vcpu is picked up and run? */
 
 	struct instr_emul_ctxt inst_ctxt;
 	struct io_request req; /* used by io/ept emulation */
