
Commit 6eab234

KVM: SVM: Don't check vCPU's blocking status when toggling AVIC on/off
Don't query a vCPU's blocking status when toggling AVIC on/off; barring
KVM bugs, the vCPU can't be blocking when refreshing AVIC controls.  And
if there are KVM bugs, ensuring the vCPU and its associated IRTEs are in
the correct state is desirable, i.e. well worth any overhead in a buggy
scenario.

Isolating the "real" load/put flows will allow moving the IOMMU IRTE
(de)activation logic from avic_refresh_apicv_exec_ctrl() to
avic_update_iommu_vcpu_affinity(), i.e. will allow updating the vCPU's
physical ID entry and its IRTEs in a common path, under a single
critical section of ir_list_lock.

Tested-by: Sairaj Kodilkar <sarunkod@amd.com>
Link: https://lore.kernel.org/r/20250611224604.313496-60-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
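As a rough illustration of the split this patch makes — a public wrapper that keeps the blocking check for the scheduling path, and a bare worker the refresh path can call unconditionally — here is a minimal, self-contained user-space sketch. toy_vcpu, vcpu_load(), and __vcpu_load() are illustrative stand-ins, not kernel APIs.

/* User-space sketch of the wrapper/worker split; none of these names are
 * kernel APIs.  vcpu_load() mirrors avic_vcpu_load() (skip the update while
 * the vCPU is blocking); __vcpu_load() mirrors __avic_vcpu_load() (always
 * perform the update under the lock).  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	pthread_mutex_t ir_list_lock;   /* stand-in for svm->ir_list_lock */
	bool blocking;                  /* stand-in for kvm_vcpu_is_blocking() */
	int cpu;                        /* stand-in for the Physical ID entry */
};

/* Worker: unconditionally publishes the new CPU under the lock. */
static void __vcpu_load(struct toy_vcpu *v, int cpu)
{
	pthread_mutex_lock(&v->ir_list_lock);
	v->cpu = cpu;
	pthread_mutex_unlock(&v->ir_list_lock);
}

/* Wrapper for the scheduling path: a blocking vCPU is left untouched; the
 * new CPU value gets stuffed when the vCPU unblocks. */
static void vcpu_load(struct toy_vcpu *v, int cpu)
{
	if (v->blocking)
		return;
	__vcpu_load(v, cpu);
}

int main(void)
{
	struct toy_vcpu v = {
		.ir_list_lock = PTHREAD_MUTEX_INITIALIZER,
		.blocking = true,
		.cpu = -1,
	};

	vcpu_load(&v, 3);       /* scheduling path: no-op while blocking */
	printf("after vcpu_load:   cpu=%d\n", v.cpu);

	__vcpu_load(&v, 3);     /* refresh path: update regardless */
	printf("after __vcpu_load: cpu=%d\n", v.cpu);
	return 0;
}

The refresh path calls the bare worker precisely because, barring bugs, the vCPU cannot be blocking there, and forcing the update is the safer behavior if a bug does occur.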
1 parent: f2bc961

1 file changed: 37 additions, 28 deletions


arch/x86/kvm/svm/avic.c

@@ -829,7 +829,7 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
 		WARN_ON_ONCE(amd_iommu_update_ga(cpu, irqfd->irq_bypass_data));
 }
 
-void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -845,16 +845,6 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
 		return;
 
-	/*
-	 * No need to update anything if the vCPU is blocking, i.e. if the vCPU
-	 * is being scheduled in after being preempted. The CPU entries in the
-	 * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
-	 * If the vCPU was migrated, its new CPU value will be stuffed when the
-	 * vCPU unblocks.
-	 */
-	if (kvm_vcpu_is_blocking(vcpu))
-		return;
-
 	/*
 	 * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
 	 * _currently_ have assigned devices, as that can change. Holding
@@ -889,31 +879,33 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
-void avic_vcpu_put(struct kvm_vcpu *vcpu)
+void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	/*
+	 * No need to update anything if the vCPU is blocking, i.e. if the vCPU
+	 * is being scheduled in after being preempted. The CPU entries in the
+	 * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
+	 * If the vCPU was migrated, its new CPU value will be stuffed when the
+	 * vCPU unblocks.
+	 */
+	if (kvm_vcpu_is_blocking(vcpu))
+		return;
+
+	__avic_vcpu_load(vcpu, cpu);
+}
+
+static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	struct vcpu_svm *svm = to_svm(vcpu);
 	unsigned long flags;
-	u64 entry;
+	u64 entry = svm->avic_physical_id_entry;
 
 	lockdep_assert_preemption_disabled();
 
 	if (WARN_ON_ONCE(vcpu->vcpu_id * sizeof(entry) >= PAGE_SIZE))
 		return;
 
-	/*
-	 * Note, reading the Physical ID entry outside of ir_list_lock is safe
-	 * as only the pCPU that has loaded (or is loading) the vCPU is allowed
-	 * to modify the entry, and preemption is disabled. I.e. the vCPU
-	 * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
-	 * recursively.
-	 */
-	entry = svm->avic_physical_id_entry;
-
-	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
-	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
-		return;
-
 	/*
 	 * Take and hold the per-vCPU interrupt remapping lock while updating
 	 * the Physical ID entry even though the lock doesn't protect against
@@ -933,7 +925,24 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
 
 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+}
+
+void avic_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Note, reading the Physical ID entry outside of ir_list_lock is safe
+	 * as only the pCPU that has loaded (or is loading) the vCPU is allowed
+	 * to modify the entry, and preemption is disabled. I.e. the vCPU
+	 * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
+	 * recursively.
+	 */
+	u64 entry = to_svm(vcpu)->avic_physical_id_entry;
+
+	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
+	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
+		return;
 
+	__avic_vcpu_put(vcpu);
 }
 
 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -974,9 +983,9 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 	avic_refresh_virtual_apic_mode(vcpu);
 
 	if (activated)
-		avic_vcpu_load(vcpu, vcpu->cpu);
+		__avic_vcpu_load(vcpu, vcpu->cpu);
 	else
-		avic_vcpu_put(vcpu);
+		__avic_vcpu_put(vcpu);
 
 	/*
 	 * Here, we go through the per-vcpu ir_list to update all existing
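The consolidation the commit message anticipates — updating the vCPU's Physical ID entry and walking its IRTE list inside one ir_list_lock critical section — can likewise be illustrated with a self-contained user-space sketch. The structures and names below (toy_vcpu, update_affinity(), irte_cpu) are stand-ins, not kernel APIs.

/* Stand-alone illustration (not kernel code) of the follow-up the commit
 * message describes: one critical section that updates the "Physical ID
 * entry" and every "IRTE" in the same pass, rather than two separately
 * locked regions that could be observed in an inconsistent intermediate
 * state.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NR_IRTES 2

struct toy_vcpu {
	pthread_mutex_t ir_list_lock;   /* stand-in for svm->ir_list_lock */
	int physical_id_entry;          /* stand-in for the Physical ID entry */
	int irte_cpu[NR_IRTES];         /* stand-in for the per-device IRTEs */
};

/* Both updates happen under a single acquisition of ir_list_lock, so a
 * reader holding the lock never sees the entry and the IRTEs disagree on
 * which CPU the vCPU is running on. */
static void update_affinity(struct toy_vcpu *v, int cpu)
{
	pthread_mutex_lock(&v->ir_list_lock);
	v->physical_id_entry = cpu;
	for (int i = 0; i < NR_IRTES; i++)
		v->irte_cpu[i] = cpu;
	pthread_mutex_unlock(&v->ir_list_lock);
}

int main(void)
{
	struct toy_vcpu v = { .ir_list_lock = PTHREAD_MUTEX_INITIALIZER };

	update_affinity(&v, 3);
	printf("entry=%d irte[0]=%d irte[1]=%d\n",
	       v.physical_id_entry, v.irte_cpu[0], v.irte_cpu[1]);
	return 0;
}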
