Commit 5f3d06b
KVM: SVM: Consolidate IRTE update when toggling AVIC on/off
Fold the IRTE modification logic in avic_refresh_apicv_exec_ctrl() into
__avic_vcpu_{load,put}(), and add a param to the helpers to communicate
whether or not AVIC is being toggled, i.e. if IRTE needs a "full" update,
or just a quick update to set the CPU and IsRun.

Link: https://lore.kernel.org/r/20250611224604.313496-61-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Parent: 6eab234

1 file changed, 46 insertions(+), 39 deletions(-)
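For readers skimming the hunks below, the net effect of the new `enum avic_vcpu_action` parameter is the per-IRTE dispatch sketched here (condensed from the patch; `data` is the irqfd's `irq_bypass_data`): a plain start/stop-running transition only needs amd_iommu_update_ga() to refresh the destination CPU and the IsRun bit, while toggling AVIC on or off performs a full (de)activation of guest mode for each IRTE.

	/* Condensed from avic_update_iommu_vcpu_affinity() as changed below. */
	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
		void *data = irqfd->irq_bypass_data;

		if (!(action & AVIC_TOGGLE_ON_OFF))
			/* vCPU merely started/stopped running: quick CPU/IsRun update. */
			WARN_ON_ONCE(amd_iommu_update_ga(cpu, data));
		else if (cpu >= 0)
			/* AVIC_ACTIVATE: full IRTE update into guest mode. */
			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu));
		else
			/* AVIC_DEACTIVATE: full IRTE update out of guest mode. */
			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
	}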

arch/x86/kvm/svm/avic.c

@@ -811,7 +811,28 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
 		return irq_set_vcpu_affinity(host_irq, NULL);
 }
 
-static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
+enum avic_vcpu_action {
+	/*
+	 * There is no need to differentiate between activate and deactivate,
+	 * as KVM only refreshes AVIC state when the vCPU is scheduled in and
+	 * isn't blocking, i.e. the pCPU must always be (in)valid when AVIC is
+	 * being (de)activated.
+	 */
+	AVIC_TOGGLE_ON_OFF	= BIT(0),
+	AVIC_ACTIVATE		= AVIC_TOGGLE_ON_OFF,
+	AVIC_DEACTIVATE		= AVIC_TOGGLE_ON_OFF,
+
+	/*
+	 * No unique action is required to deal with a vCPU that stops/starts
+	 * running, as IRTEs are configured to generate GALog interrupts at all
+	 * times.
+	 */
+	AVIC_START_RUNNING	= 0,
+	AVIC_STOP_RUNNING	= 0,
+};
+
+static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
+					    enum avic_vcpu_action action)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_kernel_irqfd *irqfd;
@@ -825,11 +846,20 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
 	if (list_empty(&svm->ir_list))
 		return;
 
-	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list)
-		WARN_ON_ONCE(amd_iommu_update_ga(cpu, irqfd->irq_bypass_data));
+	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+		void *data = irqfd->irq_bypass_data;
+
+		if (!(action & AVIC_TOGGLE_ON_OFF))
+			WARN_ON_ONCE(amd_iommu_update_ga(cpu, data));
+		else if (cpu >= 0)
+			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu));
+		else
+			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
+	}
 }
 
-static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
+			     enum avic_vcpu_action action)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
@@ -874,7 +904,7 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
 
-	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id);
+	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, action);
 
 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
@@ -891,10 +921,10 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (kvm_vcpu_is_blocking(vcpu))
 		return;
 
-	__avic_vcpu_load(vcpu, cpu);
+	__avic_vcpu_load(vcpu, cpu, AVIC_START_RUNNING);
 }
 
-static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
+static void __avic_vcpu_put(struct kvm_vcpu *vcpu, enum avic_vcpu_action action)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -916,7 +946,7 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
 	 */
 	spin_lock_irqsave(&svm->ir_list_lock, flags);
 
-	avic_update_iommu_vcpu_affinity(vcpu, -1);
+	avic_update_iommu_vcpu_affinity(vcpu, -1, action);
 
 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 	svm->avic_physical_id_entry = entry;
@@ -942,7 +972,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
 		return;
 
-	__avic_vcpu_put(vcpu);
+	__avic_vcpu_put(vcpu, AVIC_STOP_RUNNING);
 }
 
 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -971,41 +1001,18 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
 
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
-	bool activated = kvm_vcpu_apicv_active(vcpu);
-	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
-	struct vcpu_svm *svm = to_svm(vcpu);
-	struct kvm_kernel_irqfd *irqfd;
-	unsigned long flags;
-
 	if (!enable_apicv)
 		return;
 
+	/* APICv should only be toggled on/off while the vCPU is running. */
+	WARN_ON_ONCE(kvm_vcpu_is_blocking(vcpu));
+
 	avic_refresh_virtual_apic_mode(vcpu);
 
-	if (activated)
-		__avic_vcpu_load(vcpu, vcpu->cpu);
+	if (kvm_vcpu_apicv_active(vcpu))
+		__avic_vcpu_load(vcpu, vcpu->cpu, AVIC_ACTIVATE);
 	else
-		__avic_vcpu_put(vcpu);
-
-	/*
-	 * Here, we go through the per-vcpu ir_list to update all existing
-	 * interrupt remapping table entry targeting this vcpu.
-	 */
-	spin_lock_irqsave(&svm->ir_list_lock, flags);
-
-	if (list_empty(&svm->ir_list))
-		goto out;
-
-	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
-		void *data = irqfd->irq_bypass_data;
-
-		if (activated)
-			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
-		else
-			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
-	}
-out:
-	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+		__avic_vcpu_put(vcpu, AVIC_DEACTIVATE);
 }
 
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -1031,7 +1038,7 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
 	 * CPU and cause noisy neighbor problems if the VM is sending interrupts
 	 * to the vCPU while it's scheduled out.
 	 */
-	avic_vcpu_put(vcpu);
+	__avic_vcpu_put(vcpu, AVIC_STOP_RUNNING);
 }
 
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
