@@ -811,7 +811,28 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
 	return irq_set_vcpu_affinity(host_irq, NULL);
 }
 
-static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
+enum avic_vcpu_action {
+	/*
+	 * There is no need to differentiate between activate and deactivate,
+	 * as KVM only refreshes AVIC state when the vCPU is scheduled in and
+	 * isn't blocking, i.e. the pCPU must always be (in)valid when AVIC is
+	 * being (de)activated.
+	 */
+	AVIC_TOGGLE_ON_OFF	= BIT(0),
+	AVIC_ACTIVATE		= AVIC_TOGGLE_ON_OFF,
+	AVIC_DEACTIVATE		= AVIC_TOGGLE_ON_OFF,
+
+	/*
+	 * No unique action is required to deal with a vCPU that stops/starts
+	 * running, as IRTEs are configured to generate GALog interrupts at all
+	 * times.
+	 */
+	AVIC_START_RUNNING	= 0,
+	AVIC_STOP_RUNNING	= 0,
+};
+
+static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
+					    enum avic_vcpu_action action)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_kernel_irqfd *irqfd;
@@ -825,11 +846,20 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
 	if (list_empty(&svm->ir_list))
 		return;
 
-	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list)
-		WARN_ON_ONCE(amd_iommu_update_ga(cpu, irqfd->irq_bypass_data));
+	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+		void *data = irqfd->irq_bypass_data;
+
+		if (!(action & AVIC_TOGGLE_ON_OFF))
+			WARN_ON_ONCE(amd_iommu_update_ga(cpu, data));
+		else if (cpu >= 0)
+			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu));
+		else
+			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
+	}
 }
 
-static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
+			     enum avic_vcpu_action action)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
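The aliasing in the new enum is doing real work in the loop above: AVIC_ACTIVATE and AVIC_DEACTIVATE are the same bit, and AVIC_START_RUNNING/AVIC_STOP_RUNNING are both zero, so one flag test plus the sign of cpu is enough to pick the right IOMMU helper. A minimal userspace sketch of that dispatch, with the hypothetical dispatch() helper and returned strings standing in for the real amd_iommu_*() calls (illustrative only, not kernel code):

#include <stdio.h>

/* Same values as the patch: both toggle actions alias bit 0 (BIT(0) in
 * the kernel), both running-state actions alias 0. */
enum avic_vcpu_action {
	AVIC_TOGGLE_ON_OFF = 1 << 0,
	AVIC_ACTIVATE      = AVIC_TOGGLE_ON_OFF,
	AVIC_DEACTIVATE    = AVIC_TOGGLE_ON_OFF,
	AVIC_START_RUNNING = 0,
	AVIC_STOP_RUNNING  = 0,
};

/* Mirrors the branch structure of the new loop body: a clear flag means
 * a running-state update; otherwise the sign of cpu picks the direction. */
static const char *dispatch(enum avic_vcpu_action action, int cpu)
{
	if (!(action & AVIC_TOGGLE_ON_OFF))
		return "amd_iommu_update_ga";
	return cpu >= 0 ? "amd_iommu_activate_guest_mode"
			: "amd_iommu_deactivate_guest_mode";
}

int main(void)
{
	printf("%s\n", dispatch(AVIC_ACTIVATE, 3));      /* activate   */
	printf("%s\n", dispatch(AVIC_DEACTIVATE, -1));   /* deactivate */
	printf("%s\n", dispatch(AVIC_START_RUNNING, 3)); /* GA update  */
	printf("%s\n", dispatch(AVIC_STOP_RUNNING, -1)); /* GA update  */
	return 0;
}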
@@ -874,7 +904,7 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
 
-	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id);
+	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, action);
 
 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
@@ -891,10 +921,10 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (kvm_vcpu_is_blocking(vcpu))
 		return;
 
-	__avic_vcpu_load(vcpu, cpu);
+	__avic_vcpu_load(vcpu, cpu, AVIC_START_RUNNING);
 }
 
-static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
+static void __avic_vcpu_put(struct kvm_vcpu *vcpu, enum avic_vcpu_action action)
 {
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -916,7 +946,7 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu)
 	 */
 	spin_lock_irqsave(&svm->ir_list_lock, flags);
 
-	avic_update_iommu_vcpu_affinity(vcpu, -1);
+	avic_update_iommu_vcpu_affinity(vcpu, -1, action);
 
 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 	svm->avic_physical_id_entry = entry;
@@ -942,7 +972,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
 		return;
 
-	__avic_vcpu_put(vcpu);
+	__avic_vcpu_put(vcpu, AVIC_STOP_RUNNING);
 }
 
 void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
@@ -971,41 +1001,18 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
 
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
-	bool activated = kvm_vcpu_apicv_active(vcpu);
-	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
-	struct vcpu_svm *svm = to_svm(vcpu);
-	struct kvm_kernel_irqfd *irqfd;
-	unsigned long flags;
-
 	if (!enable_apicv)
 		return;
 
+	/* APICv should only be toggled on/off while the vCPU is running. */
+	WARN_ON_ONCE(kvm_vcpu_is_blocking(vcpu));
+
 	avic_refresh_virtual_apic_mode(vcpu);
 
-	if (activated)
-		__avic_vcpu_load(vcpu, vcpu->cpu);
+	if (kvm_vcpu_apicv_active(vcpu))
+		__avic_vcpu_load(vcpu, vcpu->cpu, AVIC_ACTIVATE);
 	else
-		__avic_vcpu_put(vcpu);
-
-	/*
-	 * Here, we go through the per-vcpu ir_list to update all existing
-	 * interrupt remapping table entry targeting this vcpu.
-	 */
-	spin_lock_irqsave(&svm->ir_list_lock, flags);
-
-	if (list_empty(&svm->ir_list))
-		goto out;
-
-	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
-		void *data = irqfd->irq_bypass_data;
-
-		if (activated)
-			WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
-		else
-			WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
-	}
-out:
-	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+		__avic_vcpu_put(vcpu, AVIC_DEACTIVATE);
 }
 
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
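This hunk is the payoff: the open-coded walk of svm->ir_list, with its own ir_list_lock critical section, apic_id lookup, and goto-based unlock, is deleted outright, and toggling APICv reuses the load/put paths end to end. The new WARN_ON_ONCE enforces the assumption stated on the enum, that toggling only happens while the vCPU is scheduled in and not blocking, so vcpu->cpu is a valid pCPU whenever AVIC_ACTIVATE is passed. A condensed view of the resulting call flow (illustrative only, eliding the physical-ID table updates and locking shown in the earlier hunks):

	avic_refresh_apicv_exec_ctrl()
	  APICv on:  __avic_vcpu_load(vcpu, vcpu->cpu, AVIC_ACTIVATE)
	               -> avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, AVIC_ACTIVATE)
	                    -> amd_iommu_activate_guest_mode(data, cpu)   [per IRTE]
	  APICv off: __avic_vcpu_put(vcpu, AVIC_DEACTIVATE)
	               -> avic_update_iommu_vcpu_affinity(vcpu, -1, AVIC_DEACTIVATE)
	                    -> amd_iommu_deactivate_guest_mode(data)      [per IRTE]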
@@ -1031,7 +1038,7 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
 	 * CPU and cause noisy neighbor problems if the VM is sending interrupts
 	 * to the vCPU while it's scheduled out.
 	 */
-	avic_vcpu_put(vcpu);
+	__avic_vcpu_put(vcpu, AVIC_STOP_RUNNING);
 }
 
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
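One behavioral detail worth flagging in this last hunk: the blocking path now calls __avic_vcpu_put() directly with AVIC_STOP_RUNNING rather than going through avic_vcpu_put(), which (per the earlier hunk) returns early when the IS_RUNNING bit is already clear. Since AVIC_STOP_RUNNING is 0, the IOMMU side of this path is unchanged: each IRTE still gets amd_iommu_update_ga() with cpu == -1.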