@@ -76,14 +76,6 @@ static bool next_vm_id_wrapped = 0;
 static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
 bool x2avic_enabled;
 
-/*
- * This is a wrapper of struct amd_iommu_ir_data.
- */
-struct amd_svm_iommu_ir {
-        struct list_head node;  /* Used by SVM for per-vcpu ir_list */
-        void *data;             /* Storing pointer to struct amd_ir_data */
-};
-
 static void avic_activate_vmcb(struct vcpu_svm *svm)
 {
         struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -747,8 +739,8 @@ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
 {
         int ret = 0;
         unsigned long flags;
-        struct amd_svm_iommu_ir *ir;
         struct vcpu_svm *svm = to_svm(vcpu);
+        struct kvm_kernel_irqfd *irqfd;
 
         if (!kvm_arch_has_assigned_device(vcpu->kvm))
                 return 0;
@@ -762,11 +754,11 @@ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
         if (list_empty(&svm->ir_list))
                 goto out;
 
-        list_for_each_entry(ir, &svm->ir_list, node) {
+        list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
                 if (activate)
-                        ret = amd_iommu_activate_guest_mode(ir->data);
+                        ret = amd_iommu_activate_guest_mode(irqfd->irq_bypass_data);
                 else
-                        ret = amd_iommu_deactivate_guest_mode(ir->data);
+                        ret = amd_iommu_deactivate_guest_mode(irqfd->irq_bypass_data);
                 if (ret)
                         break;
         }
@@ -775,27 +767,30 @@ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
         return ret;
 }
 
-static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+static void svm_ir_list_del(struct vcpu_svm *svm,
+                            struct kvm_kernel_irqfd *irqfd,
+                            struct amd_iommu_pi_data *pi)
 {
         unsigned long flags;
-        struct amd_svm_iommu_ir *cur;
+        struct kvm_kernel_irqfd *cur;
 
         spin_lock_irqsave(&svm->ir_list_lock, flags);
-        list_for_each_entry(cur, &svm->ir_list, node) {
-                if (cur->data != pi->ir_data)
+        list_for_each_entry(cur, &svm->ir_list, vcpu_list) {
+                if (cur->irq_bypass_data != pi->ir_data)
+                        continue;
+                if (WARN_ON_ONCE(cur != irqfd))
                         continue;
-                list_del(&cur->node);
-                kfree(cur);
+                list_del(&irqfd->vcpu_list);
                 break;
         }
         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
 
-static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+static int svm_ir_list_add(struct vcpu_svm *svm,
+                           struct kvm_kernel_irqfd *irqfd,
+                           struct amd_iommu_pi_data *pi)
 {
-        int ret = 0;
         unsigned long flags;
-        struct amd_svm_iommu_ir *ir;
         u64 entry;
 
         if (WARN_ON_ONCE(!pi->ir_data))
@@ -812,25 +807,14 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
                 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
                 struct vcpu_svm *prev_svm;
 
-                if (!prev_vcpu) {
-                        ret = -EINVAL;
-                        goto out;
-                }
+                if (!prev_vcpu)
+                        return -EINVAL;
 
                 prev_svm = to_svm(prev_vcpu);
-                svm_ir_list_del(prev_svm, pi);
+                svm_ir_list_del(prev_svm, irqfd, pi);
         }
 
-        /**
-         * Allocating new amd_iommu_pi_data, which will get
-         * add to the per-vcpu ir_list.
-         */
-        ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
-        if (!ir) {
-                ret = -ENOMEM;
-                goto out;
-        }
-        ir->data = pi->ir_data;
+        irqfd->irq_bypass_data = pi->ir_data;
 
         spin_lock_irqsave(&svm->ir_list_lock, flags);
 
@@ -845,10 +829,9 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
                 amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
                                     true, pi->ir_data);
 
-        list_add(&ir->node, &svm->ir_list);
+        list_add(&irqfd->vcpu_list, &svm->ir_list);
         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
-out:
-        return ret;
+        return 0;
 }
 
 /*
@@ -952,7 +935,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
                  * scheduling information in IOMMU irte.
                  */
                 if (!ret && pi.is_guest_mode)
-                        svm_ir_list_add(svm, &pi);
+                        svm_ir_list_add(svm, irqfd, &pi);
         }
 
         if (!ret && svm) {
@@ -993,7 +976,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
 
                         vcpu = kvm_get_vcpu_by_id(kvm, id);
                         if (vcpu)
-                                svm_ir_list_del(to_svm(vcpu), &pi);
+                                svm_ir_list_del(to_svm(vcpu), irqfd, &pi);
                 }
         }
 out:
@@ -1005,8 +988,8 @@ static inline int
 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
 {
         int ret = 0;
-        struct amd_svm_iommu_ir *ir;
         struct vcpu_svm *svm = to_svm(vcpu);
+        struct kvm_kernel_irqfd *irqfd;
 
         lockdep_assert_held(&svm->ir_list_lock);
 
@@ -1020,8 +1003,8 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
         if (list_empty(&svm->ir_list))
                 return 0;
 
-        list_for_each_entry(ir, &svm->ir_list, node) {
-                ret = amd_iommu_update_ga(cpu, r, ir->data);
+        list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
+                ret = amd_iommu_update_ga(cpu, r, irqfd->irq_bypass_data);
                 if (ret)
                         return ret;
         }
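
Note: the hunks above dereference two members of struct kvm_kernel_irqfd, vcpu_list and irq_bypass_data, whose declaration is not part of this diff (struct kvm_kernel_irqfd lives in include/linux/kvm_irqfd.h, presumably extended in a companion change). A minimal sketch of what those members would have to look like for this code to compile, with everything else elided and the exact layout an assumption on my part:

/* Sketch only; not the actual header change. */
struct kvm_kernel_irqfd {
        /* ... existing members elided ... */

        /* Assumed additions used by the AVIC hunks above: */
        struct list_head vcpu_list;     /* links this irqfd into the target vCPU's svm->ir_list */
        void *irq_bypass_data;          /* IOMMU cookie (pi->ir_data, i.e. struct amd_ir_data *) */
};

With the irqfd itself carrying the list linkage and the IOMMU cookie, the per-IRTE kzalloc()/kfree() of the old amd_svm_iommu_ir wrapper disappears, which is also why svm_ir_list_add() no longer needs its ret variable or the out: error path.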