@@ -883,6 +883,165 @@ int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static u64 read_vncr_el2(struct kvm_vcpu *vcpu)
+{
+	return (u64)sign_extend64(__vcpu_sys_reg(vcpu, VNCR_EL2), 48);
+}
+
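+/*
+ * Translate the page VNCR_EL2 points to using the guest's EL2&0 stage-1
+ * tables, fault in the backing page, and cache the result in the
+ * per-vcpu shadow VNCR TLB entry.
+ */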
+static int kvm_translate_vncr(struct kvm_vcpu *vcpu)
+{
+	bool write_fault, writable;
+	unsigned long mmu_seq;
+	struct vncr_tlb *vt;
+	struct page *page;
+	u64 va, pfn, gfn;
+	int ret;
+
+	vt = vcpu->arch.vncr_tlb;
+
+	vt->wi = (struct s1_walk_info) {
+		.regime	= TR_EL20,
+		.as_el0	= false,
+		.pan	= false,
+	};
+	vt->wr = (struct s1_walk_result){};
+	vt->valid = false;
+
+	guard(srcu)(&vcpu->kvm->srcu);
+
+	va = read_vncr_el2(vcpu);
+
+	ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va);
+	if (ret)
+		return ret;
+
+	write_fault = kvm_is_write_fault(vcpu);
+
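+	/*
+	 * Snapshot the MMU invalidation sequence before faulting the page
+	 * in, so that a concurrent invalidation can be detected (and the
+	 * whole operation retried) under the mmu_lock below.
+	 */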
+	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+	smp_rmb();
+
+	gfn = vt->wr.pa >> PAGE_SHIFT;
+	pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writable, &page);
+	if (is_error_noslot_pfn(pfn) || (write_fault && !writable))
+		return -EFAULT;
+
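+	/*
+	 * With the mmu_lock held, recheck for a concurrent invalidation,
+	 * then record the GVA->HPA translation and request that the shadow
+	 * VNCR page be remapped before the next guest entry.
+	 */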
+	scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
+		if (mmu_invalidate_retry(vcpu->kvm, mmu_seq))
+			return -EAGAIN;
+
+		vt->gva = va;
+		vt->hpa = pfn << PAGE_SHIFT;
+		vt->valid = true;
+		vt->cpu = -1;
+
+		kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
+	}
+
+	kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw);
+	if (vt->wr.pw)
+		mark_page_dirty(vcpu->kvm, gfn);
+
+	return 0;
+}
+
+static void inject_vncr_perm(struct kvm_vcpu *vcpu)
+{
+	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+
+	/* Adjust the fault level to reflect that of the guest's */
+	esr &= ~ESR_ELx_FSC;
+	esr |= FIELD_PREP(ESR_ELx_FSC,
+			  ESR_ELx_FSC_PERM_L(vt->wr.level));
+
+	kvm_inject_nested_sync(vcpu, esr);
+}
+
+static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
+{
+	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+
+	lockdep_assert_held_read(&vcpu->kvm->mmu_lock);
+
+	if (!vt->valid)
+		return false;
+
+	if (read_vncr_el2(vcpu) != vt->gva)
+		return false;
+
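+	/*
+	 * For a non-global entry, compare the ASID it was created with
+	 * against the one currently selected by TCR_EL2.A1 and the
+	 * corresponding TTBR, truncated to 8 bits unless 16-bit ASIDs
+	 * are both supported and enabled.
+	 */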
+	if (vt->wr.nG) {
+		u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+		u64 ttbr = ((tcr & TCR_A1) ?
+			    vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
+			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
+		u16 asid;
+
+		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
+		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+		    !(tcr & TCR_ASID16))
+			asid &= GENMASK(7, 0);
+
+		return asid == vt->wr.asid;
+	}
+
+	return true;
+}
+
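+/*
+ * Handle an abort flagged as VNCR-related: permission faults are
+ * reflected to the guest hypervisor at the guest's fault level, while
+ * translation faults (re)populate the shadow VNCR TLB entry, injecting
+ * the original fault back into the guest if the stage-1 walk failed.
+ */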
+int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
+{
+	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+
+	BUG_ON(!(esr & BIT(ESR_ELx_VNCR_SHIFT)));
996+
997+ if (esr_fsc_is_permission_fault (esr )) {
998+ inject_vncr_perm (vcpu );
999+ } else if (esr_fsc_is_translation_fault (esr )) {
1000+ bool valid ;
1001+ int ret ;
1002+
1003+ scoped_guard (read_lock , & vcpu -> kvm -> mmu_lock )
1004+ valid = kvm_vncr_tlb_lookup (vcpu );
1005+
1006+ if (!valid )
1007+ ret = kvm_translate_vncr (vcpu );
1008+ else
1009+ ret = - EPERM ;
1010+
1011+ switch (ret ) {
1012+ case - EAGAIN :
1013+ case - ENOMEM :
1014+ /* Let's try again... */
1015+ break ;
1016+ case - EFAULT :
1017+ case - EINVAL :
1018+ case - ENOENT :
1019+ case - EACCES :
1020+ /*
1021+ * Translation failed, inject the corresponding
1022+ * exception back to EL2.
1023+ */
1024+ BUG_ON (!vt -> wr .failed );
1025+
1026+ esr &= ~ESR_ELx_FSC ;
1027+ esr |= FIELD_PREP (ESR_ELx_FSC , vt -> wr .fst );
1028+
1029+ kvm_inject_nested_sync (vcpu , esr );
1030+ break ;
1031+ case - EPERM :
1032+ /* Hack to deal with POE until we get kernel support */
1033+ inject_vncr_perm (vcpu );
1034+ break ;
1035+ case 0 :
1036+ break ;
1037+ }
1038+ } else {
1039+ WARN_ONCE (1 , "Unhandled VNCR abort, ESR=%llx\n" , esr );
1040+ }
1041+
1042+ return 1 ;
1043+ }
1044+
 /*
  * Our emulated CPU doesn't support all the possible features. For the
  * sake of simplicity (and probably mental sanity), wipe out a number