@@ -869,14 +869,14 @@ static int emulate_movx(struct vcpu *vcpu, struct instr_emul_vie *vie)
869
869
*
870
870
* It's only used by MOVS/STOS
871
871
*/
872
- static void get_gva_di_si_nocheck (struct vcpu * vcpu , uint8_t addrsize ,
873
- enum cpu_reg_name seg , enum cpu_reg_name gpr , uint64_t * gva )
872
+ static void get_gva_si_nocheck (struct vcpu * vcpu , uint8_t addrsize ,
873
+ enum cpu_reg_name seg , uint64_t * gva )
874
874
{
875
875
uint64_t val ;
876
876
struct seg_desc desc ;
877
877
enum vm_cpu_mode cpu_mode ;
878
878
879
- val = vm_get_register (vcpu , gpr );
879
+ val = vm_get_register (vcpu , CPU_REG_RSI );
880
880
vm_get_seg_desc (seg , & desc );
881
881
cpu_mode = get_vcpu_mode (vcpu );
882
882
@@ -888,29 +888,24 @@ static void get_gva_di_si_nocheck(struct vcpu *vcpu, uint8_t addrsize,
888
888
/*
889
889
* @pre only called during instruction decode phase
890
890
*
891
- * @remark This function get gva from ES:DI and DS(other segment):SI. And
892
- * do check the failure condition and inject exception to guest accordingly.
891
+ * @remark This function gets the gva from ES:DI, checks the failure
892
+ * condition, and injects an exception to the guest accordingly.
893
893
*
894
894
* It's only used by MOVS/STOS
895
895
*/
896
- static int get_gva_di_si_check (struct vcpu * vcpu , uint8_t addrsize ,
897
- uint32_t prot , enum cpu_reg_name seg , enum cpu_reg_name gpr ,
898
- uint64_t * gva )
896
+ static int get_gva_di_check (struct vcpu * vcpu , struct instr_emul_vie * vie ,
897
+ uint8_t addrsize , uint64_t * gva )
899
898
{
900
899
int ret ;
901
900
uint32_t err_code ;
902
901
struct seg_desc desc ;
903
902
enum vm_cpu_mode cpu_mode ;
904
903
uint64_t val , gpa ;
905
904
906
- val = vm_get_register (vcpu , gpr );
907
- vm_get_seg_desc (seg , & desc );
905
+ val = vm_get_register (vcpu , CPU_REG_RDI );
906
+ vm_get_seg_desc (CPU_REG_ES , & desc );
908
907
cpu_mode = get_vcpu_mode (vcpu );
909
908
910
- if (!is_desc_valid (& desc , prot )) {
911
- goto exception_inject ;
912
- }
913
-
914
909
if (cpu_mode == CPU_MODE_64BIT ) {
915
910
if ((addrsize != 4U ) && (addrsize != 8U )) {
916
911
goto exception_inject ;
@@ -919,46 +914,72 @@ static int get_gva_di_si_check(struct vcpu *vcpu, uint8_t addrsize,
919
914
if ((addrsize != 2U ) && (addrsize != 4U )) {
920
915
goto exception_inject ;
921
916
}
917
+
918
+ if (!is_desc_valid (& desc , PROT_WRITE )) {
919
+ goto exception_inject ;
920
+ }
922
921
}
923
922
924
- if (vie_calculate_gla (cpu_mode , seg , & desc , val , addrsize , gva ) != 0 ) {
923
+ if (vie_calculate_gla (cpu_mode , CPU_REG_ES , & desc , val , addrsize , gva )
924
+ != 0 ) {
925
925
goto exception_inject ;
926
926
}
927
927
928
928
if (vie_canonical_check (cpu_mode , * gva ) != 0 ) {
929
929
goto exception_inject ;
930
930
}
931
931
932
- err_code = ( prot == PROT_WRITE ) ? PAGE_FAULT_WR_FLAG : 0U ;
933
- ret = gva2gpa (vcpu , ( uint64_t ) gva , & gpa , & err_code );
932
+ err_code = PAGE_FAULT_WR_FLAG ;
933
+ ret = gva2gpa (vcpu , * gva , & gpa , & err_code );
934
934
if (ret < 0 ) {
935
935
if (ret == - EFAULT ) {
936
936
vcpu_inject_pf (vcpu , (uint64_t )gva , err_code );
937
937
}
938
938
return ret ;
939
939
}
940
940
941
+ /* If we are checking the dest operand for movs instruction,
942
+ * we cache the gpa if check pass. It will be used during
943
+ * movs instruction emulation.
944
+ */
945
+ vie -> dst_gpa = gpa ;
946
+
941
947
return 0 ;
942
948
943
949
exception_inject :
944
- if (seg == CPU_REG_SS ) {
945
- vcpu_inject_ss (vcpu );
946
- } else {
947
- vcpu_inject_gp (vcpu , 0U );
948
- }
950
+ vcpu_inject_gp (vcpu , 0U );
949
951
return - EFAULT ;
950
952
}
951
953
954
+ /* MOVs gets the operands from RSI and RDI. Both operands could be memory.
955
+ * With VMX enabled, one of the operands triggers an EPT violation.
956
+ *
957
+ * If the RSI access triggers the EPT violation, it's the source operand and always
958
+ * read operations. Not necessary to check whether we need to inject a fault (done
959
+ * by VMX already). We do need to check the RDI.
960
+ *
961
+ * If the RDI access triggers the EPT violation, we need to check RDI because it's
962
+ * always a write operation and VMX doesn't cover write access checks.
963
+ * Not necessary to check RSI, because VMX covers it for us.
964
+ *
965
+ * In summary,
966
+ * For MOVs instruction, we always check RDI during instruction decoding phase.
967
+ * And access RSI without any check during instruction emulation phase.
968
+ */
952
969
static int emulate_movs (struct vcpu * vcpu , struct instr_emul_vie * vie )
953
970
{
954
- uint64_t dstaddr , srcaddr ;
971
+ uint64_t src_gva , gpa , val ;
972
+ uint64_t * dst_hva , * src_hva ;
955
973
uint64_t rcx , rdi , rsi , rflags ;
974
+ uint32_t err_code ;
975
+ enum cpu_reg_name seg ;
956
976
int error , repeat ;
957
977
uint8_t opsize ;
958
- enum cpu_reg_name seg ;
978
+ bool is_mmio_write ;
959
979
960
980
opsize = (vie -> opcode == 0xA4U ) ? 1U : vie -> opsize ;
961
981
error = 0 ;
982
+ is_mmio_write = (vcpu -> req .reqs .mmio .direction == REQUEST_WRITE );
962
983
963
984
/*
964
985
* XXX although the MOVS instruction is only supposed to be used with
@@ -984,11 +1005,25 @@ static int emulate_movs(struct vcpu *vcpu, struct instr_emul_vie *vie)
984
1005
985
1006
seg = (vie -> seg_override != 0U ) ? (vie -> segment_register ) : CPU_REG_DS ;
986
1007
987
- get_gva_di_si_nocheck (vcpu , vie -> addrsize , seg , CPU_REG_RSI , & srcaddr );
988
- get_gva_di_si_nocheck (vcpu , vie -> addrsize , CPU_REG_ES , CPU_REG_RDI ,
989
- & dstaddr );
1008
+ if (is_mmio_write ) {
1009
+ get_gva_si_nocheck (vcpu , vie -> addrsize , seg , & src_gva );
990
1010
991
- (void )memcpy_s ((void * )dstaddr , 16U , (void * )srcaddr , opsize );
1011
+ /* we are sure it will succeed */
1012
+ (void )gva2gpa (vcpu , src_gva , & gpa , & err_code );
1013
+ src_hva = gpa2hva (vcpu -> vm , gpa );
1014
+
1015
+ val = * src_hva ;
1016
+
1017
+ mmio_write (vcpu , val );
1018
+ } else {
1019
+ mmio_read (vcpu , & val );
1020
+
1021
+ /* The dest gpa is saved during dst check instruction
1022
+ * decoding.
1023
+ */
1024
+ dst_hva = gpa2hva (vcpu -> vm , vie -> dst_gpa );
1025
+ memcpy_s (dst_hva , opsize , & val , opsize );
1026
+ }
992
1027
993
1028
rsi = vm_get_register (vcpu , CPU_REG_RSI );
994
1029
rdi = vm_get_register (vcpu , CPU_REG_RDI );
@@ -2128,14 +2163,14 @@ static int instr_check_di(struct vcpu *vcpu, struct instr_emul_ctxt *emul_ctxt)
2128
2163
int ret ;
2129
2164
struct instr_emul_vie * vie = & emul_ctxt -> vie ;
2130
2165
uint64_t gva ;
2131
- enum cpu_reg_name seg ;
2132
2166
2133
- ret = get_gva_di_si_check (vcpu , vie -> addrsize , PROT_WRITE ,
2134
- CPU_REG_ES , CPU_REG_RDI , & gva );
2167
+ ret = get_gva_di_check (vcpu , vie , vie -> addrsize , & gva );
2135
2168
2136
2169
if (ret < 0 ) {
2137
2170
return - EFAULT ;
2138
2171
}
2172
+
2173
+ return 0 ;
2139
2174
}
2140
2175
2141
2176
static int instr_check_gva (struct vcpu * vcpu , struct instr_emul_ctxt * emul_ctxt ,
0 commit comments