@@ -44,19 +44,15 @@ enum mem_map_request_type {
 	PAGING_REQUEST_TYPE_UNKNOWN,
 };
 
-struct mm_capability {
-	bool ept_x_only_supported;
-	/* EPT and MMU 1-GByte page supported flag */
-	bool ept_1gb_page_supported;
-	bool invept_supported;
-	bool invept_single_context_supported;
-	bool invept_global_context_supported;
-	bool invvpid_supported;
-	bool invvpid_single_context_supported;
-	bool invvpid_global_context_supported;
+static struct vmx_capability {
+	uint32_t ept;
+	uint32_t vpid;
+} vmx_caps;
+
+static struct mm_capability {
+	/* MMU 1-GByte page supported flag */
 	bool mmu_1gb_page_supported;
-};
-static struct mm_capability mm_caps;
+} mm_caps;
 
 #define INVEPT_TYPE_SINGLE_CONTEXT	1UL
 #define INVEPT_TYPE_ALL_CONTEXTS	2UL
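This hunk collapses eight feature-specific booleans into the raw IA32_VMX_EPT_VPID_CAP value: per the SDM appendix A.10 that the code itself cites, the low 32 bits of that MSR describe EPT capabilities and the high 32 bits describe VPID capabilities. A minimal standalone sketch of that split; the MSR value below is fabricated for illustration and the demo names are not ACRN code:

    #include <stdint.h>
    #include <stdio.h>

    struct vmx_capability {
            uint32_t ept;   /* IA32_VMX_EPT_VPID_CAP bits 31:0  */
            uint32_t vpid;  /* IA32_VMX_EPT_VPID_CAP bits 63:32 */
    };

    int main(void)
    {
            /* demo_msr stands in for msr_read(MSR_IA32_VMX_EPT_VPID_CAP);
             * the value is made up for the demo. */
            uint64_t demo_msr = 0x00000F0106334141ULL;
            struct vmx_capability caps;

            caps.ept  = (uint32_t)demo_msr;         /* low 32 bits  */
            caps.vpid = (uint32_t)(demo_msr >> 32); /* high 32 bits */

            printf("ept caps 0x%08x, vpid caps 0x%08x\n",
                   (unsigned)caps.ept, (unsigned)caps.vpid);
            return 0;
    }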
@@ -93,31 +89,25 @@ static inline void inv_tlb_one_page(void *addr)
 	asm volatile ("invlpg (%0)" : : "r" (addr) : "memory");
 }
 
+static inline bool cpu_has_vmx_ept_cap(uint32_t bit_mask)
+{
+	return !!(vmx_caps.ept & bit_mask);
+}
+
+static inline bool cpu_has_vmx_vpid_cap(uint32_t bit_mask)
+{
+	return !!(vmx_caps.vpid & bit_mask);
+}
+
 static void check_mmu_capability(void)
 {
 	uint64_t val;
 	uint32_t eax, ebx, ecx, edx;
 
-	memset(&mm_caps, 0, sizeof(struct mm_capability));
-
 	/* Read the MSR register of EPT and VPID Capability - SDM A.10 */
 	val = msr_read(MSR_IA32_VMX_EPT_VPID_CAP);
-	mm_caps.ept_x_only_supported = (val & MSR_VMX_EPT_X_ONLY)
-		? (true) : (false);
-	mm_caps.ept_1gb_page_supported = (val & MSR_VMX_EPT_VPID_CAP_1GB)
-		? (true) : (false);
-	mm_caps.invept_supported =
-		(val & MSR_VMX_INVEPT) ? (true) : (false);
-	mm_caps.invept_single_context_supported =
-		(val & MSR_VMX_INVEPT_SINGLE_CONTEXT) ? (true) : (false);
-	mm_caps.invept_global_context_supported =
-		(val & MSR_VMX_INVEPT_GLOBAL_CONTEXT) ? (true) : (false);
-	mm_caps.invvpid_supported =
-		(val & MSR_VMX_INVVPID) ? (true) : (false);
-	mm_caps.invvpid_single_context_supported =
-		(val & MSR_VMX_INVVPID_SINGLE_CONTEXT) ? (true) : (false);
-	mm_caps.invvpid_global_context_supported =
-		(val & MSR_VMX_INVVPID_GLOBAL_CONTEXT) ? (true) : (false);
+	vmx_caps.ept = (uint32_t) val;
+	vmx_caps.vpid = (uint32_t) (val >> 32);
 
 	/* Read CPUID to check if PAGE1GB is supported
 	 * SDM 4.1.4 If CPUID.80000001H:EDX.Page1GB[bit26]=1,
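The new accessors test a capability with a single mask instead of a precomputed bool; the double negation folds any set bit down to exactly true or false. A small self-contained sketch, where the DEMO_ masks stand in for the hypervisor's VMX_EPT_* constants (bit 0 = execute-only and bit 17 = 1-GByte EPT pages, per SDM A.10):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_EPT_EXECUTE_ONLY (1U << 0)
    #define DEMO_EPT_1GB_PAGE     (1U << 17)

    static uint32_t demo_ept_caps = DEMO_EPT_1GB_PAGE;

    static inline bool demo_has_ept_cap(uint32_t bit_mask)
    {
            /* !! normalizes any nonzero AND result to exactly 1 */
            return !!(demo_ept_caps & bit_mask);
    }

    int main(void)
    {
            assert(demo_has_ept_cap(DEMO_EPT_1GB_PAGE));
            assert(!demo_has_ept_cap(DEMO_EPT_EXECUTE_ONLY));
            return 0;
    }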
@@ -127,32 +117,15 @@ static void check_mmu_capability(void)
 	mm_caps.mmu_1gb_page_supported = (edx & CPUID_EDX_PAGE1GB) ?
 		(true) : (false);
 
-	if (!mm_caps.invept_supported)
+	if (!cpu_has_vmx_ept_cap(VMX_EPT_INVEPT))
 		panic("invept must be supported");
 }
 
-static inline bool check_ept_x_only_support(void)
-{
-	return mm_caps.ept_x_only_supported;
-}
-
-static inline bool check_invept_single_support(void)
-{
-	return mm_caps.invept_supported &&
-		mm_caps.invept_single_context_supported;
-}
-
-static inline bool check_invept_global_support(void)
-{
-	return mm_caps.invept_supported &&
-		mm_caps.invept_global_context_supported;
-}
-
 void invept(struct vcpu *vcpu)
 {
 	struct invept_desc desc = {0};
 
-	if (check_invept_single_support()) {
+	if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_SINGLE_CONTEXT)) {
 		desc.eptp = vcpu->vm->arch_vm.nworld_eptp | (3 << 3) | 6;
 		_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
 		if (vcpu->vm->sworld_control.sworld_enabled) {
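The `| (3 << 3) | 6` applied to the EPTP above is not address arithmetic: per SDM 24.6.11, the low bits of an EPTP hold configuration fields rather than address bits. A sketch of that encoding; `demo_make_eptp` is a hypothetical helper, since ACRN builds the value inline:

    #include <stdint.h>

    /* EPTP low-bit fields (SDM 24.6.11):
     *   bits 2:0 - EPT paging-structure memory type (6 = write-back)
     *   bits 5:3 - EPT page-walk length minus one (3 = four-level walk)
     */
    static inline uint64_t demo_make_eptp(uint64_t pml4_phys_addr)
    {
            return pml4_phys_addr | (3UL << 3) | 6UL;
    }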
@@ -161,7 +134,7 @@ void invept(struct vcpu *vcpu)
 			_invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
 
 		}
-	} else if (check_invept_global_support())
+	} else if (cpu_has_vmx_ept_cap(VMX_EPT_INVEPT_GLOBAL_CONTEXT))
 		_invept(INVEPT_TYPE_ALL_CONTEXTS, desc);
 }
 
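For reference, the operand that `_invept` passes to the instruction follows the SDM INVEPT format: a 128-bit in-memory descriptor whose first quadword is the EPTP to invalidate. Single-context (type 1) uses that EPTP; all-contexts (type 2) ignores the descriptor and flushes every EPT context, which is why it serves as the fallback branch above. A sketch of the layout only; this is not ACRN's `struct invept_desc` definition:

    #include <stdint.h>

    struct demo_invept_desc {
            uint64_t eptp;     /* EPTP to invalidate (single-context) */
            uint64_t reserved; /* must be zero */
    };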
@@ -170,7 +143,7 @@ static bool check_mmu_1gb_support(struct map_params *map_params)
 	bool status = false;
 
 	if (map_params->page_table_type == PTT_EPT)
-		status = mm_caps.ept_1gb_page_supported;
+		status = cpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
 	else
 		status = mm_caps.mmu_1gb_page_supported;
 	return status;
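EPT mappings and host MMU mappings advertise 1-GByte page support independently (the VMX capability MSR versus CPUID.80000001H:EDX.Page1GB), so the check dispatches on the table type. A trivial sketch of the dispatch, with illustrative names:

    #include <stdbool.h>

    enum demo_ptt { DEMO_PTT_HOST, DEMO_PTT_EPT };

    static bool demo_1gb_supported(enum demo_ptt type,
                                   bool ept_1gb, bool mmu_1gb)
    {
            return (type == DEMO_PTT_EPT) ? ept_1gb : mmu_1gb;
    }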
@@ -191,7 +164,7 @@ static inline uint32_t check_page_table_present(int page_table_type,
 		if ((table_entry == IA32E_EPT_W_BIT) ||
 				(table_entry == (IA32E_EPT_W_BIT | IA32E_EPT_X_BIT)) ||
 				((table_entry == IA32E_EPT_X_BIT) &&
-				!check_ept_x_only_support()))
+				!cpu_has_vmx_ept_cap(VMX_EPT_EXECUTE_ONLY)))
 			return PT_MISCFG_PRESENT;
 	} else {
 		table_entry &= (IA32E_COMM_P_BIT);
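The condition above encodes the EPT misconfiguration rule (SDM 28.2.3.1): in an EPT entry bit 0 is Read, bit 1 Write, bit 2 Execute, a present mapping must not be writable without being readable, and execute-only is legal only when the CPU advertises the execute-only capability. A sketch of that rule in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_EPT_R (1U << 0)
    #define DEMO_EPT_W (1U << 1)
    #define DEMO_EPT_X (1U << 2)

    static bool demo_ept_misconfig(uint32_t rwx, bool x_only_ok)
    {
            if (rwx == DEMO_EPT_W || rwx == (DEMO_EPT_W | DEMO_EPT_X))
                    return true;  /* writable but not readable */
            if (rwx == DEMO_EPT_X && !x_only_ok)
                    return true;  /* execute-only not supported */
            return false;
    }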
@@ -905,7 +878,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
 	 * current page size, obtain the starting physical address
 	 * aligned of current page size
 	 */
-	pa = ((((uint64_t)paddr) / page_size) * page_size);
+	pa = ((uint64_t)paddr) & ~(page_size - 1);
 	if (map_params->page_table_type == PTT_EPT) {
 		/* Keep original attribute(here &0x3f)
 		 * bit 0(R) bit1(W) bit2(X) bit3~5 MT
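The replaced line rounded paddr down to a page boundary with a divide-and-multiply; the new line does the same with a mask. The two are equivalent only when page_size is a power of two, which holds for the 4K/2M/1G sizes used here. A sketch with `demo_align_down` as a hypothetical helper:

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t demo_align_down(uint64_t addr, uint64_t page_size)
    {
            assert((page_size & (page_size - 1)) == 0); /* power of two */
            return addr & ~(page_size - 1);
    }

    int main(void)
    {
            assert(demo_align_down(0x1234567, 0x1000)   == 0x1234000);
            assert(demo_align_down(0x1234567, 0x200000) == 0x1200000);
            return 0;
    }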