
Commit 9e7179c

binbinwu1 authored and lijinxia committed
hv: support gva2gpa in different paging modes
Translate gva2gpa in different paging modes.

Change the definition of gva2gpa:
- return value for error status
- add a parameter for the error code on a paging fault

Change the definition of vm_gva2gpa:
- return value for error status
- add a parameter for the error code on a paging fault

Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Xu, Anthony <anthony.xu@intel.com>
1 parent dd14d8e commit 9e7179c
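
For orientation, the sketch below shows the calling convention this commit introduces (it is not part of the diff): the caller seeds err_code with the access type, calls the new gva2gpa(), and checks the return value before using the translated address. vcpu_inject_pf() is a hypothetical helper, used only to illustrate what a caller could do with the resulting error code.

        /* Sketch only, not part of this commit. */
        uint64_t gpa;
        uint32_t err_code = PAGE_FAULT_WR_FLAG;  /* the gva will be written */
        int ret = gva2gpa(vcpu, gva, &gpa, &err_code);

        if (ret == -EINVAL) {
                /* bad parameters, nothing was translated */
        } else if (ret == -EFAULT) {
                /* err_code now holds the accumulated #PF error code bits;
                 * vcpu_inject_pf() is hypothetical, shown for illustration */
                vcpu_inject_pf(vcpu, gva, err_code);
        } else {
                /* ret == 0: gpa holds the translated guest-physical address */
        }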

File tree

7 files changed: +250 −43 lines changed


hypervisor/arch/x86/guest/guest.c

Lines changed: 175 additions & 17 deletions
@@ -15,6 +15,19 @@ uint32_t e820_entries;
 struct e820_entry e820[E820_MAX_ENTRIES];
 struct e820_mem_params e820_mem;
 
+struct page_walk_info {
+        uint64_t top_entry;     /* Top level paging structure entry */
+        int level;
+        int width;
+        bool is_user_mode;
+        bool is_write_access;
+        bool is_inst_fetch;
+        bool pse;               /* CR4.PSE for 32-bit paging,
+                                 * true for PAE/4-level paging */
+        bool wp;                /* CR0.WP */
+        bool nxe;               /* MSR_IA32_EFER_NXE_BIT */
+};
+
 inline bool
 is_vm0(struct vm *vm)
 {
@@ -173,34 +186,179 @@ enum vm_paging_mode get_vcpu_paging_mode(struct vcpu *vcpu)
         return PAGING_MODE_4_LEVEL;
 }
 
-uint64_t gva2gpa(struct vm *vm, uint64_t cr3, uint64_t gva)
+/* TODO: Add code to check for Reserved bits, SMAP and PKE when doing the
+ * translation during the page walk */
+static int _gva2gpa_common(struct vcpu *vcpu, struct page_walk_info *pw_info,
+        uint64_t gva, uint64_t *gpa, uint32_t *err_code)
 {
-        int level, index, shift;
-        uint64_t *base, addr, entry, page_size;
-        uint64_t gpa = 0;
-
-        addr = cr3;
-
-        for (level = 3; level >= 0; level--) {
+        int i, index, shift;
+        uint8_t *base;
+        uint64_t entry;
+        uint64_t addr, page_size;
+        int ret = 0;
+        int fault = 0;
+
+        if (pw_info->level < 1)
+                return -EINVAL;
+
+        addr = pw_info->top_entry;
+        for (i = pw_info->level - 1; i >= 0; i--) {
                 addr = addr & IA32E_REF_MASK;
-                base = GPA2HVA(vm, addr);
-                ASSERT(base != NULL, "invalid ptp base.");
-                shift = level * 9 + 12;
-                index = (gva >> shift) & 0x1FF;
+                base = GPA2HVA(vcpu->vm, addr);
+                if (base == NULL) {
+                        ret = -EFAULT;
+                        goto out;
+                }
+
+                shift = i * pw_info->width + 12;
+                index = (gva >> shift) & ((1UL << pw_info->width) - 1);
                 page_size = 1UL << shift;
 
-                entry = base[index];
-                if (level > 0 && (entry & MMU_32BIT_PDE_PS) != 0)
+                if (pw_info->width == 10)
+                        /* 32-bit entry */
+                        entry = *((uint32_t *)(base + 4 * index));
+                else
+                        entry = *((uint64_t *)(base + 8 * index));
+
+                /* check if the entry is present */
+                if (!(entry & MMU_32BIT_PDE_P)) {
+                        ret = -EFAULT;
+                        goto out;
+                }
+                /* check for R/W */
+                if (pw_info->is_write_access && !(entry & MMU_32BIT_PDE_RW)) {
+                        /* Case 1: supervisor mode and CR0.WP is 1
+                         * Case 2: user mode */
+                        if (!(!pw_info->is_user_mode && !pw_info->wp))
+                                fault = 1;
+                }
+                /* check for NX; since the XD bit is reserved (0) for 32-bit
+                 * paging, use the same logic as PAE/4-level paging */
+                if (pw_info->is_inst_fetch && pw_info->nxe &&
+                    (entry & MMU_MEM_ATTR_BIT_EXECUTE_DISABLE))
+                        fault = 1;
+
+                /* check for U/S */
+                if (!(entry & MMU_32BIT_PDE_US) && pw_info->is_user_mode)
+                        fault = 1;
+
+                if (pw_info->pse && (i > 0 && (entry & MMU_32BIT_PDE_PS)))
                         break;
                 addr = entry;
         }
 
-        entry >>= shift; entry <<= (shift + 12); entry >>= 12;
-        gpa = entry | (gva & (page_size - 1));
+        entry >>= shift;
+        /* shift left 12 bits more and back to clear XD/Prot Key/Ignored bits */
+        entry <<= (shift + 12);
+        entry >>= 12;
+        *gpa = entry | (gva & (page_size - 1));
+out:
+
+        if (fault) {
+                ret = -EFAULT;
+                *err_code |= PAGE_FAULT_P_FLAG;
+        }
+        return ret;
+}
+
+static int _gva2gpa_pae(struct vcpu *vcpu, struct page_walk_info *pw_info,
+        uint64_t gva, uint64_t *gpa, uint32_t *err_code)
+{
+        int index;
+        uint64_t *base;
+        uint64_t entry;
+        uint64_t addr;
+        int ret;
+
+        addr = pw_info->top_entry & 0xFFFFFFF0UL;
+        base = GPA2HVA(vcpu->vm, addr);
+        if (base == NULL) {
+                ret = -EFAULT;
+                goto out;
+        }
+
+        index = (gva >> 30) & 0x3;
+        entry = base[index];
+
+        if (!(entry & MMU_32BIT_PDE_P)) {
+                ret = -EFAULT;
+                goto out;
+        }
+
+        pw_info->level = 2;
+        pw_info->top_entry = entry;
+        ret = _gva2gpa_common(vcpu, pw_info, gva, gpa, err_code);
+
+out:
+        return ret;
 
-        return gpa;
 }
 
+/* Refer to SDM Vol.3A 6-39 section 6.15 for the format of the page fault
+ * error code.
+ *
+ * The caller should set the content of err_code properly, according to how
+ * the address will be used, before calling this function:
+ * - If it is an address for write, set PAGE_FAULT_WR_FLAG in err_code.
+ * - If it is an address for instruction fetch, set PAGE_FAULT_ID_FLAG in
+ *   err_code.
+ * The caller should check the return value to confirm whether the function
+ * succeeded or not.
+ * If a protection violation is detected during the page walk, this function
+ * still returns the translated gpa; it is up to the caller to decide whether
+ * a #PF needs to be injected.
+ * - Return 0 for success.
+ * - Return -EINVAL for invalid parameter.
+ * - Return -EFAULT for paging fault, and refer to err_code for the page
+ *   fault error code.
+ */
+int gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
+        uint32_t *err_code)
+{
+        struct run_context *cur_context =
+                &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+        enum vm_paging_mode pm = get_vcpu_paging_mode(vcpu);
+        struct page_walk_info pw_info;
+        int ret = 0;
+
+        if (!gpa || !err_code)
+                return -EINVAL;
+        *gpa = 0;
+
+        pw_info.top_entry = cur_context->cr3;
+        pw_info.level = pm;
+        pw_info.is_write_access = !!(*err_code & PAGE_FAULT_WR_FLAG);
+        pw_info.is_inst_fetch = !!(*err_code & PAGE_FAULT_ID_FLAG);
+        pw_info.is_user_mode = ((exec_vmread(VMX_GUEST_CS_SEL) & 0x3) == 3);
+        pw_info.pse = true;
+        pw_info.nxe = cur_context->ia32_efer & MSR_IA32_EFER_NXE_BIT;
+        pw_info.wp = !!(cur_context->cr0 & CR0_WP);
+
+        *err_code &= ~PAGE_FAULT_P_FLAG;
+
+        if (pm == PAGING_MODE_4_LEVEL) {
+                pw_info.width = 9;
+                ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
+        } else if (pm == PAGING_MODE_3_LEVEL) {
+                pw_info.width = 9;
+                ret = _gva2gpa_pae(vcpu, &pw_info, gva, gpa, err_code);
+        } else if (pm == PAGING_MODE_2_LEVEL) {
+                pw_info.width = 10;
+                pw_info.pse = !!(cur_context->cr4 & CR4_PSE);
+                pw_info.nxe = false;
+                ret = _gva2gpa_common(vcpu, &pw_info, gva, gpa, err_code);
+        } else
+                *gpa = gva;
+
+        if (ret == -EFAULT) {
+                if (pw_info.is_user_mode)
+                        *err_code |= PAGE_FAULT_US_FLAG;
+        }
+
+        return ret;
+}
+
+
 void init_e820(void)
 {
         unsigned int i;
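
As a side note on the walk arithmetic in _gva2gpa_common() above: each level consumes pw_info->width bits of the gva above the 12-bit page offset. The standalone program below (an illustration, not hypervisor code) prints the shift and table index used at each level of a 4-level walk; gva2gpa() instead sets width 10 and level 2 for 32-bit paging.

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t gva = 0x00007f1234567890UL;    /* arbitrary example gva */
                int level = 4, width = 9;               /* 4-level paging */

                for (int i = level - 1; i >= 0; i--) {
                        int shift = i * width + 12;
                        uint64_t index = (gva >> shift) & ((1UL << width) - 1);

                        printf("level %d: shift %2d, index %3lu\n",
                               i, shift, (unsigned long)index);
                }
                return 0;
        }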

hypervisor/arch/x86/guest/instr_emul.c

Lines changed: 17 additions & 3 deletions
@@ -674,6 +674,7 @@ emulate_movs(struct vcpu *vcpu, __unused uint64_t gpa, struct vie *vie,
         uint64_t dstaddr, srcaddr, dstgpa, srcgpa;
         uint64_t rcx, rdi, rsi, rflags;
         int error, fault, opsize, seg, repeat;
+        uint32_t err_code;
 
         opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;
         error = 0;
@@ -713,8 +714,14 @@
         if (error || fault)
                 goto done;
 
-        vm_gva2gpa(vcpu, srcaddr, &srcgpa);
-        vm_gva2gpa(vcpu, dstaddr, &dstgpa);
+        err_code = 0;
+        error = vm_gva2gpa(vcpu, srcaddr, &srcgpa, &err_code);
+        if (error)
+                goto done;
+        err_code = PAGE_FAULT_WR_FLAG;
+        error = vm_gva2gpa(vcpu, dstaddr, &dstgpa, &err_code);
+        if (error)
+                goto done;
         memcpy_s((char *)dstaddr, 16, (char *)srcaddr, opsize);
 
         error = vie_read_register(vcpu, VM_REG_GUEST_RSI, &rsi);
@@ -1236,6 +1243,7 @@ emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
         struct seg_desc ss_desc;
         uint64_t cr0, rflags, rsp, stack_gla, stack_gpa, val;
         int error, size, stackaddrsize, pushop;
+        uint32_t err_code = 0;
 
         memset(&ss_desc, 0, sizeof(ss_desc));
 
@@ -1302,7 +1310,13 @@
                 return 0;
         }
 
-        vm_gva2gpa(vcpu, stack_gla, &stack_gpa);
+        if (pushop)
+                err_code |= PAGE_FAULT_WR_FLAG;
+        error = vm_gva2gpa(vcpu, stack_gla, &stack_gpa, &err_code);
+        if (error) {
+                pr_err("%s: failed to translate gva2gpa", __func__);
+                return error;
+        }
         if (pushop) {
                 error = memread(vcpu, mmio_gpa, &val, size, arg);
                 if (error == 0)

hypervisor/arch/x86/guest/instr_emul_wrapper.c

Lines changed: 12 additions & 6 deletions
@@ -304,15 +304,15 @@ static int mmio_write(struct vcpu *vcpu, __unused uint64_t gpa, uint64_t wval,
         return 0;
 }
 
-void vm_gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa)
+int vm_gva2gpa(struct vcpu *vcpu, uint64_t gva, uint64_t *gpa,
+        uint32_t *err_code)
 {
 
         ASSERT(gpa != NULL, "Error in input arguments");
         ASSERT(vcpu != NULL,
                 "Invalid vcpu id when gva2gpa");
 
-        *gpa = gva2gpa(vcpu->vm,
-                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr3, gva);
+        return gva2gpa(vcpu, gva, gpa, err_code);
 }
 
 uint8_t decode_instruction(struct vcpu *vcpu)
@@ -323,13 +323,19 @@ uint8_t decode_instruction(struct vcpu *vcpu)
         uint32_t csar;
         int retval = 0;
         enum vm_cpu_mode cpu_mode;
+        int error;
+        uint32_t err_code;
 
         guest_rip_gva =
                 vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].rip;
 
-        guest_rip_gpa = gva2gpa(vcpu->vm,
-                vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context].cr3,
-                guest_rip_gva);
+        err_code = PAGE_FAULT_ID_FLAG;
+        error = gva2gpa(vcpu, guest_rip_gva, &guest_rip_gpa, &err_code);
+        if (error) {
+                pr_err("gva2gpa failed for guest_rip_gva 0x%016llx:",
+                        guest_rip_gva);
+                return 0;
+        }
 
         guest_rip_hva = GPA2HVA(vcpu->vm, guest_rip_gpa);
         emul_cnx = &per_cpu(g_inst_ctxt, vcpu->pcpu_id);

hypervisor/arch/x86/guest/ucode.c

Lines changed: 13 additions & 2 deletions
@@ -32,10 +32,16 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
         int data_size, data_page_num;
         uint8_t *ucode_ptr, *ptr;
         int chunk_size;
+        int error = 0;
+        uint32_t err_code;
 
         gva = v - sizeof(struct ucode_header);
 
-        vm_gva2gpa(vcpu, gva, &gpa);
+        err_code = 0;
+        error = vm_gva2gpa(vcpu, gva, &gpa, &err_code);
+        if (error)
+                return;
+
         uhdr = (struct ucode_header *)GPA2HVA(vcpu->vm, gpa);
 
         data_size = GET_DATA_SIZE(uhdr) + sizeof(struct ucode_header);
@@ -60,7 +66,12 @@ void acrn_update_ucode(struct vcpu *vcpu, uint64_t v)
                 ucode_ptr += chunk_size;
                 gva += chunk_size;
 
-                vm_gva2gpa(vcpu, gva, &gpa);
+                err_code = 0;
+                error = vm_gva2gpa(vcpu, gva, &gpa, &err_code);
+                if (error) {
+                        free(ucode_ptr);
+                        return;
+                }
                 hva = (uint64_t)GPA2HVA(vcpu->vm, gpa);
         }
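
The loop above re-translates gva for every chunk because consecutive guest-virtual pages are generally not physically contiguous, so each page must be mapped independently. A rough sketch of that pattern follows (illustration only, not part of the commit); dst (a uint8_t pointer), gva, remaining and vcpu are assumed to be set up by the caller, and PAGE_SIZE_4K stands in for the hypervisor's page-size constant.

        /* Sketch: copy `remaining` bytes from guest-virtual `gva` to `dst`,
         * re-translating at each page boundary. */
        while (remaining > 0) {
                uint64_t gpa;
                uint32_t err_code = 0;
                uint64_t chunk = PAGE_SIZE_4K - (gva & (PAGE_SIZE_4K - 1));

                if (chunk > remaining)
                        chunk = remaining;

                if (vm_gva2gpa(vcpu, gva, &gpa, &err_code) != 0)
                        return;         /* translation failed, give up */

                memcpy_s(dst, chunk, GPA2HVA(vcpu->vm, gpa), chunk);

                dst += chunk;
                gva += chunk;
                remaining -= chunk;
        }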

hypervisor/debug/dump.c

Lines changed: 24 additions & 3 deletions
@@ -104,8 +104,16 @@ static void dump_guest_stack(struct vcpu *vcpu)
         uint64_t page2_size;
         struct run_context *cur_context =
                 &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+        uint32_t err_code;
+        int err;
 
-        gpa = gva2gpa(vcpu->vm, cur_context->cr3, cur_context->rsp);
+        err_code = 0;
+        err = gva2gpa(vcpu, cur_context->rsp, &gpa, &err_code);
+        if (err) {
+                printf("gva2gpa failed for guest rsp 0x%016llx\r\n",
+                        cur_context->rsp);
+                return;
+        }
         hpa = gpa2hpa(vcpu->vm, gpa);
         printf("\r\nGuest Stack:\r\n");
         printf("Dump stack for vcpu %d, from gva 0x%016llx ->"
@@ -131,8 +139,15 @@ static void dump_guest_stack(struct vcpu *vcpu)
                         "0x%016llx\r\n", (hpa+i*32), tmp[i*4],
                         tmp[i*4+1], tmp[i*4+2], tmp[i*4+3]);
         }
-        gpa = gva2gpa(vcpu->vm, cur_context->cr3,
+        err_code = 0;
+        err = gva2gpa(vcpu, cur_context->rsp + page1_size, &gpa,
+                &err_code);
+        if (err) {
+                printf("gva2gpa failed for guest rsp 0x%016llx\r\n",
                         cur_context->rsp + page1_size);
+                return;
+
+        }
         hpa = gpa2hpa(vcpu->vm, gpa);
         printf("Dump stack for vcpu %d, from gva 0x%016llx ->"
                 "gpa 0x%016llx -> hpa 0x%016llx \r\n",
@@ -159,6 +174,8 @@ static void show_guest_call_trace(struct vcpu *vcpu)
         uint64_t count = 0;
         struct run_context *cur_context =
                 &vcpu->arch_vcpu.contexts[vcpu->arch_vcpu.cur_context];
+        int err;
+        uint32_t err_code;
 
         bp = cur_context->guest_cpu_regs.regs.rbp;
         printf("Guest Call Trace: **************************************\r\n");
@@ -177,7 +194,11 @@ static void show_guest_call_trace(struct vcpu *vcpu)
          * if the address is invalid, it will cause hv page fault
          * then halt system */
         while ((count++ < CALL_TRACE_HIERARCHY_MAX) && (bp != 0)) {
-                gpa = gva2gpa(vcpu->vm, cur_context->cr3, bp);
+                err = gva2gpa(vcpu, bp, &gpa, &err_code);
+                if (err) {
+                        printf("gva2gpa failed for guest bp 0x%016llx\r\n", bp);
+                        break;
+                }
                 hpa = gpa2hpa(vcpu->vm, gpa);
                 hva = HPA2HVA(hpa);
                 printf("BP_GVA(0x%016llx)->BP_GPA(0x%016llx)"
