@@ -11,7 +11,6 @@
 
 #define ACRN_DBG_EPT 6U
 
-
 static uint64_t find_next_table(uint32_t table_offset, void *table_base)
 {
 	uint64_t table_entry;
@@ -190,22 +189,6 @@ bool is_ept_supported(void)
 	return status;
 }
 
-static int
-hv_emulate_mmio(struct vcpu *vcpu, struct io_request *io_req,
-		struct mem_io_node *mmio_handler)
-{
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
-
-	if ((mmio_req->address % mmio_req->size) != 0UL) {
-		pr_err("access size not align with paddr");
-		return -EINVAL;
-	}
-
-	/* Handle this MMIO operation */
-	return mmio_handler->read_write(vcpu, io_req,
-			mmio_handler->handler_private_data);
-}
-
 int register_mmio_emulation_handler(struct vm *vm,
 		hv_mem_io_handler_t read_write, uint64_t start,
 		uint64_t end, void *handler_private_data)
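The deleted hv_emulate_mmio() was the hypervisor-side dispatch step: verify that the access is aligned to its own size, then invoke the callback registered through register_mmio_emulation_handler(), whose signature survives in the context lines above. Below is a minimal sketch of how such a handler is registered and shaped, assuming the callback takes (vcpu, io_req, private_data) as the deleted read_write() call suggests; the foo device, its MMIO window, and its register file are hypothetical:

/* Illustrative only: a hypothetical device claiming a 4 KiB MMIO window.
 * struct vcpu, struct io_request, REQUEST_READ, etc. come from the ACRN
 * headers; foo_dev and its register file are invented for this sketch. */
#define FOO_MMIO_BASE	0xd0000000UL
#define FOO_MMIO_END	(FOO_MMIO_BASE + 0x1000UL)

struct foo_dev {
	uint64_t regs[512];	/* 4 KiB of 8-byte registers */
};

static struct foo_dev foo_instance;

static int foo_mmio_access(struct vcpu *vcpu __unused,
		struct io_request *io_req, void *private_data)
{
	struct mmio_request *mmio_req = &io_req->reqs.mmio;
	struct foo_dev *foo = (struct foo_dev *)private_data;
	/* The framework only dispatches accesses inside the registered
	 * range, so the index stays within regs[]. */
	uint64_t idx = (mmio_req->address - FOO_MMIO_BASE) >> 3U;

	if (mmio_req->direction == REQUEST_READ) {
		mmio_req->value = foo->regs[idx];
	} else {
		foo->regs[idx] = mmio_req->value;
	}
	return 0;
}

/* At device initialization:
 *	register_mmio_emulation_handler(vm, foo_mmio_access,
 *		FOO_MMIO_BASE, FOO_MMIO_END, &foo_instance);
 */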
@@ -275,82 +258,30 @@ void unregister_mmio_emulation_handler(struct vm *vm, uint64_t start,
 	}
 }
 
-int dm_emulate_mmio_post(struct vcpu *vcpu)
-{
-	int ret = 0;
-	uint16_t cur = vcpu->vcpu_id;
-	struct io_request *io_req = &vcpu->req;
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
-	union vhm_request_buffer *req_buf;
-	struct vhm_request *vhm_req;
-
-	req_buf = (union vhm_request_buffer *)(vcpu->vm->sw.io_shared_page);
-	vhm_req = &req_buf->req_queue[cur];
-
-	mmio_req->value = vhm_req->reqs.mmio.value;
-	io_req->processed = vhm_req->processed;
-
-	/* VHM emulation data already copy to req, mark to free slot now */
-	vhm_req->valid = 0;
-
-	if (io_req->processed != REQ_STATE_SUCCESS) {
-		goto out;
-	}
-
-	if (mmio_req->direction == REQUEST_READ) {
-		/* Emulate instruction and update vcpu register set */
-		ret = emulate_instruction(vcpu);
-		if (ret != 0) {
-			goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-
-static int
-dm_emulate_mmio_pre(struct vcpu *vcpu, uint64_t exit_qual __unused)
-{
-	int status;
-	struct io_request *io_req = &vcpu->req;
-	struct mmio_request *mmio_req = &io_req->reqs.mmio;
-
-	if (mmio_req->direction == REQUEST_WRITE) {
-		status = emulate_instruction(vcpu);
-		if (status != 0) {
-			return status;
-		}
-		/* XXX: write access while EPT perm RX -> WP */
-		if ((exit_qual & 0x38UL) == 0x28UL) {
-			io_req->type = REQ_WP;
-		}
-	}
-
-	return 0;
-}
-
 int ept_violation_vmexit_handler(struct vcpu *vcpu)
 {
 	int status = -EINVAL, ret;
 	uint64_t exit_qual;
 	uint64_t gpa;
-	struct list_head *pos;
 	struct io_request *io_req = &vcpu->req;
 	struct mmio_request *mmio_req = &io_req->reqs.mmio;
-	struct mem_io_node *mmio_handler = NULL;
-
-	io_req->type = REQ_MMIO;
-	io_req->processed = REQ_STATE_PENDING;
 
 	/* Handle page fault from guest */
 	exit_qual = vcpu->arch_vcpu.exit_qualification;
 
+	io_req->type = REQ_MMIO;
+	io_req->processed = REQ_STATE_PENDING;
+
 	/* Specify if read or write operation */
 	if ((exit_qual & 0x2UL) != 0UL) {
 		/* Write operation */
 		mmio_req->direction = REQUEST_WRITE;
 		mmio_req->value = 0UL;
+
+		/* XXX: write access while EPT perm RX -> WP */
+		if ((exit_qual & 0x38UL) == 0x28UL) {
+			io_req->type = REQ_WP;
+		}
 	} else {
 		/* Read operation */
 		mmio_req->direction = REQUEST_READ;
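The raw masks tested above are bits of the VMX exit qualification for EPT violations (Intel SDM Vol. 3, "Exit Qualification for EPT Violations"): bit 1 is set when the access was a write, and bits 3:5 report the read/write/execute permissions the EPT granted for the faulting GPA. A short decoding sketch follows; the macro names are invented here for readability, the source itself uses the bare masks:

/* EPT-violation exit qualification bits, per the Intel SDM; the names
 * below are ours, not ACRN's. */
#define EPT_VIOL_RD_ACCESS	(1UL << 0U)	/* violation was a data read */
#define EPT_VIOL_WR_ACCESS	(1UL << 1U)	/* violation was a data write */
#define EPT_VIOL_INSTR_FETCH	(1UL << 2U)	/* violation was an instruction fetch */
#define EPT_VIOL_GPA_RD		(1UL << 3U)	/* EPT permitted reads of the GPA */
#define EPT_VIOL_GPA_WR		(1UL << 4U)	/* EPT permitted writes of the GPA */
#define EPT_VIOL_GPA_EXEC	(1UL << 5U)	/* EPT permitted fetches from the GPA */

/* So in the handler above:
 *   (exit_qual & 0x2UL) != 0UL
 *       <=> the guest performed a write;
 *   (exit_qual & 0x38UL) == 0x28UL
 *       <=> the page was mapped readable + executable but not writable
 *           (RX), i.e. a write hit a write-protected page, hence REQ_WP.
 */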
@@ -380,59 +311,31 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
 		goto out;
 	}
 
-	list_for_each(pos, &vcpu->vm->mmio_list) {
-		mmio_handler = list_entry(pos, struct mem_io_node, list);
-		if (((mmio_req->address + mmio_req->size) <=
-			mmio_handler->range_start) ||
-			(mmio_req->address >= mmio_handler->range_end)) {
-			continue;
-		}
-		else if (!((mmio_req->address >= mmio_handler->range_start) &&
-			((mmio_req->address + mmio_req->size) <=
-			mmio_handler->range_end))) {
-			pr_fatal("Err MMIO, addr:0x%llx, size:%x",
-				mmio_req->address, mmio_req->size);
-			return -EIO;
-		}
 
-		if (mmio_req->direction == REQUEST_WRITE) {
-			if (emulate_instruction(vcpu) != 0) {
-				goto out;
-			}
-		}
+	/*
+	 * For MMIO write, ask DM to run MMIO emulation after
+	 * instruction emulation. For MMIO read, ask DM to run MMIO
+	 * emulation at first.
+	 */
 
-		/* Call generic memory emulation handler
-		 * For MMIO write, call hv_emulate_mmio after
-		 * instruction emulation. For MMIO read,
-		 * call hv_emulate_mmio at first.
-		 */
-		hv_emulate_mmio(vcpu, io_req, mmio_handler);
-		if (mmio_req->direction == REQUEST_READ) {
-			/* Emulate instruction and update vcpu register set */
-			if (emulate_instruction(vcpu) != 0) {
-				goto out;
-			}
+	/* Determine value being written. */
+	if (mmio_req->direction == REQUEST_WRITE) {
+		status = emulate_instruction(vcpu);
+		if (status != 0) {
+			goto out;
 		}
-
-		status = 0;
-		break;
 	}
 
-	if (status != 0) {
-		/*
-		 * No mmio handler from HV side, search from VHM in Dom0
-		 *
-		 * ACRN insert request to VHM and inject upcall
-		 * For MMIO write, ask DM to run MMIO emulation after
-		 * instruction emulation. For MMIO read, ask DM to run MMIO
-		 * emulation at first.
-		 */
-		if (dm_emulate_mmio_pre(vcpu, exit_qual) != 0) {
-			goto out;
-		}
+	status = emulate_io(vcpu, io_req);
 
-		status = acrn_insert_request_wait(vcpu, &vcpu->req);
-	}
+	/* io_req is hypervisor-private. For requests sent to VHM,
+	 * io_req->processed will be PENDING till dm_emulate_mmio_post() is
+	 * called on vcpu resume. */
+	if (status == 0) {
+		if (io_req->processed != REQ_STATE_PENDING) {
+			status = emulate_mmio_post(vcpu, io_req);
+		}
+	}
 
 	return status;
 
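With this change the handler funnels every MMIO fault through a single emulate_io() call: a request completed inside the hypervisor comes back with processed no longer pending and is finished immediately by emulate_mmio_post(), while a request queued to the VHM stays in REQ_STATE_PENDING and is completed by dm_emulate_mmio_post() when the vcpu resumes, per the new comment in the diff. A sketch of that caller-side contract follows; the wrapper name is invented here, and the two callees are assumed to behave as this call site implies:

/* Hypothetical wrapper restating the call-site logic above; emulate_io()
 * and emulate_mmio_post() are defined elsewhere in the tree. */
static int handle_mmio_exit(struct vcpu *vcpu, struct io_request *io_req)
{
	int status = emulate_io(vcpu, io_req);

	if (status == 0) {
		if (io_req->processed == REQ_STATE_PENDING) {
			/* Queued to the VHM: dm_emulate_mmio_post() will
			 * complete the request when the vcpu is resumed. */
		} else {
			/* Completed inside the hypervisor: finish now
			 * (e.g. commit a read value to the guest
			 * register set via instruction emulation). */
			status = emulate_mmio_post(vcpu, io_req);
		}
	}

	return status;
}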