@@ -413,46 +413,24 @@ int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)
 	return ret;
 }
 
-int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
+int64_t _set_vm_memmap(struct vm *vm, struct vm *target_vm,
+	struct vm_set_memmap *memmap)
 {
-	int64_t ret = 0;
 	uint64_t hpa;
 	uint32_t attr, prot;
-	struct vm_set_memmap memmap;
-	struct vm *target_vm = get_vm_from_vmid(vmid);
-
-	if (!vm || !target_vm)
-		return -1;
-
-	memset((void *)&memmap, 0, sizeof(memmap));
-
-	if (copy_from_vm(vm, &memmap, param, sizeof(memmap))) {
-		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -1;
-	}
-
-	if (!is_vm0(vm)) {
-		pr_err("%s: ERROR! Not coming from service vm", __func__);
-		return -1;
-	}
 
-	if (is_vm0(target_vm)) {
-		pr_err("%s: ERROR! Targeting to service vm", __func__);
-		return -1;
-	}
-
-	if ((memmap.length & 0xFFF) != 0) {
+	if ((memmap->length & 0xFFF) != 0) {
 		pr_err("%s: ERROR! [vm%d] map size 0x%x is not page aligned",
-			__func__, vmid, memmap.length);
+			__func__, target_vm->attr.id, memmap->length);
 		return -1;
 	}
 
-	hpa = gpa2hpa(vm, memmap.vm0_gpa);
+	hpa = gpa2hpa(vm, memmap->vm0_gpa);
 	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x size=0x%x",
-		vmid, memmap.remote_gpa, hpa, memmap.length);
+		target_vm->attr.id, memmap->remote_gpa, hpa, memmap->length);
 
 	if (((hpa <= CONFIG_RAM_START) &&
-		(hpa + memmap.length > CONFIG_RAM_START)) ||
+		(hpa + memmap->length > CONFIG_RAM_START)) ||
 		((hpa >= CONFIG_RAM_START) &&
 		(hpa < CONFIG_RAM_START + CONFIG_RAM_SIZE))) {
 		pr_err("%s: ERROR! overlap the HV memory region.", __func__);
@@ -461,8 +439,8 @@ int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
 
 	/* Check prot */
 	attr = 0;
-	if (memmap.type != MAP_UNMAP) {
-		prot = memmap.prot;
+	if (memmap->type != MAP_UNMAP) {
+		prot = (memmap->prot != 0) ? memmap->prot : memmap->prot_2;
 		if (prot & MEM_ACCESS_READ)
 			attr |= MMU_MEM_ATTR_READ;
 		if (prot & MEM_ACCESS_WRITE)
@@ -484,10 +462,78 @@ int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
 	}
 
 	/* create gpa to hpa EPT mapping */
-	ret = ept_mmap(target_vm, hpa,
-		memmap.remote_gpa, memmap.length, memmap.type, attr);
+	return ept_mmap(target_vm, hpa,
+		memmap->remote_gpa, memmap->length, memmap->type, attr);
+}
 
-	return ret;
+int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
+{
+	struct vm_set_memmap memmap;
+	struct vm *target_vm = get_vm_from_vmid(vmid);
+
+	if (!vm || !target_vm)
+		return -1;
+
+	memset((void *)&memmap, 0, sizeof(memmap));
+
+	if (copy_from_vm(vm, &memmap, param, sizeof(memmap))) {
+		pr_err("%s: Unable copy param to vm\n", __func__);
+		return -1;
+	}
+
+	if (!is_vm0(vm)) {
+		pr_err("%s: ERROR! Not coming from service vm", __func__);
+		return -1;
+	}
+
+	if (is_vm0(target_vm)) {
+		pr_err("%s: ERROR! Targeting to service vm", __func__);
+		return -1;
+	}
+
+	return _set_vm_memmap(vm, target_vm, &memmap);
+}
+
+int64_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param)
+{
+	struct set_memmaps set_memmaps;
+	struct memory_map *regions;
+	struct vm *target_vm;
+	unsigned int idx;
+
+	if (!is_vm0(vm)) {
+		pr_err("%s: ERROR! Not coming from service vm",
+			__func__);
+		return -1;
+	}
+
+	memset((void *)&set_memmaps, 0, sizeof(set_memmaps));
+
+	if (copy_from_vm(vm, &set_memmaps, param, sizeof(set_memmaps))) {
+		pr_err("%s: Unable copy param from vm\n", __func__);
+		return -1;
+	}
+
+	target_vm = get_vm_from_vmid(set_memmaps.vmid);
+	if (is_vm0(target_vm)) {
+		pr_err("%s: ERROR! Targeting to service vm",
+			__func__);
+		return -1;
+	}
+
+	idx = 0;
+	/* TODO: use copy_from_vm for this buffer page */
+	regions = GPA2HVA(vm, set_memmaps.memmaps_gpa);
+	while (idx < set_memmaps.memmaps_num) {
+		/* the forced pointer cast below is for backward compatibility
+		 * with struct vm_set_memmap; it will be removed in the future
+		 */
+		if (_set_vm_memmap(vm, target_vm,
+			(struct vm_set_memmap *)&regions[idx]) < 0)
+			return -1;
+		idx++;
+	}
+	return 0;
 }
 
 int64_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
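
For orientation: the new batched path reads a single struct set_memmaps descriptor via copy_from_vm(), then walks an array of struct memory_map regions at set_memmaps.memmaps_gpa, funneling each entry through _set_vm_memmap(). A minimal sketch of what that descriptor could look like, inferred only from the three fields this diff dereferences; the field types and ordering are assumptions, and the authoritative definition lives in the ACRN public headers:

#include <stdint.h>

/* Hypothetical layout, reconstructed from the fields this diff reads
 * (set_memmaps.vmid, .memmaps_num, .memmaps_gpa); types and ordering
 * are assumed, not taken from the real header.
 */
struct set_memmaps {
	uint64_t vmid;        /* target VM id, resolved via get_vm_from_vmid() */
	uint64_t memmaps_num; /* number of struct memory_map entries to apply */
	uint64_t memmaps_gpa; /* service-VM (vm0) GPA of the memory_map array */
};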