@@ -596,58 +596,49 @@ void register_pio_emulation_handler(struct acrn_vm *vm, uint32_t pio_idx,
596
596
/**
597
597
* @brief Register a MMIO handler
598
598
*
599
- * This API registers a MMIO handler to \p vm before it is launched.
599
+ * This API registers a MMIO handler to \p vm before it is started.
600
+ * For pre-launched VMs, this API can also be called after the VM is started.
600
601
*
601
602
* @param vm The VM to which the MMIO handler is registered
602
603
* @param read_write The handler for emulating accesses to the given range
603
604
* @param start The base address of the range \p read_write can emulate
604
605
* @param end The end of the range (exclusive) \p read_write can emulate
605
606
* @param handler_private_data Handler-specific data which will be passed to \p read_write when called
606
607
*
607
- * @retval 0 Registration succeeds
608
- * @retval -EINVAL \p read_write is NULL, \p end is not larger than \p start or \p vm has been launched
608
+ * @return None
609
609
*/
610
- int32_t register_mmio_emulation_handler (struct acrn_vm * vm ,
610
+ void register_mmio_emulation_handler (struct acrn_vm * vm ,
611
611
hv_mem_io_handler_t read_write , uint64_t start ,
612
612
uint64_t end , void * handler_private_data )
613
613
{
614
- int32_t status = - EINVAL ;
615
614
struct mem_io_node * mmio_node ;
616
615
617
- if ((vm -> hw .created_vcpus > 0U ) && (vm -> hw .vcpu_array [0 ].launched )) {
618
- pr_err ("register mmio handler after vm launched" );
619
- } else {
620
- /* Ensure both a read/write handler and range check function exist */
621
- if ((read_write != NULL ) && (end > start )) {
622
- if (vm -> emul_mmio_regions >= CONFIG_MAX_EMULATED_MMIO_REGIONS ) {
623
- pr_err ("the emulated mmio region is out of range" );
624
- } else {
625
- mmio_node = & (vm -> emul_mmio [vm -> emul_mmio_regions ]);
626
- /* Fill in information for this node */
627
- mmio_node -> read_write = read_write ;
628
- mmio_node -> handler_private_data = handler_private_data ;
629
- mmio_node -> range_start = start ;
630
- mmio_node -> range_end = end ;
631
-
632
- (vm -> emul_mmio_regions )++ ;
616
+ /* Ensure both a read/write handler and range check function exist */
617
+ if ((read_write != NULL ) && (end > start )) {
618
+ if (vm -> emul_mmio_regions >= CONFIG_MAX_EMULATED_MMIO_REGIONS ) {
619
+ pr_err ("the emulated mmio region is out of range" );
620
+ } else {
621
+ mmio_node = & (vm -> emul_mmio [vm -> emul_mmio_regions ]);
622
+ /* Fill in information for this node */
623
+ mmio_node -> read_write = read_write ;
624
+ mmio_node -> handler_private_data = handler_private_data ;
625
+ mmio_node -> range_start = start ;
626
+ mmio_node -> range_end = end ;
633
627
634
- /*
635
- * SOS would map all its memory at beginning, so we
636
- * should unmap it. But UOS will not, so we shouldn't
637
- * need to unmap it.
638
- */
639
- if (is_sos_vm (vm )) {
640
- ept_mr_del (vm , (uint64_t * )vm -> arch_vm .nworld_eptp , start , end - start );
641
- }
628
+ (vm -> emul_mmio_regions )++ ;
642
629
643
- /* Return success */
644
- status = 0 ;
630
+ /*
631
+ * SOS would map all its memory at beginning, so we
632
+ * should unmap it. But UOS will not, so we shouldn't
633
+ * need to unmap it.
634
+ */
635
+ if (is_sos_vm (vm )) {
636
+ ept_mr_del (vm , (uint64_t * )vm -> arch_vm .nworld_eptp , start , end - start );
645
637
}
638
+
646
639
}
647
640
}
648
641
649
- /* Return status to caller */
650
- return status ;
651
642
}
652
643
653
644
/**
0 commit comments