@@ -330,6 +330,68 @@ static struct pci_vdev *find_vdev(const struct acrn_vpci *vpci, union pci_bdf bd
330
330
return vdev ;
331
331
}
332
332
333
/**
 * @brief Per-device init callback for a passthrough PCI device.
 *
 * Sets up virtual MSI and MSI-X capability emulation, initializes the
 * passthrough device state (init_vdev_pt), and assigns the device to an
 * iommu domain via assign_vdev_pt_iommu_domain().
 *
 * @param vdev the virtual PCI device to initialize
 *
 * @pre vdev != NULL
 */
static void vpci_init_pt_dev(struct pci_vdev *vdev)
{
	init_vmsi(vdev);
	init_vmsix(vdev);

	/*
	 * Here init_vdev_pt() needs to be called after init_vmsix() for the following reason:
	 * init_vdev_pt() will indirectly call has_msix_cap(), which
	 * requires init_vmsix() to be called first.
	 */
	init_vdev_pt(vdev);
	assign_vdev_pt_iommu_domain(vdev);
}
346
+
347
/**
 * @brief Per-device deinit callback for a passthrough PCI device.
 *
 * Tears down the virtual MSI and MSI-X emulation state and removes the
 * device from its iommu domain — the inverse of vpci_init_pt_dev().
 *
 * @param vdev the virtual PCI device to deinitialize
 *
 * @pre vdev != NULL
 */
static void vpci_deinit_pt_dev(struct pci_vdev *vdev)
{
	deinit_vmsi(vdev);
	deinit_vmsix(vdev);
	remove_vdev_pt_iommu_domain(vdev);
}
353
+
354
+ static int32_t vpci_write_pt_dev_cfg (struct pci_vdev * vdev , uint32_t offset ,
355
+ uint32_t bytes , uint32_t val )
356
+ {
357
+ if (vbar_access (vdev , offset , bytes )) {
358
+ (void )vdev_pt_write_cfg (vdev , offset , bytes , val );
359
+ } else if (msicap_access (vdev , offset )) {
360
+ (void )vmsi_write_cfg (vdev , offset , bytes , val );
361
+ } else if (msixcap_access (vdev , offset )) {
362
+ (void )vmsix_write_cfg (vdev , offset , bytes , val );
363
+ } else {
364
+ /* passthru to physical device */
365
+ pci_pdev_write_cfg (vdev -> pdev -> bdf , offset , bytes , val );
366
+ }
367
+
368
+ return 0 ;
369
+ }
370
+
371
+ static int32_t vpci_read_pt_dev_cfg (struct pci_vdev * vdev , uint32_t offset ,
372
+ uint32_t bytes , uint32_t * val )
373
+ {
374
+ if (vbar_access (vdev , offset , bytes )) {
375
+ (void )vdev_pt_read_cfg (vdev , offset , bytes , val );
376
+ } else if (msicap_access (vdev , offset )) {
377
+ (void )vmsi_read_cfg (vdev , offset , bytes , val );
378
+ } else if (msixcap_access (vdev , offset )) {
379
+ (void )vmsix_read_cfg (vdev , offset , bytes , val );
380
+ } else {
381
+ /* passthru to physical device */
382
+ * val = pci_pdev_read_cfg (vdev -> pdev -> bdf , offset , bytes );
383
+ }
384
+
385
+ return 0 ;
386
+ }
387
+
388
/*
 * Ops table for passthrough PCI devices: bundles the init/deinit and
 * config-space read/write callbacks so callers can dispatch through
 * vdev->vdev_ops instead of hard-coding the handler chain.
 */
static struct pci_vdev_ops pci_pt_dev_ops = {
	.init_vdev	= vpci_init_pt_dev,
	.deinit_vdev	= vpci_deinit_pt_dev,
	.write_vdev_cfg	= vpci_write_pt_dev_cfg,
	.read_vdev_cfg	= vpci_read_pt_dev_cfg,
};
394
+
333
395
/**
334
396
* @pre vpci != NULL
335
397
*/
@@ -339,14 +401,7 @@ static void read_cfg(const struct acrn_vpci *vpci, union pci_bdf bdf,
339
401
struct pci_vdev * vdev = find_vdev (vpci , bdf );
340
402
341
403
if (vdev != NULL ) {
342
- if ((vhostbridge_read_cfg (vdev , offset , bytes , val ) != 0 )
343
- && (vdev_pt_read_cfg (vdev , offset , bytes , val ) != 0 )
344
- && (vmsi_read_cfg (vdev , offset , bytes , val ) != 0 )
345
- && (vmsix_read_cfg (vdev , offset , bytes , val ) != 0 )
346
- ) {
347
- /* Not handled by any handlers, passthru to physical device */
348
- * val = pci_pdev_read_cfg (vdev -> pdev -> bdf , offset , bytes );
349
- }
404
+ vdev -> vdev_ops -> read_vdev_cfg (vdev , offset , bytes , val );
350
405
}
351
406
}
352
407
@@ -359,14 +414,7 @@ static void write_cfg(const struct acrn_vpci *vpci, union pci_bdf bdf,
359
414
struct pci_vdev * vdev = find_vdev (vpci , bdf );
360
415
361
416
if (vdev != NULL ) {
362
- if ((vhostbridge_write_cfg (vdev , offset , bytes , val ) != 0 )
363
- && (vdev_pt_write_cfg (vdev , offset , bytes , val ) != 0 )
364
- && (vmsi_write_cfg (vdev , offset , bytes , val ) != 0 )
365
- && (vmsix_write_cfg (vdev , offset , bytes , val ) != 0 )
366
- ) {
367
- /* Not handled by any handlers, passthru to physical device */
368
- pci_pdev_write_cfg (vdev -> pdev -> bdf , offset , bytes , val );
369
- }
417
+ vdev -> vdev_ops -> write_vdev_cfg (vdev , offset , bytes , val );
370
418
}
371
419
}
372
420
@@ -425,24 +473,8 @@ static void init_vdev_for_pdev(struct pci_pdev *pdev, const struct acrn_vm *vm)
425
473
vdev -> bdf .value = pdev -> bdf .value ;
426
474
}
427
475
428
- init_vhostbridge (vdev );
429
- init_vmsi (vdev );
430
- init_vmsix (vdev );
431
-
432
- /*
433
- * Here init_vdev_pt() needs to be called after init_vmsix() for the following reason:
434
- * init_vdev_pt() will indirectly call has_msix_cap(), which
435
- * requires init_vmsix() to be called first.
436
- */
437
- init_vdev_pt (vdev );
438
-
439
- /*
440
- * For pre-launched VM, the host bridge is fully virtualized and it does not have a physical
441
- * host bridge counterpart.
442
- */
443
- if ((is_prelaunched_vm (vm ) && !is_hostbridge (vdev )) || is_sos_vm (vm )) {
444
- assign_vdev_pt_iommu_domain (vdev );
445
- }
476
+ vdev -> vdev_ops = & pci_pt_dev_ops ;
477
+ vdev -> vdev_ops -> init_vdev (vdev );
446
478
}
447
479
}
448
480
@@ -471,13 +503,7 @@ static void deinit_prelaunched_vm_vpci(const struct acrn_vm *vm)
471
503
for (i = 0U ; i < vm -> vpci .pci_vdev_cnt ; i ++ ) {
472
504
vdev = (struct pci_vdev * ) & (vm -> vpci .pci_vdevs [i ]);
473
505
474
- deinit_vhostbridge (vdev );
475
- deinit_vmsi (vdev );
476
- deinit_vmsix (vdev );
477
-
478
- if ((is_prelaunched_vm (vm ) && !is_hostbridge (vdev )) || is_sos_vm (vm )) {
479
- remove_vdev_pt_iommu_domain (vdev );
480
- }
506
+ vdev -> vdev_ops -> deinit_vdev (vdev );
481
507
}
482
508
}
483
509
0 commit comments