Skip to content

Commit 9eba328

Browse files
fyin1acrnsi
authored and committed
vdev_ops: add general vdev ops
And use the ops based operations instead of direct access vdev specific API. Tracked-On: #3241 Signed-off-by: Yin Fengwei <fengwei.yin@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 37de8f0 commit 9eba328

File tree

1 file changed

+67
-41
lines changed

1 file changed

+67
-41
lines changed

hypervisor/dm/vpci/vpci.c

Lines changed: 67 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -330,6 +330,68 @@ static struct pci_vdev *find_vdev(const struct acrn_vpci *vpci, union pci_bdf bd
330330
return vdev;
331331
}
332332

333+
/*
 * Initialize a passthrough PCI vdev: set up MSI and MSI-X emulation,
 * initialize the passthrough device state, and assign the device to
 * its IOMMU domain.
 *
 * NOTE(review): assumes vdev != NULL and vdev is backed by a physical
 * device — confirm callers guarantee this.
 */
static void vpci_init_pt_dev(struct pci_vdev *vdev)
{
	init_vmsi(vdev);
	init_vmsix(vdev);

	/*
	 * Here init_vdev_pt() needs to be called after init_vmsix() for the following reason:
	 * init_vdev_pt() will indirectly call has_msix_cap(), which
	 * requires init_vmsix() to be called first.
	 */
	init_vdev_pt(vdev);
	assign_vdev_pt_iommu_domain(vdev);
}
346+
347+
/*
 * Tear down a passthrough PCI vdev: undo the MSI and MSI-X emulation
 * state and remove the device from its IOMMU domain.
 *
 * Counterpart of vpci_init_pt_dev(); invoked through
 * vdev->vdev_ops->deinit_vdev.
 */
static void vpci_deinit_pt_dev(struct pci_vdev *vdev)
{
	deinit_vmsi(vdev);
	deinit_vmsix(vdev);
	remove_vdev_pt_iommu_domain(vdev);
}
353+
354+
static int32_t vpci_write_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
355+
uint32_t bytes, uint32_t val)
356+
{
357+
if (vbar_access(vdev, offset, bytes)) {
358+
(void)vdev_pt_write_cfg(vdev, offset, bytes, val);
359+
} else if (msicap_access(vdev, offset)) {
360+
(void)vmsi_write_cfg(vdev, offset, bytes, val);
361+
} else if (msixcap_access(vdev, offset)) {
362+
(void)vmsix_write_cfg(vdev, offset, bytes, val);
363+
} else {
364+
/* passthru to physical device */
365+
pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
366+
}
367+
368+
return 0;
369+
}
370+
371+
static int32_t vpci_read_pt_dev_cfg(struct pci_vdev *vdev, uint32_t offset,
372+
uint32_t bytes, uint32_t *val)
373+
{
374+
if (vbar_access(vdev, offset, bytes)) {
375+
(void)vdev_pt_read_cfg(vdev, offset, bytes, val);
376+
} else if (msicap_access(vdev, offset)) {
377+
(void)vmsi_read_cfg(vdev, offset, bytes, val);
378+
} else if (msixcap_access(vdev, offset)) {
379+
(void)vmsix_read_cfg(vdev, offset, bytes, val);
380+
} else {
381+
/* passthru to physical device */
382+
*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
383+
}
384+
385+
return 0;
386+
}
387+
388+
static struct pci_vdev_ops pci_pt_dev_ops = {
389+
.init_vdev = vpci_init_pt_dev,
390+
.deinit_vdev = vpci_deinit_pt_dev,
391+
.write_vdev_cfg = vpci_write_pt_dev_cfg,
392+
.read_vdev_cfg = vpci_read_pt_dev_cfg,
393+
};
394+
333395
/**
334396
* @pre vpci != NULL
335397
*/
@@ -339,14 +401,7 @@ static void read_cfg(const struct acrn_vpci *vpci, union pci_bdf bdf,
339401
struct pci_vdev *vdev = find_vdev(vpci, bdf);
340402

341403
if (vdev != NULL) {
342-
if ((vhostbridge_read_cfg(vdev, offset, bytes, val) != 0)
343-
&& (vdev_pt_read_cfg(vdev, offset, bytes, val) != 0)
344-
&& (vmsi_read_cfg(vdev, offset, bytes, val) != 0)
345-
&& (vmsix_read_cfg(vdev, offset, bytes, val) != 0)
346-
) {
347-
/* Not handled by any handlers, passthru to physical device */
348-
*val = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
349-
}
404+
vdev->vdev_ops->read_vdev_cfg(vdev, offset, bytes, val);
350405
}
351406
}
352407

@@ -359,14 +414,7 @@ static void write_cfg(const struct acrn_vpci *vpci, union pci_bdf bdf,
359414
struct pci_vdev *vdev = find_vdev(vpci, bdf);
360415

361416
if (vdev != NULL) {
362-
if ((vhostbridge_write_cfg(vdev, offset, bytes, val) != 0)
363-
&& (vdev_pt_write_cfg(vdev, offset, bytes, val) != 0)
364-
&& (vmsi_write_cfg(vdev, offset, bytes, val) != 0)
365-
&& (vmsix_write_cfg(vdev, offset, bytes, val) != 0)
366-
) {
367-
/* Not handled by any handlers, passthru to physical device */
368-
pci_pdev_write_cfg(vdev->pdev->bdf, offset, bytes, val);
369-
}
417+
vdev->vdev_ops->write_vdev_cfg(vdev, offset, bytes, val);
370418
}
371419
}
372420

@@ -425,24 +473,8 @@ static void init_vdev_for_pdev(struct pci_pdev *pdev, const struct acrn_vm *vm)
425473
vdev->bdf.value = pdev->bdf.value;
426474
}
427475

428-
init_vhostbridge(vdev);
429-
init_vmsi(vdev);
430-
init_vmsix(vdev);
431-
432-
/*
433-
* Here init_vdev_pt() needs to be called after init_vmsix() for the following reason:
434-
* init_vdev_pt() will indirectly call has_msix_cap(), which
435-
* requires init_vmsix() to be called first.
436-
*/
437-
init_vdev_pt(vdev);
438-
439-
/*
440-
* For pre-launched VM, the host bridge is fully virtualized and it does not have a physical
441-
* host bridge counterpart.
442-
*/
443-
if ((is_prelaunched_vm(vm) && !is_hostbridge(vdev)) || is_sos_vm(vm)) {
444-
assign_vdev_pt_iommu_domain(vdev);
445-
}
476+
vdev->vdev_ops = &pci_pt_dev_ops;
477+
vdev->vdev_ops->init_vdev(vdev);
446478
}
447479
}
448480

@@ -471,13 +503,7 @@ static void deinit_prelaunched_vm_vpci(const struct acrn_vm *vm)
471503
for (i = 0U; i < vm->vpci.pci_vdev_cnt; i++) {
472504
vdev = (struct pci_vdev *) &(vm->vpci.pci_vdevs[i]);
473505

474-
deinit_vhostbridge(vdev);
475-
deinit_vmsi(vdev);
476-
deinit_vmsix(vdev);
477-
478-
if ((is_prelaunched_vm(vm) && !is_hostbridge(vdev)) || is_sos_vm(vm)) {
479-
remove_vdev_pt_iommu_domain(vdev);
480-
}
506+
vdev->vdev_ops->deinit_vdev(vdev);
481507
}
482508
}
483509

0 commit comments

Comments
 (0)