45 | 45 | #include <asm/iommu.h> |
46 | 46 |
47 | 47 | #include "irq_remapping.h" |
48 | | -#include "pci.h" |
49 | 48 |
50 | 49 | #define ROOT_SIZE VTD_PAGE_SIZE |
51 | 50 | #define CONTEXT_SIZE VTD_PAGE_SIZE |
@@ -4373,91 +4372,21 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain, |
4373 | 4372 | return 0; |
4374 | 4373 | } |
4375 | 4374 |
4376 | | -#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) |
4377 | | - |
4378 | 4375 | static int intel_iommu_add_device(struct device *dev) |
4379 | 4376 | { |
4380 | | - struct pci_dev *pdev = to_pci_dev(dev); |
4381 | | - struct pci_dev *bridge, *dma_pdev = NULL; |
4382 | 4377 | struct iommu_group *group; |
4383 | | - int ret; |
4384 | 4378 | u8 bus, devfn; |
4385 | 4379 |
4386 | 4380 | if (!device_to_iommu(dev, &bus, &devfn)) |
4387 | 4381 | return -ENODEV; |
4388 | 4382 |
4389 | | - bridge = pci_find_upstream_pcie_bridge(pdev); |
4390 | | - if (bridge) { |
4391 | | - if (pci_is_pcie(bridge)) |
4392 | | - dma_pdev = pci_get_domain_bus_and_slot( |
4393 | | - pci_domain_nr(pdev->bus), |
4394 | | - bridge->subordinate->number, 0); |
4395 | | - if (!dma_pdev) |
4396 | | - dma_pdev = pci_dev_get(bridge); |
4397 | | - } else |
4398 | | - dma_pdev = pci_dev_get(pdev); |
4399 | | - |
4400 | | - /* Account for quirked devices */ |
4401 | | - swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); |
4402 | | - |
4403 | | - /* |
4404 | | - * If it's a multifunction device that does not support our |
4405 | | - * required ACS flags, add to the same group as lowest numbered |
4406 | | - * function that also does not suport the required ACS flags. |
4407 | | - */ |
4408 | | - if (dma_pdev->multifunction && |
4409 | | - !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) { |
4410 | | - u8 i, slot = PCI_SLOT(dma_pdev->devfn); |
4411 | | - |
4412 | | - for (i = 0; i < 8; i++) { |
4413 | | - struct pci_dev *tmp; |
4414 | | - |
4415 | | - tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i)); |
4416 | | - if (!tmp) |
4417 | | - continue; |
4418 | | - |
4419 | | - if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) { |
4420 | | - swap_pci_ref(&dma_pdev, tmp); |
4421 | | - break; |
4422 | | - } |
4423 | | - pci_dev_put(tmp); |
4424 | | - } |
4425 | | - } |
4426 | | - |
4427 | | - /* |
4428 | | - * Devices on the root bus go through the iommu. If that's not us, |
4429 | | - * find the next upstream device and test ACS up to the root bus. |
4430 | | - * Finding the next device may require skipping virtual buses. |
4431 | | - */ |
4432 | | - while (!pci_is_root_bus(dma_pdev->bus)) { |
4433 | | - struct pci_bus *bus = dma_pdev->bus; |
4434 | | - |
4435 | | - while (!bus->self) { |
4436 | | - if (!pci_is_root_bus(bus)) |
4437 | | - bus = bus->parent; |
4438 | | - else |
4439 | | - goto root_bus; |
4440 | | - } |
4441 | | - |
4442 | | - if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) |
4443 | | - break; |
4444 | | - |
4445 | | - swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); |
4446 | | - } |
| 4383 | + group = iommu_group_get_for_dev(dev); |
4447 | 4384 |
4448 | | -root_bus: |
4449 | | - group = iommu_group_get(&dma_pdev->dev); |
4450 | | - pci_dev_put(dma_pdev); |
4451 | | - if (!group) { |
4452 | | - group = iommu_group_alloc(); |
4453 | | - if (IS_ERR(group)) |
4454 | | - return PTR_ERR(group); |
4455 | | - } |
4456 | | - |
4457 | | - ret = iommu_group_add_device(group, dev); |
| 4385 | + if (IS_ERR(group)) |
| 4386 | + return PTR_ERR(group); |
4458 | 4387 |
4459 | 4388 | iommu_group_put(group); |
4460 | | - return ret; |
| 4389 | + return 0; |
4461 | 4390 | } |
4462 | 4391 |
4463 | 4392 | static void intel_iommu_remove_device(struct device *dev) |
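Net effect of the hunk: the open-coded walk over upstream PCIe bridges, DMA source quirks, and ACS checks is dropped, and group assignment is delegated to the IOMMU core via iommu_group_get_for_dev(). As a readability aid (not a further excerpt from the tree), the kept and added lines above reconstruct to the following intel_iommu_add_device(); the inline comments are editorial:

static int intel_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	u8 bus, devfn;

	/* No IOMMU translates this device: nothing to add. */
	if (!device_to_iommu(dev, &bus, &devfn))
		return -ENODEV;

	/* Core code finds or allocates the group and adds the device to it. */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* The group is returned with a reference held; drop it. */
	iommu_group_put(group);
	return 0;
}

This is also why the local ret variable and the explicit iommu_group_add_device() call disappear: iommu_group_get_for_dev() either returns a group the device has already been added to or an ERR_PTR, so the only remaining cleanup is the iommu_group_put().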