VT-d: centralize mapping of QI entries
Introduce a helper function to reduce redundancy. Take the opportunity
to express the logic without using the somewhat odd QINVAL_ENTRY_ORDER.
Also take the opportunity to uniformly unmap after updating the queue
tail and dropping the lock (as was previously done only by
queue_invalidate_context_sync()).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
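
The new page/offset arithmetic in qi_map_entry() is meant to be equivalent to the old QINVAL_ENTRY_ORDER based computation. Below is a standalone sketch, not part of the patch, which checks that equivalence under the assumption of 4 KiB pages and 16-byte queued-invalidation descriptors (so QINVAL_ENTRY_ORDER would be PAGE_SHIFT - 4); the macros and the dummy struct are local stand-ins for Xen's definitions.

/*
 * Standalone sketch (not from the patch): verify that the old
 * QINVAL_ENTRY_ORDER based entry address calculation and the new
 * sizeof()/PAGE_MASK based one used by qi_map_entry() agree.
 * Assumes 4 KiB pages and 16-byte qinval_entry descriptors.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT          12
#define PAGE_SIZE           (1ULL << PAGE_SHIFT)
#define PAGE_MASK           (~(PAGE_SIZE - 1))
#define QINVAL_ENTRY_ORDER  (PAGE_SHIFT - 4)        /* 256 entries per page */

struct qinval_entry { uint64_t lo, hi; };           /* 16 bytes */

int main(void)
{
    const uint64_t qinval_maddr = 0x12340000ULL;    /* arbitrary, page aligned */
    unsigned int index;

    for ( index = 0; index < 4 * (PAGE_SIZE / sizeof(struct qinval_entry)); index++ )
    {
        /* Old: page selected via QINVAL_ENTRY_ORDER, then index within page. */
        uint64_t old_addr = qinval_maddr +
                            ((uint64_t)(index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT) +
                            (index % (1 << QINVAL_ENTRY_ORDER)) *
                            sizeof(struct qinval_entry);

        /* New: byte offset masked to the page, as in qi_map_entry(). */
        uint64_t new_addr = qinval_maddr +
                            ((index * sizeof(struct qinval_entry)) & PAGE_MASK) +
                            (index % (PAGE_SIZE / sizeof(struct qinval_entry))) *
                            sizeof(struct qinval_entry);

        assert(old_addr == new_addr);
    }

    puts("old and new QI entry address calculations agree");
    return 0;
}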
jbeulich committed Jun 24, 2021
1 parent e705977 commit a0eb197
Showing 1 changed file with 29 additions and 35 deletions.
64 changes: 29 additions & 35 deletions xen/drivers/passthrough/vtd/qinval.c
@@ -69,22 +69,28 @@ static void qinval_update_qtail(struct vtd_iommu *iommu, unsigned int index)
     dmar_writel(iommu->reg, DMAR_IQT_REG, val << QINVAL_INDEX_SHIFT);
 }
 
+static struct qinval_entry *qi_map_entry(const struct vtd_iommu *iommu,
+                                         unsigned int index)
+{
+    paddr_t base = iommu->qinval_maddr +
+                   ((index * sizeof(struct qinval_entry)) & PAGE_MASK);
+    struct qinval_entry *entries = map_vtd_domain_page(base);
+
+    return &entries[index % (PAGE_SIZE / sizeof(*entries))];
+}
+
 static int __must_check queue_invalidate_context_sync(struct vtd_iommu *iommu,
                                                       u16 did, u16 source_id,
                                                       u8 function_mask,
                                                       u8 granu)
 {
     unsigned long flags;
     unsigned int index;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
     qinval_entry->q.cc_inv_dsc.lo.granu = granu;
@@ -98,7 +104,7 @@ static int __must_check queue_invalidate_context_sync(struct vtd_iommu *iommu,
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
-    unmap_vtd_domain_page(qinval_entries);
+    unmap_vtd_domain_page(qinval_entry);
 
     return invalidate_sync(iommu);
 }
@@ -110,15 +116,11 @@ static int __must_check queue_invalidate_iotlb_sync(struct vtd_iommu *iommu,
 {
     unsigned long flags;
     unsigned int index;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
     qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
@@ -133,10 +135,11 @@ static int __must_check queue_invalidate_iotlb_sync(struct vtd_iommu *iommu,
     qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
     qinval_entry->q.iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
 
-    unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+    unmap_vtd_domain_page(qinval_entry);
+
     return invalidate_sync(iommu);
 }
 
@@ -147,17 +150,13 @@ static int __must_check queue_invalidate_wait(struct vtd_iommu *iommu,
     static DEFINE_PER_CPU(uint32_t, poll_slot);
     unsigned int index;
     unsigned long flags;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
     uint32_t *this_poll_slot = &this_cpu(poll_slot);
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     ACCESS_ONCE(*this_poll_slot) = QINVAL_STAT_INIT;
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
     qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
@@ -167,10 +166,11 @@ static int __must_check queue_invalidate_wait(struct vtd_iommu *iommu,
     qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE;
     qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(this_poll_slot);
 
-    unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+    unmap_vtd_domain_page(qinval_entry);
+
     /* Now we don't support interrupt method */
     if ( sw )
     {
@@ -246,16 +246,12 @@ int qinval_device_iotlb_sync(struct vtd_iommu *iommu, struct pci_dev *pdev,
 {
     unsigned long flags;
     unsigned int index;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
 
     ASSERT(pdev);
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
     qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
@@ -268,10 +264,11 @@ int qinval_device_iotlb_sync(struct vtd_iommu *iommu, struct pci_dev *pdev,
     qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
     qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;
 
-    unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+    unmap_vtd_domain_page(qinval_entry);
+
     return dev_invalidate_sync(iommu, pdev, did);
 }
 
@@ -280,16 +277,12 @@ static int __must_check queue_invalidate_iec_sync(struct vtd_iommu *iommu,
 {
     unsigned long flags;
     unsigned int index;
-    u64 entry_base;
-    struct qinval_entry *qinval_entry, *qinval_entries;
+    struct qinval_entry *qinval_entry;
     int ret;
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     index = qinval_next_index(iommu);
-    entry_base = iommu->qinval_maddr +
-                 ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT);
-    qinval_entries = map_vtd_domain_page(entry_base);
-    qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)];
+    qinval_entry = qi_map_entry(iommu, index);
 
     qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
     qinval_entry->q.iec_inv_dsc.lo.granu = granu;
@@ -299,10 +292,11 @@ static int __must_check queue_invalidate_iec_sync(struct vtd_iommu *iommu,
     qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
     qinval_entry->q.iec_inv_dsc.hi.res = 0;
 
-    unmap_vtd_domain_page(qinval_entries);
     qinval_update_qtail(iommu, index);
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 
+    unmap_vtd_domain_page(qinval_entry);
+
     ret = invalidate_sync(iommu);
 
     /*
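
For reference, after this change the five queueing functions all share the shape sketched below. This is a hedged outline rather than code from the patch: queue_invalidate_example() and the descriptor fields it fills are placeholders, while qi_map_entry(), qinval_next_index(), qinval_update_qtail(), unmap_vtd_domain_page() and invalidate_sync() are the helpers visible in the diff above. Passing the entry pointer (rather than the page base) to the unmap works because the unmap acts on the page containing the passed address, which is what allows the unmap to move uniformly after the tail update and unlock.

/*
 * Hedged outline of the common pattern; queue_invalidate_example() and the
 * fields it sets are placeholders, not code from the patch.
 */
static int __must_check queue_invalidate_example(struct vtd_iommu *iommu)
{
    unsigned long flags;
    unsigned int index;
    struct qinval_entry *qinval_entry;

    spin_lock_irqsave(&iommu->register_lock, flags);
    index = qinval_next_index(iommu);
    qinval_entry = qi_map_entry(iommu, index);   /* maps the page holding the entry */

    /* Fill in the descriptor while still holding the register lock. */
    qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
    /* ... remaining descriptor fields ... */

    qinval_update_qtail(iommu, index);
    spin_unlock_irqrestore(&iommu->register_lock, flags);

    /*
     * Unmap uniformly after the tail update and after dropping the lock;
     * the entry pointer suffices, as the unmap operates on the page
     * containing it.
     */
    unmap_vtd_domain_page(qinval_entry);

    return invalidate_sync(iommu);
}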
