Skip to content

Commit

Permalink
hv: vtd: export iommu_flush_cache
Browse files Browse the repository at this point in the history
VT-d shares the EPT tables as the second level translation tables.
For the IOMMUs that don't support page-walk coherency, cpu cache should
be flushed for the IOMMU EPT entries that are modified.

For the current implementation, EPT tables for translating from GPA to HPA
for EPT/IOMMU are not modified after VM is created, so cpu cache invalidation is
done once per VM before starting execution of VM.
However, this may be changed, runtime EPT modification is possible.

When the cpu cache of EPT entries is invalidated upon modification, there is no need
to invalidate the cpu cache globally per VM.

This patch exports iommu_flush_cache for EPT entry cache invalidation operations.
- IOMMUs share the same copy of EPT table, cpu cache should be flushed if any of
  the IOMMU active doesn't support page-walk coherency.
- In the context of ACRN, GPA to HPA mapping relationship is not changed after
  VM created, skip flushing iotlb to avoid potential performance penalty.

Tracked-On: #4120
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>
  • Loading branch information
binbinwu1 authored and wenlingz committed Nov 19, 2019
1 parent 30a773f commit a6944fe
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 14 deletions.
22 changes: 12 additions & 10 deletions hypervisor/arch/x86/vtd.c
Expand Up @@ -284,16 +284,18 @@ static inline void dmar_wait_completion(const struct dmar_drhd_rt *dmar_unit, ui
}
}

/* Flush CPU cache when root table, context table or second-level translation table updated.
 * In the context of ACRN, GPA to HPA mapping relationship is not changed after VM created,
 * skip flushing iotlb to avoid performance penalty.
 */
void iommu_flush_cache(const void *p, uint32_t size)
{
	uint32_t i;

	/* if all active IOMMUs support page-walk coherency, no need to flush cachelines */
	if (!iommu_page_walk_coherent) {
		/* flush every cacheline covering [p, p + size) */
		for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
			clflush((const char *)p + i);
		}
	}
}
Expand Down Expand Up @@ -1077,7 +1079,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u

root_entry->hi_64 = 0UL;
root_entry->lo_64 = lo_64;
iommu_flush_cache(dmar_unit, root_entry, sizeof(struct dmar_entry));
iommu_flush_cache(root_entry, sizeof(struct dmar_entry));
} else {
context_table_addr = dmar_get_bitslice(root_entry->lo_64,
ROOT_ENTRY_LOWER_CTP_MASK, ROOT_ENTRY_LOWER_CTP_POS);
Expand Down Expand Up @@ -1132,7 +1134,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u

context_entry->hi_64 = hi_64;
context_entry->lo_64 = lo_64;
iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
}
}
}
Expand Down Expand Up @@ -1181,7 +1183,7 @@ static int32_t remove_iommu_device(const struct iommu_domain *domain, uint16_t s
/* clear the present bit first */
context_entry->lo_64 = 0UL;
context_entry->hi_64 = 0UL;
iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
iommu_flush_cache(context_entry, sizeof(struct dmar_entry));

sid.bits.b = bus;
sid.bits.d = pci_slot(devfun);
Expand Down Expand Up @@ -1370,7 +1372,7 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
ir_entry->entry.hi_64 = irte.entry.hi_64;
ir_entry->entry.lo_64 = irte.entry.lo_64;

iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
dmar_invalid_iec(dmar_unit, index, 0U, false);
}
return ret;
Expand Down Expand Up @@ -1401,7 +1403,7 @@ void dmar_free_irte(struct intr_source intr_src, uint16_t index)
ir_entry = ir_table + index;
ir_entry->bits.present = 0x0UL;

iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
dmar_invalid_iec(dmar_unit, index, 0U, false);
}
}
6 changes: 3 additions & 3 deletions hypervisor/include/arch/x86/mmu.h
Expand Up @@ -102,8 +102,8 @@ enum _page_table_level {
#define PAGE_SIZE_2M MEM_2M
#define PAGE_SIZE_1G MEM_1G

/* Overwrite a single page-table entry with the sanitized (safe) value. */
void sanitize_pte_entry(uint64_t *ptep, const struct memory_ops *mem_ops);
/* Sanitize every entry of a page-table page. */
void sanitize_pte(uint64_t *pt_page, const struct memory_ops *mem_ops);
/**
* @brief MMU paging enable
*
Expand Down Expand Up @@ -171,7 +171,7 @@ static inline void cache_flush_invalidate_all(void)
asm volatile (" wbinvd\n" : : : "memory");
}

/* Flush the cache line containing address p from every level of the cache
 * hierarchy. Takes const volatile so it can be used on read-only and
 * device-backed mappings alike. (__asm__ spelling keeps this valid in
 * strict ISO C modes, where the plain `asm` keyword is unavailable.)
 */
static inline void clflush(const volatile void *p)
{
	__asm__ volatile ("clflush (%0)" :: "r"(p));
}
Expand Down
14 changes: 13 additions & 1 deletion hypervisor/include/arch/x86/vtd.h
Expand Up @@ -542,7 +542,7 @@ struct iommu_domain;
* @brief Assign a device specified by bus & devfun to a iommu domain.
*
* Remove the device from the from_domain (if non-NULL), and add it to the to_domain (if non-NULL).
* API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units.
* API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units.
*
* @param[in] from_domain iommu domain from which the device is removed from
* @param[in] to_domain iommu domain to which the device is assgined to
Expand Down Expand Up @@ -666,6 +666,18 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
*
*/
void dmar_free_irte(struct intr_source intr_src, uint16_t index);

/**
* @brief Flush cacheline(s) for a specific address with specific size.
*
* Flush cacheline(s) for a specific address with specific size,
* if all active IOMMUs support page-walk coherency, cacheline(s) are not flushed.
*
* @param[in] p the address of the buffer, whose cache need to be invalidated
* @param[in] size the size of the buffer
*
*/
void iommu_flush_cache(const void *p, uint32_t size);
/**
* @}
*/
Expand Down

0 comments on commit a6944fe

Please sign in to comment.