Skip to content

Commit a6944fe

Browse files
binbinwu authored and wenlingz committed
hv: vtd: export iommu_flush_cache
VT-d shares the EPT tables as the second level translation tables. For the IOMMUs that don't support page-walk coherecy, cpu cache should be flushed for the IOMMU EPT entries that are modified. For the current implementation, EPT tables for translating from GPA to HPA for EPT/IOMMU are not modified after VM is created, so cpu cache invlidation is done once per VM before starting execution of VM. However, this may be changed, runtime EPT modification is possible. When cpu cache of EPT entries is invalidated when modification, there is no need invalidate cpu cache globally per VM. This patch exports iommu_flush_cache for EPT entry cache invlidation operations. - IOMMUs share the same copy of EPT table, cpu cache should be flushed if any of the IOMMU active doesn't support page-walk coherency. - In the context of ACRN, GPA to HPA mapping relationship is not changed after VM created, skip flushing iotlb to avoid potential performance penalty. Tracked-On: #4120 Signed-off-by: Binbin Wu <binbin.wu@intel.com> Reviewed-by: Anthony Xu <anthony.xu@intel.com>
1 parent 30a773f commit a6944fe

File tree

3 files changed

+28
-14
lines changed

3 files changed

+28
-14
lines changed

hypervisor/arch/x86/vtd.c

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -284,16 +284,18 @@ static inline void dmar_wait_completion(const struct dmar_drhd_rt *dmar_unit, ui
284284
}
285285
}
286286

287-
/* flush cache when root table, context table updated */
288-
static void iommu_flush_cache(const struct dmar_drhd_rt *dmar_unit,
289-
void *p, uint32_t size)
287+
/* Flush CPU cache when root table, context table or second-level translation table updated
288+
* In the context of ACRN, GPA to HPA mapping relationship is not changed after VM created,
289+
* skip flushing iotlb to avoid performance penalty.
290+
*/
291+
void iommu_flush_cache(const void *p, uint32_t size)
290292
{
291293
uint32_t i;
292294

293295
/* if vtd support page-walk coherency, no need to flush cacheline */
294-
if (iommu_ecap_c(dmar_unit->ecap) == 0U) {
296+
if (!iommu_page_walk_coherent) {
295297
for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
296-
clflush((char *)p + i);
298+
clflush((const char *)p + i);
297299
}
298300
}
299301
}
@@ -1077,7 +1079,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
10771079

10781080
root_entry->hi_64 = 0UL;
10791081
root_entry->lo_64 = lo_64;
1080-
iommu_flush_cache(dmar_unit, root_entry, sizeof(struct dmar_entry));
1082+
iommu_flush_cache(root_entry, sizeof(struct dmar_entry));
10811083
} else {
10821084
context_table_addr = dmar_get_bitslice(root_entry->lo_64,
10831085
ROOT_ENTRY_LOWER_CTP_MASK, ROOT_ENTRY_LOWER_CTP_POS);
@@ -1132,7 +1134,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
11321134

11331135
context_entry->hi_64 = hi_64;
11341136
context_entry->lo_64 = lo_64;
1135-
iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
1137+
iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
11361138
}
11371139
}
11381140
}
@@ -1181,7 +1183,7 @@ static int32_t remove_iommu_device(const struct iommu_domain *domain, uint16_t s
11811183
/* clear the present bit first */
11821184
context_entry->lo_64 = 0UL;
11831185
context_entry->hi_64 = 0UL;
1184-
iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
1186+
iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
11851187

11861188
sid.bits.b = bus;
11871189
sid.bits.d = pci_slot(devfun);
@@ -1370,7 +1372,7 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
13701372
ir_entry->entry.hi_64 = irte.entry.hi_64;
13711373
ir_entry->entry.lo_64 = irte.entry.lo_64;
13721374

1373-
iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
1375+
iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
13741376
dmar_invalid_iec(dmar_unit, index, 0U, false);
13751377
}
13761378
return ret;
@@ -1401,7 +1403,7 @@ void dmar_free_irte(struct intr_source intr_src, uint16_t index)
14011403
ir_entry = ir_table + index;
14021404
ir_entry->bits.present = 0x0UL;
14031405

1404-
iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
1406+
iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
14051407
dmar_invalid_iec(dmar_unit, index, 0U, false);
14061408
}
14071409
}

hypervisor/include/arch/x86/mmu.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -102,8 +102,8 @@ enum _page_table_level {
102102
#define PAGE_SIZE_2M MEM_2M
103103
#define PAGE_SIZE_1G MEM_1G
104104

105-
void sanitize_pte_entry(uint64_t *ptep);
106-
void sanitize_pte(uint64_t *pt_page);
105+
void sanitize_pte_entry(uint64_t *ptep, const struct memory_ops *mem_ops);
106+
void sanitize_pte(uint64_t *pt_page, const struct memory_ops *mem_ops);
107107
/**
108108
* @brief MMU paging enable
109109
*
@@ -171,7 +171,7 @@ static inline void cache_flush_invalidate_all(void)
171171
asm volatile (" wbinvd\n" : : : "memory");
172172
}
173173

174-
static inline void clflush(volatile void *p)
174+
static inline void clflush(const volatile void *p)
175175
{
176176
asm volatile ("clflush (%0)" :: "r"(p));
177177
}

hypervisor/include/arch/x86/vtd.h

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -542,7 +542,7 @@ struct iommu_domain;
542542
* @brief Assign a device specified by bus & devfun to a iommu domain.
543543
*
544544
* Remove the device from the from_domain (if non-NULL), and add it to the to_domain (if non-NULL).
545-
* API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units.
545+
* API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units.
546546
*
547547
* @param[in] from_domain iommu domain from which the device is removed from
548548
* @param[in] to_domain iommu domain to which the device is assigned
@@ -666,6 +666,18 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
666666
*
667667
*/
668668
void dmar_free_irte(struct intr_source intr_src, uint16_t index);
669+
670+
/**
671+
* @brief Flush cacheline(s) for a specific address with specific size.
672+
*
673+
* Flush cacheline(s) for a specific address with specific size,
674+
* if all active IOMMUs support page-walk coherency, cacheline(s) are not flushed.
675+
*
676+
* @param[in] p the address of the buffer, whose cache need to be invalidated
677+
* @param[in] size the size of the buffer
678+
*
679+
*/
680+
void iommu_flush_cache(const void *p, uint32_t size);
669681
/**
670682
* @}
671683
*/

0 commit comments

Comments
 (0)