
Commit 2abd8b3

binbinwu1 authored and acrnsi committed
hv: vtd: export iommu_flush_cache
VT-d shares the EPT tables as the second-level translation tables. For IOMMUs that don't support page-walk coherency, the CPU cache must be flushed for IOMMU EPT entries that are modified. In the current implementation, the EPT tables translating GPA to HPA for EPT/IOMMU are not modified after the VM is created, so CPU cache invalidation is done once per VM before the VM starts executing. However, this may change: runtime EPT modification is possible. If the CPU cache of an EPT entry is invalidated at the time of modification, there is no need to invalidate the CPU cache globally per VM. This patch exports iommu_flush_cache for EPT entry cache invalidation operations.

- IOMMUs share the same copy of the EPT table, so the CPU cache should be flushed if any active IOMMU doesn't support page-walk coherency.
- In the context of ACRN, the GPA to HPA mapping relationship is not changed after the VM is created, so flushing the iotlb is skipped to avoid a potential performance penalty.

Tracked-On: #3607
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>
1 parent 826aaf7 commit 2abd8b3
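
A minimal sketch of the caller pattern this export enables, assuming a hypothetical runtime EPT update path (ept_set_pte and its arguments are illustrative, not part of this patch; the iommu_flush_cache() declaration from vtd.h is assumed to be in scope):

/* Hypothetical runtime EPT modification: write a single EPT entry, then
 * flush only the cache line(s) covering that entry instead of invalidating
 * the CPU cache for the whole table once per VM.  iommu_flush_cache() is a
 * no-op when every active IOMMU supports page-walk coherency.
 */
static void ept_set_pte(uint64_t *ptep, uint64_t new_val)
{
	*ptep = new_val;
	iommu_flush_cache(ptep, (uint32_t)sizeof(uint64_t));
}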

File tree

3 files changed, +29 -15 lines changed

hypervisor/arch/x86/vtd.c
hypervisor/include/arch/x86/mmu.h
hypervisor/include/arch/x86/vtd.h

hypervisor/arch/x86/vtd.c

Lines changed: 12 additions & 10 deletions
@@ -287,16 +287,18 @@ static inline void dmar_wait_completion(const struct dmar_drhd_rt *dmar_unit, ui
 	}
 }
 
-/* flush cache when root table, context table updated */
-static void iommu_flush_cache(const struct dmar_drhd_rt *dmar_unit,
-	void *p, uint32_t size)
+/* Flush CPU cache when root table, context table or second-level translation table updated.
+ * In the context of ACRN, GPA to HPA mapping relationship is not changed after VM created,
+ * skip flushing iotlb to avoid performance penalty.
+ */
+void iommu_flush_cache(const void *p, uint32_t size)
 {
 	uint32_t i;
 
 	/* if vtd support page-walk coherency, no need to flush cacheline */
-	if (iommu_ecap_c(dmar_unit->ecap) == 0U) {
+	if (!iommu_page_walk_coherent) {
 		for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
-			clflush((char *)p + i);
+			clflush((const char *)p + i);
 		}
 	}
 }
@@ -1088,7 +1090,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
 
 		root_entry->hi_64 = 0UL;
 		root_entry->lo_64 = lo_64;
-		iommu_flush_cache(dmar_unit, root_entry, sizeof(struct dmar_entry));
+		iommu_flush_cache(root_entry, sizeof(struct dmar_entry));
 	} else {
 		context_table_addr = dmar_get_bitslice(root_entry->lo_64,
 				ROOT_ENTRY_LOWER_CTP_MASK, ROOT_ENTRY_LOWER_CTP_POS);
@@ -1143,7 +1145,7 @@ static int32_t add_iommu_device(struct iommu_domain *domain, uint16_t segment, u
 
 			context_entry->hi_64 = hi_64;
 			context_entry->lo_64 = lo_64;
-			iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
+			iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
 		}
 	}
 }
@@ -1192,7 +1194,7 @@ static int32_t remove_iommu_device(const struct iommu_domain *domain, uint16_t s
 		/* clear the present bit first */
 		context_entry->lo_64 = 0UL;
 		context_entry->hi_64 = 0UL;
-		iommu_flush_cache(dmar_unit, context_entry, sizeof(struct dmar_entry));
+		iommu_flush_cache(context_entry, sizeof(struct dmar_entry));
 
 		sid.bits.b = bus;
 		sid.bits.d = pci_slot(devfun);
@@ -1376,7 +1378,7 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
 		ir_entry->entry.hi_64 = irte.entry.hi_64;
 		ir_entry->entry.lo_64 = irte.entry.lo_64;
 
-		iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
+		iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
 		dmar_invalid_iec(dmar_unit, index, 0U, false);
 	}
 	return ret;
@@ -1407,7 +1409,7 @@ void dmar_free_irte(struct intr_source intr_src, uint16_t index)
 		ir_entry = ir_table + index;
 		ir_entry->bits.present = 0x0UL;
 
-		iommu_flush_cache(dmar_unit, ir_entry, sizeof(union dmar_ir_entry));
+		iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry));
 		dmar_invalid_iec(dmar_unit, index, 0U, false);
 	}
 }

hypervisor/include/arch/x86/mmu.h

Lines changed: 4 additions & 4 deletions
@@ -102,8 +102,8 @@ enum _page_table_level {
 #define PAGE_SIZE_2M MEM_2M
 #define PAGE_SIZE_1G MEM_1G
 
-void sanitize_pte_entry(uint64_t *ptep);
-void sanitize_pte(uint64_t *pt_page);
+void sanitize_pte_entry(uint64_t *ptep, const struct memory_ops *mem_ops);
+void sanitize_pte(uint64_t *pt_page, const struct memory_ops *mem_ops);
 /**
  * @brief MMU paging enable
  *
@@ -176,12 +176,12 @@ static inline void cache_flush_invalidate_all(void)
 	asm volatile (" wbinvd\n" : : : "memory");
 }
 
-static inline void clflush(volatile void *p)
+static inline void clflush(const volatile void *p)
 {
 	asm volatile ("clflush (%0)" :: "r"(p));
 }
 
-static inline void clflushopt(volatile void *p)
+static inline void clflushopt(const volatile void *p)
 {
 	asm volatile ("clflushopt (%0)" :: "r"(p));
 }

hypervisor/include/arch/x86/vtd.h

Lines changed: 13 additions & 1 deletion
@@ -549,7 +549,7 @@ struct iommu_domain;
  * @brief Assign a device specified by bus & devfun to a iommu domain.
  *
  * Remove the device from the from_domain (if non-NULL), and add it to the to_domain (if non-NULL).
- * API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units.
+ * API silently fails to add/remove devices to/from domains that are under "Ignored" DMAR units.
 *
 * @param[in] from_domain iommu domain from which the device is removed
 * @param[in] to_domain iommu domain to which the device is assigned
@@ -665,6 +665,18 @@ int32_t dmar_assign_irte(struct intr_source intr_src, union dmar_ir_entry irte,
 *
 */
 void dmar_free_irte(struct intr_source intr_src, uint16_t index);
+
+/**
+ * @brief Flush cacheline(s) for a specific address with specific size.
+ *
+ * Flush cacheline(s) for a specific address with specific size;
+ * if all active IOMMUs support page-walk coherency, cacheline(s) are not flushed.
+ *
+ * @param[in] p the address of the buffer whose cache needs to be invalidated
+ * @param[in] size the size of the buffer
+ *
+ */
+void iommu_flush_cache(const void *p, uint32_t size);
 /**
 * @}
 */
