From ee717eb6d46b5285db1aae9172ecdfc70b9cd9ca Mon Sep 17 00:00:00 2001
From: Nicolin Chen
Date: Fri, 10 Feb 2023 19:37:59 -0800
Subject: [PATCH] TMP iommu/arm-smmu-v3: Add arm_smmu_cache_invalidate_user

Draft a solution using mmap to share a kernel page with the hypervisor
to dispatch all TLBI commands.

Signed-off-by: Nicolin Chen
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 82 +++++++++++++++++++++
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |  2 +
 drivers/iommu/iommufd/main.c                | 46 ++++++++++++
 include/linux/iommu.h                       |  1 +
 include/uapi/linux/iommufd.h                | 17 +----
 5 files changed, 135 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index fe38d1274483..d2b9e25a3121 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2093,6 +2093,9 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 		arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
 	}
 
+	if (smmu_domain->cmdq_user)
+		free_pages_exact(smmu_domain->cmdq_user,
+				 smmu_domain->cmdq_user_size);
 	kfree(smmu_domain);
 }
 
@@ -2878,10 +2881,89 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 	arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
 }
 
+static int arm_smmu_fix_user_cmd(struct arm_smmu_domain *smmu_domain, u64 *cmd)
+{
+	struct arm_smmu_stream *stream;
+
+	switch (*cmd & CMDQ_0_OP) {
+	case CMDQ_OP_TLBI_NSNH_ALL:
+		*cmd &= ~CMDQ_0_OP;
+		*cmd |= CMDQ_OP_TLBI_NH_ALL;
+		fallthrough;
+	case CMDQ_OP_TLBI_NH_VA:
+	case CMDQ_OP_TLBI_NH_VAA:
+	case CMDQ_OP_TLBI_NH_ALL:
+	case CMDQ_OP_TLBI_NH_ASID:
+		*cmd &= ~CMDQ_TLBI_0_VMID;
+		*cmd |= FIELD_PREP(CMDQ_TLBI_0_VMID,
+				   smmu_domain->s2->s2_cfg.vmid);
+		break;
+	case CMDQ_OP_ATC_INV:
+	case CMDQ_OP_CFGI_CD:
+	case CMDQ_OP_CFGI_CD_ALL:
+		xa_lock(&smmu_domain->smmu->user_streams);
+		stream = xa_load(&smmu_domain->smmu->user_streams,
+				 FIELD_GET(CMDQ_CFGI_0_SID, *cmd));
+		xa_unlock(&smmu_domain->smmu->user_streams);
+		if (!stream)
+			return -ENODEV;
+		*cmd &= ~CMDQ_CFGI_0_SID;
+		*cmd |= FIELD_PREP(CMDQ_CFGI_0_SID, stream->id);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	pr_debug("Fixed user CMD: %016llx : %016llx\n", cmd[1], cmd[0]);
+
+	return 0;
+}
+
+static void arm_smmu_cache_invalidate_user(struct iommu_domain *domain,
+					   void *user_data)
+{
+	const u32 cons_err = FIELD_PREP(CMDQ_CONS_ERR, CMDQ_ERR_CERROR_ILL_IDX);
+	struct iommu_hwpt_invalidate_arm_smmuv3 *inv_data = user_data;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	u64 *cmds;
+	int ret;
+	int i;
+
+	if (!smmu || !smmu_domain->s2 || domain->type != IOMMU_DOMAIN_NESTED)
+		return;
+	cmds = smmu_domain->cmdq_user;
+	for (i = 0; i < inv_data->cmdq_prod; i++) {
+		ret = arm_smmu_fix_user_cmd(smmu_domain, &cmds[i * 2]);
+		if (ret) {
+			inv_data->cmdq_cons = cons_err | i;
+			return;
+		}
+	}
+	ret = arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true);
+	if (ret) {
+		inv_data->cmdq_cons = cons_err | i; // FIXME
+		return;
+	}
+	inv_data->cmdq_cons = inv_data->cmdq_prod;
+}
+
+static void *arm_smmu_get_mmap_page(struct iommu_domain *domain, size_t pgsize)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (!smmu_domain->cmdq_user) {
+		smmu_domain->cmdq_user = alloc_pages_exact(pgsize, GFP_KERNEL);
+		smmu_domain->cmdq_user_size = pgsize;
+	}
+	return smmu_domain->cmdq_user;
+}
+
 static const struct iommu_domain_ops arm_smmu_nested_domain_ops = {
 	.attach_dev		= arm_smmu_attach_dev,
 	.free			= arm_smmu_domain_free,
 	.get_msi_mapping_domain	= arm_smmu_get_msi_mapping_domain,
+	.cache_invalidate_user	= arm_smmu_cache_invalidate_user,
+	.get_mmap_page		= arm_smmu_get_mmap_page,
 };
 
 static struct iommu_domain *
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 838108b22162..74e917142d56 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -733,6 +733,8 @@ struct arm_smmu_domain {
 
 	spinlock_t		devices_lock;
 	struct list_head	mmu_notifiers;
+	void			*cmdq_user;
+	size_t			cmdq_user_size;
 };
 
 static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index f5bdbdcd8d54..a3e45d402dc2 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "io_pagetable.h"
@@ -368,11 +369,56 @@ static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd,
 	return ret;
 }
 
+static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct iommufd_ctx *ictx = filp->private_data;
+	size_t size = vma->vm_end - vma->vm_start;
+	struct iommufd_hw_pagetable *hwpt;
+	u32 hwpt_id = (u32)vma->vm_pgoff;
+	void *mmap_page;
+	int rc;
+
+	if (size > PAGE_SIZE)
+		return -EINVAL;
+
+	hwpt = container_of(iommufd_get_object(ictx, hwpt_id,
+					       IOMMUFD_OBJ_HW_PAGETABLE),
+			    struct iommufd_hw_pagetable, obj);
+	if (IS_ERR(hwpt))
+		return PTR_ERR(hwpt);
+
+	/* Do not allow any kernel-managed hw_pagetable */
+	if (!hwpt->parent) {
+		rc = -EINVAL;
+		goto out_put_hwpt;
+	}
+	if (!hwpt->domain->ops->get_mmap_page) {
+		rc = -EOPNOTSUPP;
+		goto out_put_hwpt;
+	}
+
+	mmap_page = hwpt->domain->ops->get_mmap_page(hwpt->domain, size);
+	if (!mmap_page) {
+		rc = -ENOMEM;
+		goto out_put_hwpt;
+	}
+
+	vma->vm_pgoff = 0;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+	rc = remap_pfn_range(vma, vma->vm_start, virt_to_pfn(mmap_page), size,
+			     vma->vm_page_prot);
+out_put_hwpt:
+	iommufd_put_object(&hwpt->obj);
+	return rc;
+}
+
 static const struct file_operations iommufd_fops = {
 	.owner = THIS_MODULE,
 	.open = iommufd_fops_open,
 	.release = iommufd_fops_release,
 	.unlocked_ioctl = iommufd_fops_ioctl,
+	.mmap = iommufd_fops_mmap,
 };
 
 /**
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 4d0ab016537f..7cc6d7d7cd11 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -364,6 +364,7 @@ struct iommu_domain_ops {
 				struct iommu_iotlb_gather *iotlb_gather);
 	void (*cache_invalidate_user)(struct iommu_domain *domain,
 				      void *user_data);
+	void *(*get_mmap_page)(struct iommu_domain *domain, size_t pgsize);
 
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    dma_addr_t iova);
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index e8c4a16f76e9..dcb929f24226 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -638,21 +638,12 @@ struct iommu_hwpt_invalidate_intel_vtd {
 
 /**
  * struct iommu_hwpt_invalidate_arm_smmuv3 - ARM SMMUv3 cache invalidation info
- * @flags: boolean attributes of cache invalidation command
- * @opcode: opcode of cache invalidation command
- * @ssid: SubStream ID
- * @granule_size: page/block size of the mapping in bytes
- * @range: IOVA range to invalidate
+ * @cmdq_prod: Producer index of user command queues
+ * @cmdq_cons: Consumer index of user command queues
  */
 struct iommu_hwpt_invalidate_arm_smmuv3 {
-#define IOMMU_SMMUV3_CMDQ_TLBI_VA_LEAF	(1 << 0)
-	__u64 flags;
-	__u8 opcode;
-	__u8 padding[3];
-	__u32 asid;
-	__u32 ssid;
-	__u32 granule_size;
-	struct iommu_iova_range range;
+	__u32 cmdq_prod;
+	__u32 cmdq_cons;
 };
 
 /**
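
A hypothetical userspace sketch (not part of the patch) of how a VMM could
drive this interface. Only the vm_pgoff-carries-hwpt_id mmap convention and
the two-index struct iommu_hwpt_invalidate_arm_smmuv3 come from the diff
above; the function names are made up for illustration, and the hwpt
cache-invalidate ioctl wrapper is assumed from the wider iommufd nesting
series, so it is only stubbed out as a comment:

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <sys/types.h>

	/* Mirrors the uAPI struct added above (normally from <linux/iommufd.h>) */
	struct iommu_hwpt_invalidate_arm_smmuv3 {
		uint32_t cmdq_prod;
		uint32_t cmdq_cons;
	};

	/*
	 * Map the per-HWPT command page. iommufd_fops_mmap() reads the
	 * hwpt_id from vma->vm_pgoff, so the byte offset passed to mmap()
	 * must be hwpt_id pages, and the mapping may not exceed one page.
	 */
	static uint64_t *map_user_cmdq(int iommufd, uint32_t hwpt_id)
	{
		long pgsz = sysconf(_SC_PAGESIZE);
		void *p = mmap(NULL, pgsz, PROT_READ | PROT_WRITE, MAP_SHARED,
			       iommufd, (off_t)hwpt_id * pgsz);

		return p == MAP_FAILED ? NULL : p;
	}

	/*
	 * Copy 'n' trapped 16-byte SMMUv3 commands into the shared page and
	 * record the producer index for the kernel to consume.
	 */
	static int replay_guest_tlbi(uint64_t *cmdq, const uint64_t *cmds,
				     uint32_t n,
				     struct iommu_hwpt_invalidate_arm_smmuv3 *inv)
	{
		memcpy(cmdq, cmds, n * 2 * sizeof(uint64_t));
		inv->cmdq_prod = n;
		/*
		 * Pass 'inv' to the hwpt cache-invalidate ioctl here; on
		 * return, cmdq_cons == cmdq_prod means every command was
		 * accepted, otherwise cmdq_cons encodes the failing index.
		 */
		return 0;
	}

The noncached, single-page mapping on the user side mirrors what
iommufd_fops_mmap() enforces in the kernel.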