Skip to content

Commit c186479

Browse files
rmurphy-arm authored and joergroedel committed
iommu/dma: Relax locking in iommu_dma_prepare_msi()

Since commit ece6e6f ("iommu/dma-iommu: Split iommu_dma_map_msi_msg() in two parts"), iommu_dma_prepare_msi() should no longer have to worry about preempting itself, nor being called in atomic context at all. Thus we can downgrade the IRQ-safe locking to a simple mutex to avoid angering the new might_sleep() check in iommu_map().

Reported-by: Qian Cai <cai@lca.pw>
Tested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent f81b846 commit c186479

File tree

1 file changed

+8
-9
lines changed

1 file changed

+8
-9
lines changed

drivers/iommu/dma-iommu.c

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include <linux/iova.h>
2020
#include <linux/irq.h>
2121
#include <linux/mm.h>
22+
#include <linux/mutex.h>
2223
#include <linux/pci.h>
2324
#include <linux/scatterlist.h>
2425
#include <linux/vmalloc.h>
@@ -44,7 +45,6 @@ struct iommu_dma_cookie {
4445
dma_addr_t msi_iova;
4546
};
4647
struct list_head msi_page_list;
47-
spinlock_t msi_lock;
4848

4949
/* Domain for flush queue callback; NULL if flush queue not in use */
5050
struct iommu_domain *fq_domain;
@@ -63,7 +63,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
6363

6464
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
6565
if (cookie) {
66-
spin_lock_init(&cookie->msi_lock);
6766
INIT_LIST_HEAD(&cookie->msi_page_list);
6867
cookie->type = type;
6968
}
@@ -1176,7 +1175,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
11761175
if (msi_page->phys == msi_addr)
11771176
return msi_page;
11781177

1179-
msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
1178+
msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
11801179
if (!msi_page)
11811180
return NULL;
11821181

@@ -1206,7 +1205,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
12061205
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
12071206
struct iommu_dma_cookie *cookie;
12081207
struct iommu_dma_msi_page *msi_page;
1209-
unsigned long flags;
1208+
static DEFINE_MUTEX(msi_prepare_lock); /* see below */
12101209

12111210
if (!domain || !domain->iova_cookie) {
12121211
desc->iommu_cookie = NULL;
@@ -1216,13 +1215,13 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
12161215
cookie = domain->iova_cookie;
12171216

12181217
/*
1219-
* We disable IRQs to rule out a possible inversion against
1220-
* irq_desc_lock if, say, someone tries to retarget the affinity
1221-
* of an MSI from within an IPI handler.
1218+
* In fact the whole prepare operation should already be serialised by
1219+
* irq_domain_mutex further up the callchain, but that's pretty subtle
1220+
* on its own, so consider this locking as failsafe documentation...
12221221
*/
1223-
spin_lock_irqsave(&cookie->msi_lock, flags);
1222+
mutex_lock(&msi_prepare_lock);
12241223
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1225-
spin_unlock_irqrestore(&cookie->msi_lock, flags);
1224+
mutex_unlock(&msi_prepare_lock);
12261225

12271226
msi_desc_set_iommu_cookie(desc, msi_page);
12281227

0 commit comments

Comments
 (0)