Skip to content
Permalink
Browse files
iommu/iova: Add support for IOVA max alignment tuning
IOVAs are aligned to the smallest PAGE_SIZE order in which the requested
IOVA can fit. But this might not work for all use-cases. It can cause
IOVA fragmentation in some multimedia and 8K video use-cases that may
require larger buffers to be allocated and mapped.

When the above allocation pattern is used with the current alignment
scheme, the IOVA space could be quickly exhausted for 32bit devices.

In order to get better IOVA space utilization and reduce fragmentation,
a new kernel command line parameter is introduced to make the alignment
limit configurable by the user during boot.

Signed-off-by: Georgi Djakov <quic_c_gdjako@quicinc.com>
  • Loading branch information
Georgi Djakov authored and intel-lab-lkp committed Oct 13, 2021
1 parent 0b29edc commit dc4cf3ea2ffff2a4c30a8a2c395e830ea37dd819
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 1 deletion.
@@ -2054,6 +2054,14 @@
forcing Dual Address Cycle for PCI cards supporting
greater than 32-bit addressing.

iommu.max_align_shift=
[ARM64, X86] Limit the alignment of IOVAs to a maximum
PAGE_SIZE order. Larger IOVAs will be aligned to this
specified order. The resulting alignment is PAGE_SIZE
multiplied by two raised to the given order.
Format: { "4" | "5" | "6" | "7" | "8" | "9" }
Default: 9

iommu.strict= [ARM64, X86] Configure TLB invalidation behaviour
Format: { "0" | "1" }
0 - Lazy mode.
@@ -15,6 +15,9 @@
/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR ~0UL

/* Default upper bound, in PAGE_SIZE orders, for size-aligned IOVA allocations */
#define IOMMU_DEFAULT_IOVA_MAX_ALIGN_SHIFT 9
/*
 * Boot-time tunable via the "iommu.max_align_shift=" early parameter;
 * written once during early boot, read on every size-aligned allocation.
 */
static unsigned long iommu_max_align_shift __read_mostly = IOMMU_DEFAULT_IOVA_MAX_ALIGN_SHIFT;

static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size);
@@ -27,6 +30,27 @@ static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);

static unsigned long limit_align_shift(struct iova_domain *iovad, unsigned long shift)
{
unsigned long max_align_shift;

max_align_shift = iommu_max_align_shift + PAGE_SHIFT - iova_shift(iovad);
return min_t(unsigned long, max_align_shift, shift);
}

/*
 * iommu_set_def_max_align_shift - parse the "iommu.max_align_shift=" early
 * parameter.
 * @str: the raw parameter value from the kernel command line
 *
 * Only the orders documented in kernel-parameters.txt (4-9) are accepted;
 * unparseable strings and out-of-range values are ignored and the
 * compile-time default is kept, so a bogus command line cannot produce an
 * oversized shift in limit_align_shift().  Always returns 0 so that
 * early-parameter processing continues.
 */
static int __init iommu_set_def_max_align_shift(char *str)
{
	unsigned long max_align_shift;
	int ret = kstrtoul(str, 10, &max_align_shift);

	/* 4..9 mirrors the documented Format; reject everything else */
	if (!ret && max_align_shift >= 4 && max_align_shift <= 9)
		iommu_max_align_shift = max_align_shift;

	return 0;
}
early_param("iommu.max_align_shift", iommu_set_def_max_align_shift);

static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
struct iova_domain *iovad;
@@ -242,7 +266,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

if (size_aligned)
align_mask <<= fls_long(size - 1);
align_mask <<= limit_align_shift(iovad, fls_long(size - 1));

/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

0 comments on commit dc4cf3e

Please sign in to comment.