swiotlb: support aligned swiotlb buffers
Add an argument to swiotlb_tbl_map_single that specifies the desired
alignment of the allocated buffer. This is used by dma-iommu to ensure
the buffer is aligned to the iova granule size when using swiotlb with
untrusted sub-granule mappings. This addresses an issue where adjacent
slots could be exposed to the untrusted device if IO_TLB_SIZE < iova
granule < PAGE_SIZE.

Signed-off-by: David Stevens <stevensd@chromium.org>
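To make the effect of the new argument concrete, the following is a minimal user-space sketch (not kernel code, and not part of the patch) of how an allocation-alignment mask folds into the slot-search stride, mirroring the stride rules in the patched find_slots(). The constants (2 KiB slots, 16 KiB pages, a 4 KiB IOVA granule) and the helper names are illustrative assumptions only.

/*
 * Sketch: how alloc_align_mask widens the swiotlb slot-search stride.
 * Assumptions for illustration: IO_TLB_SHIFT = 11 (2 KiB slots),
 * PAGE_SHIFT = 14 (16 KiB pages), IOVA granule = 4 KiB.
 */
#include <stddef.h>
#include <stdio.h>

#define IO_TLB_SHIFT	11			/* 2 KiB swiotlb slots */
#define PAGE_SHIFT	14			/* assumed 16 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned int max_uint(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/*
 * Mirrors the three stride rules in the patched find_slots():
 * device/offset alignment, page alignment for page-or-larger buffers,
 * and the new allocation-alignment mask.  The stride is the step, in
 * slots, at which candidate start slots are considered.
 */
static unsigned int slot_stride(size_t alloc_size,
				unsigned int iotlb_align_mask,
				unsigned int alloc_align_mask)
{
	unsigned int stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;

	if (alloc_size >= PAGE_SIZE)
		stride = max_uint(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
	stride = max_uint(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
	return stride;
}

int main(void)
{
	/* A 4 KiB IOVA granule gives iova_mask(iovad) == 0xfff. */
	unsigned int iova_granule_mask = 0xfff;

	/*
	 * A 1 KiB bounce buffer for an untrusted device: without the
	 * mask the stride is 1 (any 2 KiB slot may be chosen), with it
	 * the stride is 2, so the buffer always starts on a granule
	 * boundary and no adjacent slot shares its IOVA granule.
	 */
	printf("stride without mask: %u\n", slot_stride(1024, 0, 0));
	printf("stride with mask:    %u\n",
	       slot_stride(1024, 0, iova_granule_mask));
	return 0;
}

In this assumed configuration (IO_TLB_SIZE < iova granule < PAGE_SIZE), the mask is the only rule that forces granule alignment, which is exactly the case the commit message describes.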
David Stevens authored and intel-lab-lkp committed Aug 13, 2021
1 parent 23c7ba0 commit 50aeec2
Showing 3 changed files with 11 additions and 7 deletions.
4 changes: 2 additions & 2 deletions drivers/iommu/dma-iommu.c
@@ -802,8 +802,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		size_t padding_size;
 
 		aligned_size = iova_align(iovad, size);
-		phys = swiotlb_tbl_map_single(dev, phys, size,
-					      aligned_size, dir, attrs);
+		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+					      iova_mask(iovad), dir, attrs);
 
 		if (phys == DMA_MAPPING_ERROR)
 			return DMA_MAPPING_ERROR;
3 changes: 2 additions & 1 deletion include/linux/swiotlb.h
@@ -44,7 +44,8 @@ extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
 		size_t mapping_size, size_t alloc_size,
-		enum dma_data_direction dir, unsigned long attrs);
+		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
+		unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
11 changes: 7 additions & 4 deletions kernel/dma/swiotlb.c
@@ -427,7 +427,7 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
  * allocate a buffer from that IO TLB pool.
  */
 static int find_slots(struct device *dev, phys_addr_t orig_addr,
-		      size_t alloc_size)
+		      size_t alloc_size, unsigned int alloc_align_mask)
 {
 	struct io_tlb_mem *mem = io_tlb_default_mem;
 	unsigned long boundary_mask = dma_get_seg_boundary(dev);
@@ -450,6 +450,7 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr,
 	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
 	if (alloc_size >= PAGE_SIZE)
 		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
+	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
 
 	spin_lock_irqsave(&mem->lock, flags);
 	if (unlikely(nslots > mem->nslabs - mem->used))
@@ -504,7 +505,8 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr,
 
 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 		size_t mapping_size, size_t alloc_size,
-		enum dma_data_direction dir, unsigned long attrs)
+		unsigned int alloc_align_mask, enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	struct io_tlb_mem *mem = io_tlb_default_mem;
 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
@@ -524,7 +526,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 		return (phys_addr_t)DMA_MAPPING_ERROR;
 	}
 
-	index = find_slots(dev, orig_addr, alloc_size + offset);
+	index = find_slots(dev, orig_addr,
+			   alloc_size + offset, alloc_align_mask);
 	if (index == -1) {
 		if (!(attrs & DMA_ATTR_NO_WARN))
 			dev_warn_ratelimited(dev,
@@ -636,7 +639,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
 			      swiotlb_force);
 
-	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
+	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
 					      attrs);
 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
