dma-mapping: move dma_common_{mmap,get_sgtable} out of mapping.c
Add a new file that contains helpers for misc DMA ops, which is only
built when CONFIG_DMA_OPS is set.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Christoph Hellwig authored and intel-lab-lkp committed Aug 19, 2020
1 parent 95ee9c6 commit 1994fb3
Showing 3 changed files with 53 additions and 46 deletions.
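
For context, a minimal sketch of how a dma_map_ops implementation would typically reuse these shared helpers once they live in ops_helpers.c. The foo_dma_ops name and the omitted members are illustrative assumptions; only dma_common_mmap() and dma_common_get_sgtable() come from this commit.

#include <linux/dma-mapping.h>

/* Hypothetical ops structure; real users are arch/bus dma_map_ops providers. */
static const struct dma_map_ops foo_dma_ops = {
	/* .alloc, .free, .map_page, .map_sg, ... omitted for brevity */
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
};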
1 change: 1 addition & 0 deletions kernel/dma/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_HAS_DMA) += mapping.o direct.o
+obj-$(CONFIG_DMA_OPS) += ops_helpers.o
 obj-$(CONFIG_DMA_OPS) += dummy.o
 obj-$(CONFIG_DMA_CMA) += contiguous.o
 obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
47 changes: 1 addition & 46 deletions kernel/dma/mapping.c
@@ -8,7 +8,7 @@
 #include <linux/memblock.h> /* for max_pfn */
 #include <linux/acpi.h>
 #include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include <linux/of_device.h>
@@ -295,22 +295,6 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-/*
- * Create scatter-list for the already allocated DMA buffer.
- */
-int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	struct page *page = virt_to_page(cpu_addr);
-	int ret;
-
-	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-	if (!ret)
-		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-	return ret;
-}
-
 /*
  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
  * that the intention is to allow exporting memory allocated via the
@@ -358,35 +342,6 @@ pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 }
 #endif /* CONFIG_MMU */
 
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-#ifdef CONFIG_MMU
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long off = vma->vm_pgoff;
-	int ret = -ENXIO;
-
-	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
-
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
-	if (off >= count || user_count > count - off)
-		return -ENXIO;
-
-	return remap_pfn_range(vma, vma->vm_start,
-			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
-			user_count << PAGE_SHIFT, vma->vm_page_prot);
-#else
-	return -ENXIO;
-#endif /* CONFIG_MMU */
-}
-
 /**
  * dma_can_mmap - check if a given device supports dma_mmap_*
  * @dev: device to check
51 changes: 51 additions & 0 deletions kernel/dma/ops_helpers.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers for DMA ops implementations. These generally rely on the fact that
+ * the allocated memory contains normal pages in the direct kernel mapping.
+ */
+#include <linux/dma-noncoherent.h>
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ */
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (!ret)
+		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return ret;
+}
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+#ifdef CONFIG_MMU
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long off = vma->vm_pgoff;
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off >= count || user_count > count - off)
+		return -ENXIO;
+
+	return remap_pfn_range(vma, vma->vm_start,
+			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+			user_count << PAGE_SHIFT, vma->vm_page_prot);
+#else
+	return -ENXIO;
+#endif /* CONFIG_MMU */
+}
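
Not part of the commit, but a hedged usage sketch: drivers normally reach these helpers through dma_mmap_coherent() and dma_get_sgtable(), which dispatch to the device's dma_map_ops ->mmap and ->get_sgtable; for many ops implementations those callbacks are exactly the dma_common_* functions above. All foo_* names below are made up for illustration.

#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct foo_buf {			/* hypothetical per-device buffer state */
	struct device	*dev;
	void		*cpu_addr;
	dma_addr_t	dma_addr;
	size_t		size;
};

/* mmap() file operation: maps the coherent buffer into userspace. */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_buf *buf = file->private_data;

	return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
				 buf->dma_addr, buf->size);
}

/* Builds a single-entry sg_table describing the same buffer. */
static int foo_buf_get_sgtable(struct foo_buf *buf, struct sg_table *sgt)
{
	return dma_get_sgtable(buf->dev, sgt, buf->cpu_addr,
			       buf->dma_addr, buf->size);
}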
