@@ -0,0 +1,358 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic IOMMU mapped bounce buffers.
 *
 * Copyright (C) 2021 Google, Inc.
 */

#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "io-buffer-manager.h"
#include "io-bounce-buffers.h"

struct io_bounce_buffers {
	struct iommu_domain *domain;
	struct iova_domain *iovad;
	unsigned int nid;
	struct io_buffer_manager manager;
};

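/*
 * Callback invoked by the buffer manager when a bounce buffer is released:
 * tear down the IOMMU mapping for the buffer's IOVA range. Returns true if
 * the full range was unmapped and the pages may be freed.
 */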
bool io_bounce_buffers_release_buffer_cb(struct io_buffer_manager *manager,
					 dma_addr_t iova, size_t size)
{
	struct io_bounce_buffers *buffers =
		container_of(manager, struct io_bounce_buffers, manager);
	return iommu_unmap(buffers->domain, iova, size) >= size;
}

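/*
 * Allocate and initialize the bounce buffer state for a device's DMA domain.
 * Returns an ERR_PTR on failure.
 */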
struct io_bounce_buffers *io_bounce_buffers_init(struct device *dev,
						 struct iommu_domain *domain,
						 struct iova_domain *iovad)
{
	int ret;
	struct io_bounce_buffers *buffers;

	buffers = kzalloc(sizeof(*buffers), GFP_KERNEL);
	if (!buffers)
		return ERR_PTR(-ENOMEM);

	ret = io_buffer_manager_init(&buffers->manager);
	if (ret) {
		kfree(buffers);
		return ERR_PTR(ret);
	}

	buffers->domain = domain;
	buffers->iovad = iovad;
	buffers->nid = dev_to_node(dev);

	return buffers;
}

void io_bounce_buffers_destroy(struct io_bounce_buffers *buffers)
{
	kfree(buffers);
}

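/*
 * A bounce page needs a CPU copy when the CPU is the consumer of the data
 * (sync-for-cpu of a DMA_FROM_DEVICE mapping) or the producer (sync-for-device
 * of a DMA_TO_DEVICE mapping). DMA_BIDIRECTIONAL always copies.
 */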
static bool should_sync_buffer(enum dma_data_direction dir, bool sync_for_cpu)
{
	return dir == DMA_BIDIRECTIONAL ||
	       (dir == DMA_FROM_DEVICE && sync_for_cpu) ||
	       (dir == DMA_TO_DEVICE && !sync_for_cpu);
}

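/*
 * Copy data between the original buffer and the bounce pages and perform any
 * required cache maintenance. Handles highmem original pages by mapping them
 * one page at a time with kmap_local_page().
 */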
static void io_bounce_buffers_do_sync(struct io_bounce_buffers *buffers,
				      struct page **bounce_buffer,
				      size_t bounce_offset, struct page *orig,
				      size_t orig_offset, size_t size,
				      enum dma_data_direction dir, int prot,
				      bool sync_for_cpu)
{
	bool needs_bounce_sync = should_sync_buffer(dir, sync_for_cpu);
	char *orig_lowmem_ptr;
	bool dma_is_coherent = prot & IOMMU_CACHE;

	if (dma_is_coherent && !needs_bounce_sync)
		return;

	orig_lowmem_ptr = PageHighMem(orig) ? NULL : page_to_virt(orig);

	while (size) {
		size_t copy_len, bounce_page_offset;
		struct page *bounce_page;

		bounce_page = bounce_buffer[bounce_offset / PAGE_SIZE];
		bounce_page_offset = bounce_offset % PAGE_SIZE;

		copy_len = size;
		if (copy_len + bounce_page_offset > PAGE_SIZE)
			copy_len = PAGE_SIZE - bounce_page_offset;

		if (!dma_is_coherent && sync_for_cpu) {
			phys_addr_t paddr = page_to_phys(bounce_page);

			arch_sync_dma_for_cpu(paddr + bounce_page_offset,
					      copy_len, dir);
		}

		if (needs_bounce_sync) {
			char *bounce_page_ptr = kmap_local_page(bounce_page);
			char *bounce_ptr = bounce_page_ptr + bounce_page_offset;

			if (!orig_lowmem_ptr) {
				size_t remaining = copy_len;
				size_t offset = orig_offset % PAGE_SIZE;
				size_t orig_page_idx = orig_offset / PAGE_SIZE;

				while (remaining) {
					char *orig_ptr;
					size_t sz = min(remaining,
							PAGE_SIZE - offset);

					orig_ptr = kmap_local_page(
						nth_page(orig, orig_page_idx));
					if (sync_for_cpu) {
						memcpy(orig_ptr + offset,
						       bounce_ptr, sz);
					} else {
						memcpy(bounce_ptr,
						       orig_ptr + offset, sz);
					}
					kunmap_local(orig_ptr);

					remaining -= sz;
					orig_page_idx += 1;
					bounce_ptr += sz;
					offset = 0;
				}
			} else if (sync_for_cpu) {
				memcpy(orig_lowmem_ptr + orig_offset,
				       bounce_ptr, copy_len);
			} else {
				memcpy(bounce_ptr,
				       orig_lowmem_ptr + orig_offset, copy_len);
			}

			kunmap_local(bounce_page_ptr);
		}

		if (!dma_is_coherent && !sync_for_cpu) {
			phys_addr_t paddr = page_to_phys(bounce_page);

			arch_sync_dma_for_device(paddr + bounce_page_offset,
						 copy_len, dir);
		}

		bounce_offset += copy_len;
		orig_offset += copy_len;
		size -= copy_len;
	}
}

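/*
 * Sync a single bounced mapping. Returns false if dma_handle does not belong
 * to a bounce buffer, in which case the caller falls back to the normal
 * iommu-dma sync path.
 */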
bool io_bounce_buffers_sync_single(struct io_bounce_buffers *buffers,
				   dma_addr_t dma_handle, size_t size,
				   enum dma_data_direction dir,
				   bool sync_for_cpu)
{
	struct io_bounce_buffer_info info;
	void *orig_buffer;
	size_t offset;
	int prot;

	if (!io_buffer_manager_find_buffer(&buffers->manager, dma_handle, &info,
					   &orig_buffer, &prot))
		return false;

	offset = dma_handle - info.iova;
	io_bounce_buffers_do_sync(buffers, info.bounce_buffer, offset,
				  orig_buffer, offset, size, dir, prot,
				  sync_for_cpu);
	return true;
}

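/*
 * Sync every segment of a bounced scatterlist against the bounce buffer,
 * which is contiguous in IOVA space.
 */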
static void __io_bounce_buffers_sync_sg(struct io_bounce_buffers *buffers,
					struct scatterlist *sgl, int nents,
					struct page **bounce_buffer,
					enum dma_data_direction dir, int prot,
					bool sync_for_cpu)
{
	size_t bounce_offset = 0;
	struct scatterlist *iter;
	int i;

	for_each_sg(sgl, iter, nents, i) {
		io_bounce_buffers_do_sync(buffers, bounce_buffer, bounce_offset,
					  sg_page(iter), iter->offset,
					  iter->length, dir, prot,
					  sync_for_cpu);
		bounce_offset += iter->length;
	}
}

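/*
 * Sync a bounced scatterlist mapping. Returns false if sgl was not mapped
 * through a bounce buffer.
 */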
bool io_bounce_buffers_sync_sg(struct io_bounce_buffers *buffers,
			       struct scatterlist *sgl, int nents,
			       enum dma_data_direction dir, bool sync_for_cpu)
{
	struct io_bounce_buffer_info info;
	void *orig_buffer;
	int prot;

	if (!io_buffer_manager_find_buffer(&buffers->manager,
					   sg_dma_address(sgl), &info,
					   &orig_buffer, &prot))
		return false;

	// In the non-bounce-buffer case, iommu_dma_map_sg syncs before setting
	// up the new mapping's dma address. This check handles false positives
	// in find_buffer caused by an sgl being reused for a non-bounce-buffer
	// mapping after having been used with a bounce buffer.
	if (orig_buffer != sgl)
		return false;

	__io_bounce_buffers_sync_sg(buffers, sgl, nents, info.bounce_buffer,
				    dir, prot, sync_for_cpu);

	return true;
}

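/*
 * Unmap a bounced page mapping: sync the data back to the original buffer
 * (unless the caller opted out) and release the bounce buffer. Returns false
 * if the handle is not a bounce buffer.
 */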
bool io_bounce_buffers_unmap_page(struct io_bounce_buffers *buffers,
				  dma_addr_t handle, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		io_bounce_buffers_sync_single(buffers, handle, size, dir, true);

	return io_buffer_manager_release_buffer(&buffers->manager,
						buffers->domain, handle, true);
}

bool io_bounce_buffers_unmap_sg(struct io_bounce_buffers *buffers,
				struct scatterlist *sgl, int nents,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		io_bounce_buffers_sync_sg(buffers, sgl, nents, dir, true);

	return io_buffer_manager_release_buffer(
		&buffers->manager, buffers->domain, sg_dma_address(sgl), true);
}

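/*
 * Map the bounce buffer's pages into the IOVA range reserved by the buffer
 * manager. Returns true if the entire range was mapped.
 */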
static bool io_bounce_buffers_map_buffer(struct io_bounce_buffers *buffers,
					 struct io_bounce_buffer_info *info,
					 int prot)
{
	unsigned int count = info->size >> PAGE_SHIFT;
	struct sg_table sgt;
	size_t mapped;

	if (sg_alloc_table_from_pages(&sgt, info->bounce_buffer, count, 0,
				      info->size, GFP_ATOMIC))
		return false;

	mapped = iommu_map_sg_atomic(buffers->domain, info->iova, sgt.sgl,
				     sgt.orig_nents, prot);

	sg_free_table(&sgt);
	return mapped >= info->size;
}

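/*
 * Bounce a page mapping if it is not aligned to the IOVA granule. Returns
 * false to tell the caller to proceed with a normal (non-bounced) mapping;
 * returns true with *handle set (possibly to DMA_MAPPING_ERROR on failure)
 * if the mapping was handled here.
 */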
bool io_bounce_buffers_map_page(struct io_bounce_buffers *buffers,
				struct device *dev, struct page *page,
				unsigned long offset, size_t size, int prot,
				enum dma_data_direction dir,
				unsigned long attrs, dma_addr_t *handle)
{
	bool skip_cpu_sync = attrs & DMA_ATTR_SKIP_CPU_SYNC;
	struct io_bounce_buffer_info info;
	bool force_bounce = iova_offset(buffers->iovad, offset | size);

	if (!force_bounce)
		return false;

	*handle = DMA_MAPPING_ERROR;
	if (!io_buffer_manager_alloc_buffer(&buffers->manager, dev, page,
					    offset + size, prot, buffers->nid,
					    &info))
		return true;

	if (!skip_cpu_sync)
		io_bounce_buffers_do_sync(buffers, info.bounce_buffer, offset,
					  page, offset, size, dir, prot, false);

	if (!io_bounce_buffers_map_buffer(buffers, &info, prot)) {
		io_buffer_manager_release_buffer(
			&buffers->manager, buffers->domain, info.iova, false);
		return true;
	}

	*handle = info.iova + offset;
	return true;
}

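/*
 * Bounce a scatterlist if any segment is not aligned to the IOVA granule.
 * The bounced segments are mapped contiguously in IOVA space and then split
 * back into DMA segments no larger than the device's maximum segment size.
 */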
bool io_bounce_buffers_map_sg(struct io_bounce_buffers *buffers,
			      struct device *dev, struct scatterlist *sgl,
			      int nents, int prot, enum dma_data_direction dir,
			      unsigned long attrs, int *out_nents)
{
	struct io_bounce_buffer_info info;
	struct scatterlist *iter;
	size_t size = 0;
	bool skip_cpu_sync = attrs & DMA_ATTR_SKIP_CPU_SYNC;
	dma_addr_t seg_iova;
	int i;
	bool force_bounce = false;

	for_each_sg(sgl, iter, nents, i) {
		size += iter->length;
		force_bounce |= iova_offset(buffers->iovad,
					    iter->offset | iter->length);
	}

	if (!force_bounce)
		return false;

	*out_nents = 0;
	if (!io_buffer_manager_alloc_buffer(&buffers->manager, dev, sgl, size,
					    prot, buffers->nid, &info))
		return true;

	if (!skip_cpu_sync)
		__io_bounce_buffers_sync_sg(buffers, sgl, nents,
					    info.bounce_buffer, dir, prot,
					    false);

	if (!io_bounce_buffers_map_buffer(buffers, &info, prot)) {
		io_buffer_manager_release_buffer(
			&buffers->manager, buffers->domain, info.iova, false);
		return true;
	}

	i = 0;
	seg_iova = info.iova;
	while (size > 0) {
		size_t seg_size = min_t(size_t, size,
					dma_get_max_seg_size(dev));

		sg_dma_len(sgl) = seg_size;
		sg_dma_address(sgl) = seg_iova;

		sgl = sg_next(sgl);
		size -= seg_size;
		seg_iova += seg_size;
		i++;
	}

	if (sgl) {
		sg_dma_address(sgl) = DMA_MAPPING_ERROR;
		sg_dma_len(sgl) = 0;
	}

	*out_nents = i;
	return true;
}

@@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google, Inc.
 */

#ifndef _LINUX_IO_BOUNCE_BUFFERS_H
#define _LINUX_IO_BOUNCE_BUFFERS_H

#include <linux/dma-iommu.h>
#include <linux/iova.h>

struct io_bounce_buffers;

struct io_bounce_buffers *io_bounce_buffers_init(struct device *dev,
						 struct iommu_domain *domain,
						 struct iova_domain *iovad);
void io_bounce_buffers_destroy(struct io_bounce_buffers *buffers);

bool io_bounce_buffers_sync_single(struct io_bounce_buffers *buffers,
				   dma_addr_t dma_handle, size_t size,
				   enum dma_data_direction dir,
				   bool sync_for_cpu);
bool io_bounce_buffers_sync_sg(struct io_bounce_buffers *buffers,
			       struct scatterlist *sgl, int nents,
			       enum dma_data_direction dir, bool sync_for_cpu);

bool io_bounce_buffers_map_page(struct io_bounce_buffers *buffers,
				struct device *dev, struct page *page,
				unsigned long offset, size_t size, int prot,
				enum dma_data_direction dir,
				unsigned long attrs, dma_addr_t *handle);
bool io_bounce_buffers_map_sg(struct io_bounce_buffers *buffers,
			      struct device *dev, struct scatterlist *sgl,
			      int nents, int prot, enum dma_data_direction dir,
			      unsigned long attrs, int *out_nents);

bool io_bounce_buffers_unmap_page(struct io_bounce_buffers *buffers,
				  dma_addr_t handle, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs);
bool io_bounce_buffers_unmap_sg(struct io_bounce_buffers *buffers,
				struct scatterlist *sgl, int nents,
				enum dma_data_direction dir,
				unsigned long attrs);

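/*
 * Illustrative sketch (not part of this patch): a dma-iommu map_page path
 * could try the bounce layer first and fall back to the normal IOMMU mapping
 * when io_bounce_buffers_map_page() returns false. The names
 * dev_io_bounce_buffers() and __iommu_dma_map_normal() are hypothetical
 * placeholders for the integration points.
 *
 *	dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 *				      unsigned long offset, size_t size,
 *				      enum dma_data_direction dir,
 *				      unsigned long attrs)
 *	{
 *		struct io_bounce_buffers *buffers = dev_io_bounce_buffers(dev);
 *		int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev),
 *					    attrs);
 *		dma_addr_t handle;
 *
 *		if (buffers &&
 *		    io_bounce_buffers_map_page(buffers, dev, page, offset,
 *					       size, prot, dir, attrs,
 *					       &handle))
 *			return handle;
 *
 *		return __iommu_dma_map_normal(dev, page, offset, size, prot,
 *					      dir, attrs);
 *	}
 */
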
#endif /* _LINUX_IO_BOUNCE_BUFFERS_H */

@@ -0,0 +1,212 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Manager which allocates and tracks bounce buffers and their IOVAs. Does
 * not actually manage the IOMMU mapping nor do the bounce copies.
 *
 * Copyright (C) 2021 Google, Inc.
 */

#include "io-buffer-manager.h"

#include <linux/slab.h>

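/* Per-buffer tracking node, kept in an rb-tree keyed by IOVA range. */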
struct io_buffer_node {
	struct rb_node node;
	struct io_bounce_buffer_info info;
	void *orig_buffer;
	int prot;
};

static void io_buffer_manager_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kfree(pages);
}

static struct page **io_buffer_manager_alloc_pages(int count, unsigned int nid)
{
	struct page **pages;
	unsigned int i;

	pages = kmalloc_array(count, sizeof(*pages), GFP_ATOMIC);
	if (!pages)
		return NULL;

	// The IOMMU can map highmem pages, but try to allocate non-highmem
	// pages first to make accessing the buffer cheaper.
	for (i = 0; i < count; i++) {
		pages[i] = alloc_pages_node(
			nid,
			GFP_ATOMIC | __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN,
			0);
		if (!pages[i]) {
			pages[i] = alloc_pages_node(
				nid, GFP_ATOMIC | __GFP_ZERO | __GFP_HIGHMEM,
				0);
			if (!pages[i]) {
				io_buffer_manager_free_pages(pages, i);
				return NULL;
			}
		}
	}

	return pages;
}

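/*
 * Look up the buffer whose IOVA range [iova, iova + size) contains the given
 * address. Caller must hold the fallback_lock.
 */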
static struct io_buffer_node *find_fallback_node(struct rb_root *root,
						 dma_addr_t iova)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct io_buffer_node *cur =
			container_of(node, struct io_buffer_node, node);

		if (iova < cur->info.iova)
			node = node->rb_left;
		else if (iova >= cur->info.iova + cur->info.size)
			node = node->rb_right;
		else
			return cur;
	}
	return NULL;
}

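/*
 * Insert a buffer node, rejecting any overlap with an existing IOVA range,
 * since an overlap indicates IOVA allocator corruption. Caller must hold the
 * fallback_lock.
 */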
static bool insert_fallback_node(struct rb_root *root,
				 struct io_buffer_node *node)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	dma_addr_t node_end = node->info.iova + node->info.size;

	while (*new) {
		struct io_buffer_node *cur =
			container_of(*new, struct io_buffer_node, node);
		dma_addr_t cur_end = cur->info.iova + cur->info.size;

		parent = *new;
		if (node_end <= cur->info.iova)
			new = &((*new)->rb_left);
		else if (node->info.iova >= cur_end)
			new = &((*new)->rb_right);
		else {
			pr_crit("IOVA collision new=[%pad,%pad) old=[%pad,%pad)\n",
				&node->info.iova, &node_end, &cur->info.iova,
				&cur_end);
			return false;
		}
	}

	rb_link_node(&node->node, parent, new);
	rb_insert_color(&node->node, root);
	return true;
}

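/*
 * Reserve an IOVA range sized for the (page-aligned) buffer, allocate bounce
 * pages, and publish the tracking node. On success, *info describes the new
 * bounce buffer; the caller is responsible for the IOMMU mapping.
 */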
bool io_buffer_manager_alloc_buffer(struct io_buffer_manager *manager,
				    struct device *dev, void *orig_buffer,
				    size_t size, int prot, unsigned int nid,
				    struct io_bounce_buffer_info *info)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct io_buffer_node *node;
	unsigned long flags;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node)
		return false;

	size = PAGE_ALIGN(size);
	node->info.iova =
		__iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!node->info.iova)
		goto free_node;

	node->info.bounce_buffer =
		io_buffer_manager_alloc_pages(size >> PAGE_SHIFT, nid);
	if (!node->info.bounce_buffer)
		goto free_iova;

	/*
	 * Fully initialize the node before inserting it: insert_fallback_node
	 * keys on info.iova and info.size, and the node is visible to lookups
	 * as soon as the lock is dropped.
	 */
	node->orig_buffer = orig_buffer;
	node->prot = prot;
	node->info.size = size;

	spin_lock_irqsave(&manager->fallback_lock, flags);
	if (!insert_fallback_node(&manager->fallback_buffers, node))
		goto fallback_lock_unlock;
	spin_unlock_irqrestore(&manager->fallback_lock, flags);

	*info = node->info;

	return true;

fallback_lock_unlock:
	spin_unlock_irqrestore(&manager->fallback_lock, flags);
	io_buffer_manager_free_pages(node->info.bounce_buffer,
				     size >> PAGE_SHIFT);
free_iova:
	__iommu_dma_free_iova(domain->iova_cookie, node->info.iova, size, NULL);
free_node:
	kfree(node);
	return false;
}

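/*
 * Look up a bounce buffer by any dma_addr_t within its IOVA range and return
 * its info, original buffer, and mapping protection flags.
 */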
bool io_buffer_manager_find_buffer(struct io_buffer_manager *manager,
				   dma_addr_t handle,
				   struct io_bounce_buffer_info *info,
				   void **orig_buffer, int *prot)
{
	struct io_buffer_node *node;
	unsigned long flags;

	spin_lock_irqsave(&manager->fallback_lock, flags);
	node = find_fallback_node(&manager->fallback_buffers, handle);
	spin_unlock_irqrestore(&manager->fallback_lock, flags);

	if (!node)
		return false;

	*info = node->info;
	*orig_buffer = node->orig_buffer;
	*prot = node->prot;
	return true;
}

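/*
 * Remove a bounce buffer from the tracking tree and free its pages and IOVA.
 * @inited indicates whether the io_bounce_buffers layer finished its setup;
 * if so, the IOMMU mapping is torn down via the release callback first. If
 * unmapping fails, the buffer is intentionally leaked rather than freed while
 * potentially still mapped for the device.
 */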
bool io_buffer_manager_release_buffer(struct io_buffer_manager *manager,
				      struct iommu_domain *domain,
				      dma_addr_t handle, bool inited)
{
	struct io_buffer_node *node;
	unsigned long flags;
	bool free_buffer;

	spin_lock_irqsave(&manager->fallback_lock, flags);
	node = find_fallback_node(&manager->fallback_buffers, handle);
	if (node)
		rb_erase(&node->node, &manager->fallback_buffers);
	spin_unlock_irqrestore(&manager->fallback_lock, flags);

	if (!node)
		return false;

	if (inited)
		free_buffer = io_bounce_buffers_release_buffer_cb(
			manager, node->info.iova, node->info.size);
	else
		free_buffer = true;

	if (free_buffer) {
		io_buffer_manager_free_pages(node->info.bounce_buffer,
					     node->info.size >> PAGE_SHIFT);
		__iommu_dma_free_iova(domain->iova_cookie, node->info.iova,
				      node->info.size, NULL);
	} else {
		pr_warn("Bounce buffer release failed; leaking buffer\n");
	}

	kfree(node);
	return true;
}

int io_buffer_manager_init(struct io_buffer_manager *manager)
{
	manager->fallback_buffers = RB_ROOT;
	spin_lock_init(&manager->fallback_lock);

	return 0;
}

@@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google, Inc.
 */

#ifndef _LINUX_IO_BUFFER_MANAGER_H
#define _LINUX_IO_BUFFER_MANAGER_H

#include <linux/dma-iommu.h>
#include <linux/iova.h>
#include <linux/spinlock.h>

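/* Tracks outstanding bounce buffers in an rb-tree protected by a spinlock. */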
struct io_buffer_manager {
	spinlock_t fallback_lock;
	struct rb_root fallback_buffers;
};

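/* Describes one bounce buffer: its backing pages, IOVA base, and size. */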
struct io_bounce_buffer_info {
	struct page **bounce_buffer;
	dma_addr_t iova;
	size_t size;
};

bool io_buffer_manager_alloc_buffer(struct io_buffer_manager *manager,
				    struct device *dev, void *orig_buffer,
				    size_t size, int prot, unsigned int nid,
				    struct io_bounce_buffer_info *info);

bool io_buffer_manager_find_buffer(struct io_buffer_manager *manager,
				   dma_addr_t handle,
				   struct io_bounce_buffer_info *info,
				   void **orig_buffer, int *prot);

bool io_buffer_manager_release_buffer(struct io_buffer_manager *manager,
				      struct iommu_domain *domain,
				      dma_addr_t handle, bool inited);

int io_buffer_manager_init(struct io_buffer_manager *manager);

bool io_bounce_buffers_release_buffer_cb(struct io_buffer_manager *manager,
					 dma_addr_t iova, size_t size);

#endif /* _LINUX_IO_BUFFER_MANAGER_H */