From d3712a4b0e7adafbeb4717333ec242d59a490537 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Sun, 4 Aug 2019 23:52:09 -0700 Subject: [PATCH] ion: Rewrite for improved clarity and performance The ION driver suffers from massive code bloat caused by excessive debug features, as well as from the poor lock usage that results. Multiple locks in ION exist to make the debug features thread-safe, which hurts ION's actual performance when doing its job. There are numerous code paths in ION that hold mutexes for no reason and hold them for longer than necessary. This results in not only unwanted lock contention, but also long delays when contention on a mutex causes the calling thread to be preempted for a while. All lock usage in ION follows this pattern, which causes poor performance across the board. Furthermore, a single big lock is used nearly everywhere rather than multiple fine-grained locks. Most of the mutex locks can be replaced with simple atomic operations. Where a mutex lock can't be eliminated completely, a spinlock or rwlock can be used instead for quick operations, thereby avoiding long delays due to preemption. Fine-grained locks are also now used in place of the single big lock that was used before. Additionally, dup_sg_table is called very frequently and lies within the rendering path for the display. Speed it up by reserving slab caches for its sg_table and page-sized scatterlist allocations, as well as by improving the sg copy process. Note that sg_alloc_table zeroes out `table`, so there's no need to request zeroed memory from the allocator. Overall, just rewrite ION entirely to fix its deficiencies. This optimizes ION for excellent performance and discards its rarely-used debug bloat. Signed-off-by: Sultan Alsawaf Signed-off-by: Adam W. Willis --- Minimal illustrative sketches of the spinlock and slab-cache patterns described above follow the diff. drivers/staging/android/ion/Kconfig | 12 - drivers/staging/android/ion/ion-ioctl.c | 40 +- drivers/staging/android/ion/ion.c | 881 ++++-------------- drivers/staging/android/ion/ion.h | 66 +- drivers/staging/android/ion/ion_cma_heap.c | 3 +- drivers/staging/android/ion/ion_heap.c | 24 +- drivers/staging/android/ion/ion_page_pool.c | 20 +- drivers/staging/android/ion/ion_system_heap.c | 103 +- drivers/staging/android/ion/msm_ion_priv.h | 17 - 9 files changed, 245 insertions(+), 921 deletions(-) diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig index 563e4a82a73a..8dfb485a9ad9 100644 --- a/drivers/staging/android/ion/Kconfig +++ b/drivers/staging/android/ion/Kconfig @@ -44,18 +44,6 @@ config ION_CMA_HEAP by the Contiguous Memory Allocator (CMA). If your system has these regions, you should say Y here. -config ION_FORCE_DMA_SYNC - bool "Force ION to always DMA sync buffer memory" - depends on ION - help - Force ION to DMA sync buffer memory when it is allocated and to - always DMA sync the buffer memory on calls to begin/end cpu - access. This makes ION DMA sync behavior similar to that of the - older version of ION. - We generally don't want to enable this config as it breaks the - cache maintenance model. - If you're not sure say N here. - config ION_DEFER_FREE_NO_SCHED_IDLE bool "Increases the priority of ION defer free thead" depends on ION diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c index 83cbcc0dcd8c..3f970b5e2b58 100644 --- a/drivers/staging/android/ion/ion-ioctl.c +++ b/drivers/staging/android/ion/ion-ioctl.c @@ -44,22 +44,11 @@ static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg) return ret ?
-EINVAL : 0; } -/* fix up the cases where the ioctl direction bits are incorrect */ -static unsigned int ion_ioctl_dir(unsigned int cmd) -{ - switch (cmd) { - default: - return _IOC_DIR(cmd); - } -} - long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - int ret = 0; - unsigned int dir; + unsigned int dir = _IOC_DIR(cmd); union ion_ioctl_arg data; - - dir = ion_ioctl_dir(cmd); + int ret = 0; if (_IOC_SIZE(cmd) > sizeof(data)) return -EINVAL; @@ -73,10 +62,8 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EFAULT; ret = validate_ioctl_arg(cmd, &data); - if (ret) { - pr_warn_once("%s: ioctl validate failed\n", __func__); + if (ret) return ret; - } if (!(dir & _IOC_WRITE)) memset(&data, 0, sizeof(data)); @@ -84,25 +71,19 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case ION_IOC_ALLOC: { - int fd; - - fd = ion_alloc_fd(data.allocation.len, - data.allocation.heap_id_mask, - data.allocation.flags); + int fd = ion_alloc_fd(data.allocation.len, + data.allocation.heap_id_mask, + data.allocation.flags); if (fd < 0) return fd; data.allocation.fd = fd; - break; } case ION_IOC_HEAP_QUERY: ret = ion_query_heaps(&data.query); break; case ION_IOC_PREFETCH: - { - int ret; - ret = ion_walk_heaps(data.prefetch_data.heap_id, (enum ion_heap_type) ION_HEAP_TYPE_SYSTEM_SECURE, @@ -110,22 +91,18 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ion_system_secure_heap_prefetch); if (ret) return ret; + break; - } case ION_IOC_DRAIN: - { - int ret; - ret = ion_walk_heaps(data.prefetch_data.heap_id, (enum ion_heap_type) ION_HEAP_TYPE_SYSTEM_SECURE, (void *)&data.prefetch_data, ion_system_secure_heap_drain); - if (ret) return ret; + break; - } default: return -ENOTTY; } @@ -134,5 +111,6 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) return -EFAULT; } + return ret; } diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index bac021c2c331..475eb6ab425d 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1,109 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * - * drivers/staging/android/ion/ion.c - * * Copyright (C) 2011 Google, Inc. * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * + * Copyright (C) 2019 Sultan Alsawaf . 
*/ -#include -#include -#include -#include -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#define CREATE_TRACE_POINTS -#include -#include +#include +#include +#include #include "ion.h" #include "ion_secure_util.h" static struct ion_device *internal_dev; +static struct kmem_cache *ion_sg_table_pool; int ion_walk_heaps(int heap_id, enum ion_heap_type type, void *data, int (*f)(struct ion_heap *heap, void *data)) { - int ret_val = 0; - struct ion_heap *heap; struct ion_device *dev = internal_dev; - /* - * traverse the list of heaps available in this system - * and find the heap that is specified. - */ - down_write(&dev->lock); - plist_for_each_entry(heap, &dev->heaps, node) { - if (ION_HEAP(heap->id) != heap_id || - type != heap->type) - continue; - ret_val = f(heap, data); - break; - } - up_write(&dev->lock); - return ret_val; -} -EXPORT_SYMBOL(ion_walk_heaps); - -bool ion_buffer_cached(struct ion_buffer *buffer) -{ - return !!(buffer->flags & ION_FLAG_CACHED); -} + struct ion_heap *heap; + int ret = 0; -/* this function should only be called while dev->lock is held */ -static void ion_buffer_add(struct ion_device *dev, - struct ion_buffer *buffer) -{ - struct rb_node **p = &dev->buffers.rb_node; - struct rb_node *parent = NULL; - struct ion_buffer *entry; - - while (*p) { - parent = *p; - entry = rb_entry(parent, struct ion_buffer, node); - - if (buffer < entry) { - p = &(*p)->rb_left; - } else if (buffer > entry) { - p = &(*p)->rb_right; - } else { - pr_err("%s: buffer already found.", __func__); - BUG(); + down_write(&dev->heap_lock); + plist_for_each_entry(heap, &dev->heaps, node) { + if (heap->type == type && ION_HEAP(heap->id) == heap_id) { + ret = f(heap, data); + break; } } + up_write(&dev->heap_lock); - rb_link_node(&buffer->node, parent, p); - rb_insert_color(&buffer->node, &dev->buffers); + return ret; } -/* this function should only be called while dev->lock is held */ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, struct ion_device *dev, unsigned long len, @@ -113,80 +47,51 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, struct sg_table *table; int ret; - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); - buffer->heap = heap; - buffer->flags = flags; + *buffer = (typeof(*buffer)){ + .dev = dev, + .heap = heap, + .flags = flags, + .size = len, + .attachments = LIST_HEAD_INIT(buffer->attachments), + .vmas = LIST_HEAD_INIT(buffer->vmas), + .attachment_lock = __MUTEX_INITIALIZER(buffer->attachment_lock), + .kmap_lock = __MUTEX_INITIALIZER(buffer->kmap_lock), + .vma_lock = __MUTEX_INITIALIZER(buffer->vma_lock) + }; ret = heap->ops->allocate(heap, buffer, len, flags); - if (ret) { if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) - goto err2; + goto free_buffer; if (ret == -EINTR) - goto err2; + goto free_buffer; ion_heap_freelist_drain(heap, 0); ret = heap->ops->allocate(heap, buffer, len, flags); if (ret) - goto err2; + goto free_buffer; } - if (buffer->sg_table == NULL) { - WARN_ONCE(1, "This heap needs to set the sgtable"); - ret = -EINVAL; - goto err1; - } + if (buffer->sg_table == NULL) + goto free_heap; table = buffer->sg_table; - buffer->dev = dev; - buffer->size = len; - - buffer->dev = dev; - buffer->size = len; - INIT_LIST_HEAD(&buffer->attachments); - INIT_LIST_HEAD(&buffer->vmas); - 
mutex_init(&buffer->lock); - - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - int i; - struct scatterlist *sg; - - /* - * this will set up dma addresses for the sglist -- it is not - * technically correct as per the dma api -- a specific - * device isn't really taking ownership here. However, in - * practice on our systems the only dma_address space is - * physical addresses. - */ - for_each_sg(table->sgl, sg, table->nents, i) { - sg_dma_address(sg) = sg_phys(sg); - sg_dma_len(sg) = sg->length; - } - } - - mutex_lock(&dev->buffer_lock); - ion_buffer_add(dev, buffer); - mutex_unlock(&dev->buffer_lock); - atomic_long_add(len, &heap->total_allocated); return buffer; -err1: +free_heap: heap->ops->free(buffer); -err2: +free_buffer: kfree(buffer); - return ERR_PTR(ret); + return ERR_PTR(-EINVAL); } void ion_buffer_destroy(struct ion_buffer *buffer) { - if (buffer->kmap_cnt > 0) { - pr_warn_ratelimited("ION client likely missing a call to dma_buf_kunmap or dma_buf_vunmap\n"); - buffer->heap->ops->unmap_kernel(buffer->heap, buffer); - } buffer->heap->ops->free(buffer); kfree(buffer); } @@ -194,15 +99,9 @@ void ion_buffer_destroy(struct ion_buffer *buffer) static void _ion_buffer_destroy(struct ion_buffer *buffer) { struct ion_heap *heap = buffer->heap; - struct ion_device *dev = buffer->dev; msm_dma_buf_freed(buffer); - mutex_lock(&dev->buffer_lock); - rb_erase(&buffer->node, &dev->buffers); - mutex_unlock(&dev->buffer_lock); - - atomic_long_sub(buffer->size, &buffer->heap->total_allocated); if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) ion_heap_freelist_add(heap, buffer); else @@ -213,58 +112,60 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer) { void *vaddr; + mutex_lock(&buffer->kmap_lock); if (buffer->kmap_cnt) { + vaddr = buffer->vaddr; buffer->kmap_cnt++; - return buffer->vaddr; + } else { + vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); + if (IS_ERR_OR_NULL(vaddr)) { + vaddr = ERR_PTR(-EINVAL); + } else { + buffer->vaddr = vaddr; + buffer->kmap_cnt++; + } } - vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); - if (WARN_ONCE(vaddr == NULL, - "heap->ops->map_kernel should return ERR_PTR on error")) - return ERR_PTR(-EINVAL); - if (IS_ERR(vaddr)) - return vaddr; - buffer->vaddr = vaddr; - buffer->kmap_cnt++; + mutex_unlock(&buffer->kmap_lock); + return vaddr; } static void ion_buffer_kmap_put(struct ion_buffer *buffer) { - if (buffer->kmap_cnt == 0) { - pr_warn_ratelimited("ION client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n", - current->pid); - return; - } - - buffer->kmap_cnt--; - if (!buffer->kmap_cnt) { + mutex_lock(&buffer->kmap_lock); + if (!--buffer->kmap_cnt) buffer->heap->ops->unmap_kernel(buffer->heap, buffer); - buffer->vaddr = NULL; - } + mutex_unlock(&buffer->kmap_lock); } static struct sg_table *dup_sg_table(struct sg_table *table) { + struct scatterlist *sg, *new_sg; struct sg_table *new_table; int ret, i; - struct scatterlist *sg, *new_sg; - new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); + new_table = kmem_cache_alloc(ion_sg_table_pool, GFP_KERNEL); if (!new_table) return ERR_PTR(-ENOMEM); ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL); if (ret) { - kfree(new_table); + kmem_cache_free(ion_sg_table_pool, new_table); return ERR_PTR(-ENOMEM); } new_sg = new_table->sgl; - for_each_sg(table->sgl, sg, table->nents, i) { - memcpy(new_sg, sg, sizeof(*sg)); + if (new_table->nents <= SG_MAX_SINGLE_ALLOC) { + memcpy(new_sg, table->sgl, new_table->nents * sizeof(*new_sg)); sg_dma_address(new_sg) = 0; 
sg_dma_len(new_sg) = 0; - new_sg = sg_next(new_sg); + } else { + for_each_sg(table->sgl, sg, table->nents, i) { + *new_sg = *sg; + sg_dma_address(new_sg) = 0; + sg_dma_len(new_sg) = 0; + new_sg = sg_next(new_sg); + } } return new_table; @@ -273,7 +174,7 @@ static struct sg_table *dup_sg_table(struct sg_table *table) static void free_duped_table(struct sg_table *table) { sg_free_table(table); - kfree(table); + kmem_cache_free(ion_sg_table_pool, table); } struct ion_dma_buf_attachment { @@ -300,31 +201,35 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev, return -ENOMEM; } - a->table = table; - a->dev = dev; - a->dma_mapped = false; - INIT_LIST_HEAD(&a->list); + *a = (typeof(*a)){ + .table = table, + .dev = dev + }; attachment->priv = a; - mutex_lock(&buffer->lock); - list_add(&a->list, &buffer->attachments); - mutex_unlock(&buffer->lock); + if (buffer->flags & ION_FLAG_CACHED) { + mutex_lock(&buffer->attachment_lock); + list_add(&a->list, &buffer->attachments); + mutex_unlock(&buffer->attachment_lock); + } return 0; } -static void ion_dma_buf_detatch(struct dma_buf *dmabuf, +static void ion_dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { struct ion_dma_buf_attachment *a = attachment->priv; struct ion_buffer *buffer = dmabuf->priv; - mutex_lock(&buffer->lock); - list_del(&a->list); - mutex_unlock(&buffer->lock); - free_duped_table(a->table); + if (buffer->flags & ION_FLAG_CACHED) { + mutex_lock(&buffer->attachment_lock); + list_del(&a->list); + mutex_unlock(&buffer->attachment_lock); + } + free_duped_table(a->table); kfree(a); } @@ -344,22 +249,6 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, !hlos_accessible_buffer(buffer)) map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; - mutex_lock(&buffer->lock); - if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC) - trace_ion_dma_map_cmo_skip(attachment->dev, - attachment->dmabuf->name, - ion_buffer_cached(buffer), - hlos_accessible_buffer(buffer), - attachment->dma_map_attrs, - direction); - else - trace_ion_dma_map_cmo_apply(attachment->dev, - attachment->dmabuf->name, - ion_buffer_cached(buffer), - hlos_accessible_buffer(buffer), - attachment->dma_map_attrs, - direction); - if (map_attrs & DMA_ATTR_DELAYED_UNMAP) { count = msm_dma_map_sg_attrs(attachment->dev, table->sgl, table->nents, direction, @@ -370,13 +259,10 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, map_attrs); } - if (count <= 0) { - mutex_unlock(&buffer->lock); + if (count <= 0) return ERR_PTR(-ENOMEM); - } a->dma_mapped = true; - mutex_unlock(&buffer->lock); return table; } @@ -393,22 +279,6 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, !hlos_accessible_buffer(buffer)) map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; - mutex_lock(&buffer->lock); - if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC) - trace_ion_dma_unmap_cmo_skip(attachment->dev, - attachment->dmabuf->name, - ion_buffer_cached(buffer), - hlos_accessible_buffer(buffer), - attachment->dma_map_attrs, - direction); - else - trace_ion_dma_unmap_cmo_apply(attachment->dev, - attachment->dmabuf->name, - ion_buffer_cached(buffer), - hlos_accessible_buffer(buffer), - attachment->dma_map_attrs, - direction); - if (map_attrs & DMA_ATTR_DELAYED_UNMAP) msm_dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents, direction, @@ -418,7 +288,6 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents, direction, map_attrs); a->dma_mapped = false; - 
mutex_unlock(&buffer->lock); } void ion_pages_sync_for_device(struct device *dev, struct page *page, @@ -428,11 +297,6 @@ void ion_pages_sync_for_device(struct device *dev, struct page *page, sg_init_table(&sg, 1); sg_set_page(&sg, page, size, 0); - /* - * This is not correct - sg_dma_address needs a dma_addr_t that is valid - * for the targeted device, but this works on the currently targeted - * hardware. - */ sg_dma_address(&sg) = page_to_phys(page); dma_sync_sg_for_device(dev, &sg, 1, dir); } @@ -445,43 +309,42 @@ static void ion_vm_open(struct vm_area_struct *vma) vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL); if (!vma_list) return; + vma_list->vma = vma; - mutex_lock(&buffer->lock); + + mutex_lock(&buffer->vma_lock); list_add(&vma_list->list, &buffer->vmas); - mutex_unlock(&buffer->lock); + mutex_unlock(&buffer->vma_lock); } static void ion_vm_close(struct vm_area_struct *vma) { struct ion_buffer *buffer = vma->vm_private_data; - struct ion_vma_list *vma_list, *tmp; + struct ion_vma_list *vma_list; - mutex_lock(&buffer->lock); - list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { - if (vma_list->vma != vma) - continue; - list_del(&vma_list->list); - kfree(vma_list); - break; + mutex_lock(&buffer->vma_lock); + list_for_each_entry(vma_list, &buffer->vmas, list) { + if (vma_list->vma == vma) { + list_del(&vma_list->list); + break; + } } - mutex_unlock(&buffer->lock); + mutex_unlock(&buffer->vma_lock); + + kfree(vma_list); } static const struct vm_operations_struct ion_vma_ops = { .open = ion_vm_open, - .close = ion_vm_close, + .close = ion_vm_close }; static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { struct ion_buffer *buffer = dmabuf->priv; - int ret = 0; - if (!buffer->heap->ops->map_user) { - pr_err("%s: this heap does not define a method for mapping to userspace\n", - __func__); + if (!buffer->heap->ops->map_user) return -EINVAL; - } if (!(buffer->flags & ION_FLAG_CACHED)) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); @@ -490,16 +353,7 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) vma->vm_ops = &ion_vma_ops; ion_vm_open(vma); - mutex_lock(&buffer->lock); - /* now map it to userspace */ - ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); - mutex_unlock(&buffer->lock); - - if (ret) - pr_err("%s: failure mapping buffer to userspace\n", - __func__); - - return ret; + return buffer->heap->ops->map_user(buffer->heap, buffer, vma); } static void ion_dma_buf_release(struct dma_buf *dmabuf) @@ -507,35 +361,23 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf) struct ion_buffer *buffer = dmabuf->priv; _ion_buffer_destroy(buffer); - kfree(dmabuf->exp_name); } static void *ion_dma_buf_vmap(struct dma_buf *dmabuf) { struct ion_buffer *buffer = dmabuf->priv; - void *vaddr = ERR_PTR(-EINVAL); - if (buffer->heap->ops->map_kernel) { - mutex_lock(&buffer->lock); - vaddr = ion_buffer_kmap_get(buffer); - mutex_unlock(&buffer->lock); - } else { - pr_warn_ratelimited("heap %s doesn't support map_kernel\n", - buffer->heap->name); - } + if (!buffer->heap->ops->map_kernel) + return ERR_PTR(-EINVAL); - return vaddr; + return ion_buffer_kmap_get(buffer); } static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) { struct ion_buffer *buffer = dmabuf->priv; - if (buffer->heap->ops->map_kernel) { - mutex_lock(&buffer->lock); - ion_buffer_kmap_put(buffer); - mutex_unlock(&buffer->lock); - } + ion_buffer_kmap_put(buffer); } static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) @@ 
-649,61 +491,18 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, struct ion_dma_buf_attachment *a; int ret = 0; - if (!hlos_accessible_buffer(buffer)) { - trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, - ion_buffer_cached(buffer), - false, direction, - sync_only_mapped); - ret = -EPERM; - goto out; - } - - if (!(buffer->flags & ION_FLAG_CACHED)) { - trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false, - true, direction, - sync_only_mapped); - goto out; - } - - mutex_lock(&buffer->lock); + if (!hlos_accessible_buffer(buffer)) + return -EPERM; - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - struct device *dev = buffer->heap->priv; - struct sg_table *table = buffer->sg_table; - - if (sync_only_mapped) - ret = ion_sgl_sync_mapped(dev, table->sgl, - table->nents, &buffer->vmas, - direction, true); - else - dma_sync_sg_for_cpu(dev, table->sgl, - table->nents, direction); - - if (!ret) - trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name, - true, true, - direction, - sync_only_mapped); - else - trace_ion_begin_cpu_access_cmo_skip(dev, dmabuf->name, - true, true, - direction, - sync_only_mapped); - mutex_unlock(&buffer->lock); - goto out; - } + if (!(buffer->flags & ION_FLAG_CACHED)) + return 0; + mutex_lock(&buffer->attachment_lock); list_for_each_entry(a, &buffer->attachments, list) { int tmp = 0; - if (!a->dma_mapped) { - trace_ion_begin_cpu_access_notmapped(a->dev, - dmabuf->name, - true, true, - direction, - sync_only_mapped); + if (!a->dma_mapped) continue; - } if (sync_only_mapped) tmp = ion_sgl_sync_mapped(a->dev, a->table->sgl, @@ -714,24 +513,11 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents, direction); - if (!tmp) { - trace_ion_begin_cpu_access_cmo_apply(a->dev, - dmabuf->name, - true, true, - direction, - sync_only_mapped); - } else { - trace_ion_begin_cpu_access_cmo_skip(a->dev, - dmabuf->name, true, - true, direction, - sync_only_mapped); + if (tmp) ret = tmp; - } - } - mutex_unlock(&buffer->lock); + mutex_unlock(&buffer->attachment_lock); -out: return ret; } @@ -743,59 +529,18 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, struct ion_dma_buf_attachment *a; int ret = 0; - if (!hlos_accessible_buffer(buffer)) { - trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, - ion_buffer_cached(buffer), - false, direction, - sync_only_mapped); - ret = -EPERM; - goto out; - } + if (!hlos_accessible_buffer(buffer)) + return -EPERM; - if (!(buffer->flags & ION_FLAG_CACHED)) { - trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false, - true, direction, - sync_only_mapped); - goto out; - } - - mutex_lock(&buffer->lock); - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - struct device *dev = buffer->heap->priv; - struct sg_table *table = buffer->sg_table; - - if (sync_only_mapped) - ret = ion_sgl_sync_mapped(dev, table->sgl, - table->nents, &buffer->vmas, - direction, false); - else - dma_sync_sg_for_device(dev, table->sgl, - table->nents, direction); - - if (!ret) - trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name, - true, true, - direction, - sync_only_mapped); - else - trace_ion_end_cpu_access_cmo_skip(dev, dmabuf->name, - true, true, direction, - sync_only_mapped); - mutex_unlock(&buffer->lock); - goto out; - } + if (!(buffer->flags & ION_FLAG_CACHED)) + return 0; + mutex_lock(&buffer->attachment_lock); list_for_each_entry(a, &buffer->attachments, list) { int tmp = 0; - if (!a->dma_mapped) { - trace_ion_end_cpu_access_notmapped(a->dev, - 
dmabuf->name, - true, true, - direction, - sync_only_mapped); + if (!a->dma_mapped) continue; - } if (sync_only_mapped) tmp = ion_sgl_sync_mapped(a->dev, a->table->sgl, @@ -806,21 +551,11 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents, direction); - if (!tmp) { - trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name, - true, true, - direction, - sync_only_mapped); - } else { - trace_ion_end_cpu_access_cmo_skip(a->dev, dmabuf->name, - true, true, direction, - sync_only_mapped); + if (tmp) ret = tmp; - } } - mutex_unlock(&buffer->lock); + mutex_unlock(&buffer->attachment_lock); -out: return ret; } @@ -857,74 +592,27 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, struct ion_dma_buf_attachment *a; int ret = 0; - if (!hlos_accessible_buffer(buffer)) { - trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, - ion_buffer_cached(buffer), - false, dir, - false); - ret = -EPERM; - goto out; - } - - if (!(buffer->flags & ION_FLAG_CACHED)) { - trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false, - true, dir, - false); - goto out; - } - - mutex_lock(&buffer->lock); - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - struct device *dev = buffer->heap->priv; - struct sg_table *table = buffer->sg_table; - - ret = ion_sgl_sync_range(dev, table->sgl, table->nents, - offset, len, dir, true); + if (!hlos_accessible_buffer(buffer)) + return -EPERM; - if (!ret) - trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name, - true, true, dir, - false); - else - trace_ion_begin_cpu_access_cmo_skip(dev, dmabuf->name, - true, true, dir, - false); - mutex_unlock(&buffer->lock); - goto out; - } + if (!(buffer->flags & ION_FLAG_CACHED)) + return 0; + mutex_lock(&buffer->attachment_lock); list_for_each_entry(a, &buffer->attachments, list) { int tmp = 0; - if (!a->dma_mapped) { - trace_ion_begin_cpu_access_notmapped(a->dev, - dmabuf->name, - true, true, - dir, - false); + if (!a->dma_mapped) continue; - } - tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents, + tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents, offset, len, dir, true); - if (!tmp) { - trace_ion_begin_cpu_access_cmo_apply(a->dev, - dmabuf->name, - true, true, dir, - false); - } else { - trace_ion_begin_cpu_access_cmo_skip(a->dev, - dmabuf->name, - true, true, dir, - false); + if (tmp) ret = tmp; - } - } - mutex_unlock(&buffer->lock); + mutex_unlock(&buffer->attachment_lock); -out: return ret; } @@ -937,73 +625,27 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, struct ion_dma_buf_attachment *a; int ret = 0; - if (!hlos_accessible_buffer(buffer)) { - trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, - ion_buffer_cached(buffer), - false, direction, - false); - ret = -EPERM; - goto out; - } - - if (!(buffer->flags & ION_FLAG_CACHED)) { - trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false, - true, direction, - false); - goto out; - } - - mutex_lock(&buffer->lock); - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - struct device *dev = buffer->heap->priv; - struct sg_table *table = buffer->sg_table; + if (!hlos_accessible_buffer(buffer)) + return -EPERM; - ret = ion_sgl_sync_range(dev, table->sgl, table->nents, - offset, len, direction, false); - - if (!ret) - trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name, - true, true, - direction, false); - else - trace_ion_end_cpu_access_cmo_skip(dev, dmabuf->name, - true, true, - direction, false); - - mutex_unlock(&buffer->lock); - goto
out; - } + if (!(buffer->flags & ION_FLAG_CACHED)) + return 0; + mutex_lock(&buffer->attachment_lock); list_for_each_entry(a, &buffer->attachments, list) { int tmp = 0; - if (!a->dma_mapped) { - trace_ion_end_cpu_access_notmapped(a->dev, - dmabuf->name, - true, true, - direction, - false); + if (!a->dma_mapped) continue; - } - tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents, + tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents, offset, len, direction, false); - if (!tmp) { - trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name, - true, true, - direction, false); - - } else { - trace_ion_end_cpu_access_cmo_skip(a->dev, dmabuf->name, - true, true, direction, - false); + if (tmp) ret = tmp; - } } - mutex_unlock(&buffer->lock); + mutex_unlock(&buffer->attachment_lock); -out: return ret; } @@ -1011,8 +653,8 @@ static int ion_dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags) { struct ion_buffer *buffer = dmabuf->priv; - *flags = buffer->flags; + *flags = buffer->flags; return 0; } @@ -1022,7 +664,7 @@ static const struct dma_buf_ops dma_buf_ops = { .mmap = ion_mmap, .release = ion_dma_buf_release, .attach = ion_dma_buf_attach, - .detach = ion_dma_buf_detatch, + .detach = ion_dma_buf_detach, .begin_cpu_access = ion_dma_buf_begin_cpu_access, .end_cpu_access = ion_dma_buf_end_cpu_access, .begin_cpu_access_umapped = ion_dma_buf_begin_cpu_access_umapped,
Repeat until allocate has - * succeeded or all heaps have been tried - */ len = PAGE_ALIGN(len); - if (!len) return ERR_PTR(-EINVAL); - down_read(&dev->lock); + down_read(&dev->heap_lock); plist_for_each_entry(heap, &dev->heaps, node) { - /* if the caller didn't specify this heap id */ - if (!((1 << heap->id) & heap_id_mask)) + if (!(BIT(heap->id) & heap_id_mask)) continue; + buffer = ion_buffer_create(heap, dev, len, flags); if (!IS_ERR(buffer) || PTR_ERR(buffer) == -EINTR) break; } - up_read(&dev->lock); - - if (!buffer) - return ERR_PTR(-ENODEV); - - if (IS_ERR(buffer)) - return ERR_CAST(buffer); + up_read(&dev->heap_lock); - get_task_comm(task_comm, current->group_leader); + if (IS_ERR_OR_NULL(buffer)) + return ERR_PTR(-EINVAL); - exp_info.ops = &dma_buf_ops; - exp_info.size = buffer->size; - exp_info.flags = O_RDWR; - exp_info.priv = buffer; - exp_info.exp_name = kasprintf(GFP_KERNEL, "%s-%s-%d-%s", KBUILD_MODNAME, - heap->name, current->tgid, task_comm); + exp_info = (typeof(exp_info)){ + .ops = &dma_buf_ops, + .flags = O_RDWR, + .size = buffer->size, + .priv = buffer + }; dmabuf = dma_buf_export(&exp_info); - if (IS_ERR(dmabuf)) { + if (IS_ERR(dmabuf)) _ion_buffer_destroy(buffer); - kfree(exp_info.exp_name); - } return dmabuf; } -struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask, - unsigned int flags) -{ - struct ion_device *dev = internal_dev; - struct ion_heap *heap; - bool type_valid = false; - - pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__, - len, heap_id_mask, flags); - /* - * traverse the list of heaps available in this system in priority - * order. Check the heap type is supported. - */ - - down_read(&dev->lock); - plist_for_each_entry(heap, &dev->heaps, node) { - /* if the caller didn't specify this heap id */ - if (!((1 << heap->id) & heap_id_mask)) - continue; - if (heap->type == ION_HEAP_TYPE_SYSTEM || - heap->type == (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA || - heap->type == - (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE) { - type_valid = true; - } else { - pr_warn("%s: heap type not supported, type:%d\n", - __func__, heap->type); - } - break; - } - up_read(&dev->lock); - - if (!type_valid) - return ERR_PTR(-EINVAL); - - return ion_alloc_dmabuf(len, heap_id_mask, flags); -} -EXPORT_SYMBOL(ion_alloc); - int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags) { - int fd; struct dma_buf *dmabuf; + int fd; - dmabuf = ion_alloc_dmabuf(len, heap_id_mask, flags); - if (IS_ERR(dmabuf)) { + dmabuf = ion_alloc(len, heap_id_mask, flags); + if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); - } fd = dma_buf_fd(dmabuf, O_CLOEXEC); if (fd < 0) @@ -1154,47 +739,47 @@ int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags) int ion_query_heaps(struct ion_heap_query *query) { + struct ion_heap_data __user *ubuf = u64_to_user_ptr(query->heaps); struct ion_device *dev = internal_dev; - struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps); - int ret = -EINVAL, cnt = 0, max_cnt; - struct ion_heap *heap; struct ion_heap_data hdata; + struct ion_heap *heap; + int cnt = 0, max_cnt; memset(&hdata, 0, sizeof(hdata)); - down_read(&dev->lock); - if (!buffer) { + if (!ubuf) { + down_read(&dev->heap_lock); query->cnt = dev->heap_cnt; - ret = 0; - goto out; + up_read(&dev->heap_lock); + + return 0; } if (query->cnt <= 0) - goto out; + return -EINVAL; max_cnt = query->cnt; + down_read(&dev->heap_lock); plist_for_each_entry(heap, &dev->heaps, node) { strlcpy(hdata.name, heap->name, sizeof(hdata.name)); 
hdata.name[sizeof(hdata.name) - 1] = '\0'; hdata.type = heap->type; hdata.heap_id = heap->id; - if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) { - ret = -EFAULT; - goto out; + if (copy_to_user(&ubuf[cnt], &hdata, sizeof(hdata))) { + up_read(&dev->heap_lock); + return -EFAULT; } cnt++; if (cnt >= max_cnt) break; } + up_read(&dev->heap_lock); query->cnt = cnt; - ret = 0; -out: - up_read(&dev->lock); - return ret; + return 0; } static const struct file_operations ion_fops = { @@ -1205,121 +790,55 @@ static const struct file_operations ion_fops = { #endif }; -static int debug_shrink_set(void *data, u64 val) -{ - struct ion_heap *heap = data; - struct shrink_control sc; - int objs; - - sc.gfp_mask = GFP_HIGHUSER; - sc.nr_to_scan = val; - - if (!val) { - objs = heap->shrinker.count_objects(&heap->shrinker, &sc); - sc.nr_to_scan = objs; - } - - heap->shrinker.scan_objects(&heap->shrinker, &sc); - return 0; -} - -static int debug_shrink_get(void *data, u64 *val) -{ - struct ion_heap *heap = data; - struct shrink_control sc; - int objs; - - sc.gfp_mask = GFP_HIGHUSER; - sc.nr_to_scan = 0; - - objs = heap->shrinker.count_objects(&heap->shrinker, &sc); - *val = objs; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, - debug_shrink_set, "%llu\n"); - void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) { - struct dentry *debug_file; - - if (!heap->ops->allocate || !heap->ops->free) - pr_err("%s: can not add heap with invalid ops struct.\n", - __func__); - spin_lock_init(&heap->free_lock); heap->free_list_size = 0; if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) ion_heap_init_deferred_free(heap); - if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE || heap->ops->shrink) ion_heap_init_shrinker(heap); heap->dev = dev; - down_write(&dev->lock); - /* - * use negative heap->id to reverse the priority -- when traversing - * the list later attempt higher id numbers first - */ plist_node_init(&heap->node, -heap->id); - plist_add(&heap->node, &dev->heaps); - - if (heap->shrinker.count_objects && heap->shrinker.scan_objects) { - char debug_name[64]; - - snprintf(debug_name, 64, "%s_shrink", heap->name); - debug_file = debugfs_create_file( - debug_name, 0644, dev->debug_root, heap, - &debug_shrink_fops); - if (!debug_file) { - char buf[256], *path; - - path = dentry_path(dev->debug_root, buf, 256); - pr_err("Failed to create heap shrinker debugfs at %s/%s\n", - path, debug_name); - } - } + down_write(&dev->heap_lock); + plist_add(&heap->node, &dev->heaps); dev->heap_cnt++; - up_write(&dev->lock); + up_write(&dev->heap_lock); } -EXPORT_SYMBOL(ion_device_add_heap); struct ion_device *ion_device_create(void) { - struct ion_device *idev; + struct ion_device *dev; int ret; - idev = kzalloc(sizeof(*idev), GFP_KERNEL); - if (!idev) + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) return ERR_PTR(-ENOMEM); - idev->dev.minor = MISC_DYNAMIC_MINOR; - idev->dev.name = "ion"; - idev->dev.fops = &ion_fops; - idev->dev.parent = NULL; - ret = misc_register(&idev->dev); - if (ret) { - pr_err("ion: failed to register misc device.\n"); - kfree(idev); - return ERR_PTR(ret); - } + ion_sg_table_pool = KMEM_CACHE(sg_table, SLAB_HWCACHE_ALIGN); + if (!ion_sg_table_pool) + goto free_dev; - idev->debug_root = debugfs_create_dir("ion", NULL); - if (!idev->debug_root) { - pr_err("ion: failed to create debugfs root directory.\n"); - goto debugfs_done; - } + dev->dev.minor = MISC_DYNAMIC_MINOR; + dev->dev.name = "ion"; + 
dev->dev.fops = &ion_fops; + dev->dev.parent = NULL; + ret = misc_register(&dev->dev); + if (ret) + goto free_table_pool; -debugfs_done: + init_rwsem(&dev->heap_lock); + plist_head_init(&dev->heaps); + internal_dev = dev; + return dev; - idev->buffers = RB_ROOT; - mutex_init(&idev->buffer_lock); - init_rwsem(&idev->lock); - plist_head_init(&idev->heaps); - internal_dev = idev; - return idev; +free_table_pool: + kmem_cache_destroy(ion_sg_table_pool); +free_dev: + kfree(dev); + return ERR_PTR(-ENOMEM); } -EXPORT_SYMBOL(ion_device_create); diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index 9e98c9c2f452..6ca8a873cb93 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -47,21 +47,6 @@ #define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED) -/** - * Debug feature. Make ION allocations DMA - * ready to help identify clients who are wrongly - * dependending on ION allocations being DMA - * ready. - * - * As default set to 'false' since ION allocations - * are no longer required to be DMA ready - */ -#ifdef CONFIG_ION_FORCE_DMA_SYNC -#define MAKE_ION_ALLOC_DMA_READY 1 -#else -#define MAKE_ION_ALLOC_DMA_READY 0 -#endif - /** * struct ion_platform_heap - defines a heap in the given platform * @type: type of the heap from ion_heap_type enum @@ -120,18 +105,16 @@ struct ion_vma_list { * @vmas: list of vma's mapping this buffer */ struct ion_buffer { - union { - struct rb_node node; - struct list_head list; - }; + struct list_head list; struct ion_device *dev; struct ion_heap *heap; unsigned long flags; unsigned long private_flags; size_t size; void *priv_virt; - /* Protect ion buffer */ - struct mutex lock; + struct mutex attachment_lock; + struct mutex kmap_lock; + struct mutex vma_lock; int kmap_cnt; void *vaddr; struct sg_table *sg_table; @@ -141,21 +124,10 @@ struct ion_buffer { void ion_buffer_destroy(struct ion_buffer *buffer); -/** - * struct ion_device - the metadata of the ion device node - * @dev: the actual misc device - * @buffers: an rb tree of all the existing buffers - * @buffer_lock: lock protecting the tree of buffers - * @lock: rwsem protecting the tree of heaps and clients - */ struct ion_device { struct miscdevice dev; - struct rb_root buffers; - /* buffer_lock used for adding and removing buffers */ - struct mutex buffer_lock; - struct rw_semaphore lock; struct plist_head heaps; - struct dentry *debug_root; + struct rw_semaphore heap_lock; int heap_cnt; }; @@ -220,8 +192,6 @@ struct ion_heap_ops { * @lock: protects the free list * @waitqueue: queue to wait on from deferred free thread * @task: task struct of deferred free thread - * @debug_show: called when heap debug file is read to add any - * heap specific debug info to output * * Represents a pool of memory from which buffers can be made. In some * systems the only heap is regular system memory allocated via vmalloc. 
@@ -243,28 +213,12 @@ struct ion_heap { /* Protect the free list */ spinlock_t free_lock; wait_queue_head_t waitqueue; - struct task_struct *task; - atomic_long_t total_allocated; - - int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); }; -/** - * ion_buffer_cached - this ion buffer is cached - * @buffer: buffer - * - * indicates whether this ion buffer is cached - */ -bool ion_buffer_cached(struct ion_buffer *buffer); - -/** - * ion_buffer_fault_user_mappings - fault in user mappings of this buffer - * @buffer: buffer - * - * indicates whether userspace mappings of this buffer will be faulted - * in, this can affect how buffers are allocated from the heap. - */ -bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer); +static inline bool ion_buffer_cached(struct ion_buffer *buffer) +{ + return buffer->flags & ION_FLAG_CACHED; +} /** * ion_device_create - allocates and returns an ion device @@ -437,7 +391,7 @@ struct ion_page_pool { struct list_head high_items; struct list_head low_items; /* Protect the pool */ - struct mutex mutex; + spinlock_t lock; gfp_t gfp_mask; unsigned int order; struct plist_node list; diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index 7782183b993b..fe6c5c52a816 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -112,8 +112,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, } } - if (MAKE_ION_ALLOC_DMA_READY || - (flags & ION_FLAG_SECURE) || + if ((flags & ION_FLAG_SECURE) || !ion_buffer_cached(buffer)) ion_pages_sync_for_device(dev, pages, size, DMA_BIDIRECTIONAL); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index b187ff340094..9b6bcacc746f 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -224,8 +224,17 @@ size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size) static int ion_heap_deferred_free(void *data) { +#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE + static const struct sched_param param = { + .sched_priority = 0 + }; +#endif struct ion_heap *heap = data; +#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE + sched_setscheduler(current, SCHED_IDLE, ¶m); +#endif + while (true) { struct ion_buffer *buffer; @@ -250,21 +259,16 @@ static int ion_heap_deferred_free(void *data) int ion_heap_init_deferred_free(struct ion_heap *heap) { -#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE - struct sched_param param = { .sched_priority = 0 }; -#endif + struct task_struct *thread; + INIT_LIST_HEAD(&heap->free_list); init_waitqueue_head(&heap->waitqueue); - heap->task = kthread_run(ion_heap_deferred_free, heap, - "%s", heap->name); - if (IS_ERR(heap->task)) { + thread = kthread_run(ion_heap_deferred_free, heap, "%s", heap->name); + if (IS_ERR(thread)) { pr_err("%s: creating thread for deferred free failed\n", __func__); - return PTR_ERR_OR_ZERO(heap->task); + return PTR_ERR_OR_ZERO(thread); } -#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE - sched_setscheduler(heap->task, SCHED_IDLE, ¶m); -#endif return 0; } diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 2568a5184865..9de62706149c 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -41,7 +41,7 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool, static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) { - 
mutex_lock(&pool->mutex); + spin_lock(&pool->lock); if (PageHighMem(page)) { list_add_tail(&page->lru, &pool->high_items); pool->high_count++; @@ -52,7 +52,7 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) mod_node_page_state(page_pgdat(page), NR_INDIRECTLY_RECLAIMABLE_BYTES, (1 << (PAGE_SHIFT + pool->order))); - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); return 0; } @@ -85,12 +85,12 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool) if (fatal_signal_pending(current)) return ERR_PTR(-EINTR); - if (*from_pool && mutex_trylock(&pool->mutex)) { + if (*from_pool && spin_trylock(&pool->lock)) { if (pool->high_count) page = ion_page_pool_remove(pool, true); else if (pool->low_count) page = ion_page_pool_remove(pool, false); - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); } if (!page) { page = ion_page_pool_alloc_pages(pool); @@ -112,12 +112,12 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool) if (!pool) return ERR_PTR(-EINVAL); - if (mutex_trylock(&pool->mutex)) { + if (spin_trylock(&pool->lock)) { if (pool->high_count) page = ion_page_pool_remove(pool, true); else if (pool->low_count) page = ion_page_pool_remove(pool, false); - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); } if (!page) @@ -166,16 +166,16 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, while (freed < nr_to_scan) { struct page *page; - mutex_lock(&pool->mutex); + spin_lock(&pool->lock); if (pool->low_count) { page = ion_page_pool_remove(pool, false); } else if (high && pool->high_count) { page = ion_page_pool_remove(pool, true); } else { - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); break; } - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); ion_page_pool_free_pages(pool, page); freed += (1 << pool->order); } @@ -196,7 +196,7 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order, INIT_LIST_HEAD(&pool->high_items); pool->gfp_mask = gfp_mask; pool->order = order; - mutex_init(&pool->mutex); + spin_lock_init(&pool->lock); plist_node_init(&pool->list, order); if (cached) pool->cached = true; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 8978e3cf2d08..41d8c7cd9bbf 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -86,7 +86,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap, if (IS_ERR(page)) return page; - if ((MAKE_ION_ALLOC_DMA_READY && vmid <= 0) || !(*from_pool)) + if (!(*from_pool)) ion_pages_sync_for_device(dev, page, PAGE_SIZE << order, DMA_BIDIRECTIONAL); @@ -524,106 +524,6 @@ static struct ion_heap_ops system_heap_ops = { .shrink = ion_system_heap_shrink, }; -static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, - void *unused) -{ - - struct ion_system_heap *sys_heap = container_of( - heap, struct ion_system_heap, heap); - bool use_seq = s; - unsigned long uncached_total = 0; - unsigned long cached_total = 0; - unsigned long secure_total = 0; - struct ion_page_pool *pool; - int i, j; - - for (i = 0; i < NUM_ORDERS; i++) { - pool = sys_heap->uncached_pools[i]; - if (use_seq) { - seq_printf(s, - "%d order %u highmem pages in uncached pool = %lu total\n", - pool->high_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->high_count); - seq_printf(s, - "%d order %u lowmem pages in uncached pool = %lu total\n", - pool->low_count, pool->order, - (1 << 
pool->order) * PAGE_SIZE * - pool->low_count); - } - - uncached_total += (1 << pool->order) * PAGE_SIZE * - pool->high_count; - uncached_total += (1 << pool->order) * PAGE_SIZE * - pool->low_count; - } - - for (i = 0; i < NUM_ORDERS; i++) { - pool = sys_heap->cached_pools[i]; - if (use_seq) { - seq_printf(s, - "%d order %u highmem pages in cached pool = %lu total\n", - pool->high_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->high_count); - seq_printf(s, - "%d order %u lowmem pages in cached pool = %lu total\n", - pool->low_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->low_count); - } - - cached_total += (1 << pool->order) * PAGE_SIZE * - pool->high_count; - cached_total += (1 << pool->order) * PAGE_SIZE * - pool->low_count; - } - - for (i = 0; i < NUM_ORDERS; i++) { - for (j = 0; j < VMID_LAST; j++) { - if (!is_secure_vmid_valid(j)) - continue; - pool = sys_heap->secure_pools[j][i]; - - if (use_seq) { - seq_printf(s, - "VMID %d: %d order %u highmem pages in secure pool = %lu total\n", - j, pool->high_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->high_count); - seq_printf(s, - "VMID %d: %d order %u lowmem pages in secure pool = %lu total\n", - j, pool->low_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->low_count); - } - - secure_total += (1 << pool->order) * PAGE_SIZE * - pool->high_count; - secure_total += (1 << pool->order) * PAGE_SIZE * - pool->low_count; - } - } - - if (use_seq) { - seq_puts(s, "--------------------------------------------\n"); - seq_printf(s, "uncached pool = %lu cached pool = %lu secure pool = %lu\n", - uncached_total, cached_total, secure_total); - seq_printf(s, "pool total (uncached + cached + secure) = %lu\n", - uncached_total + cached_total + secure_total); - seq_puts(s, "--------------------------------------------\n"); - } else { - pr_info("-------------------------------------------------\n"); - pr_info("uncached pool = %lu cached pool = %lu secure pool = %lu\n", - uncached_total, cached_total, secure_total); - pr_info("pool total (uncached + cached + secure) = %lu\n", - uncached_total + cached_total + secure_total); - pr_info("-------------------------------------------------\n"); - } - - return 0; -} - static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) { int i; @@ -694,7 +594,6 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data) mutex_init(&heap->split_page_mutex); - heap->heap.debug_show = ion_system_heap_debug_show; return &heap->heap; destroy_uncached_pools: diff --git a/drivers/staging/android/ion/msm_ion_priv.h b/drivers/staging/android/ion/msm_ion_priv.h index 181ae1cc2c05..11baa424aa6d 100644 --- a/drivers/staging/android/ion/msm_ion_priv.h +++ b/drivers/staging/android/ion/msm_ion_priv.h @@ -40,10 +40,6 @@ int ion_system_secure_heap_drain(struct ion_heap *heap, void *data); struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap); void ion_cma_secure_heap_destroy(struct ion_heap *heap); -long msm_ion_custom_ioctl(struct ion_client *client, - unsigned int cmd, - unsigned long arg); - #ifdef CONFIG_CMA struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *heap); void ion_secure_cma_heap_destroy(struct ion_heap *heap); @@ -95,18 +91,5 @@ bool is_secure_vmid_valid(int vmid); int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid); int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid); -/** - * ion_create_chunked_sg_table - helper function to create sg table - * with 
specified chunk size - * @buffer_base: The starting address used for the sg dma address - * @chunk_size: The size of each entry in the sg table - * @total_size: The total size of the sg table (i.e. the sum of the - * entries). This will be rounded up to the nearest - * multiple of `chunk_size' - */ -struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base, - size_t chunk_size, - size_t total_size); - void show_ion_usage(struct ion_device *dev); #endif /* _MSM_ION_PRIV_H */
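For readers studying the diff: the page-pool hunks above swap a mutex for a spinlock because every critical section only moves list entries and updates counters, so nothing can sleep while the lock is held. The following is a minimal, hypothetical sketch of that pattern, including the trylock fast path the patch uses so the allocation hot path never stalls on pool contention. struct example_pool and its functions are illustrative stand-ins, not types from this driver.

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

struct example_pool {
	spinlock_t lock;	/* protects items and count; never held while sleeping */
	struct list_head items;
	int count;
};

static void example_pool_add(struct example_pool *pool, struct page *page)
{
	/* Short, non-sleeping critical section: a spinlock is enough. */
	spin_lock(&pool->lock);
	list_add_tail(&page->lru, &pool->items);
	pool->count++;
	spin_unlock(&pool->lock);
}

static struct page *example_pool_alloc(struct example_pool *pool)
{
	struct page *page = NULL;

	/* Opportunistic trylock: never wait on a contended pool. */
	if (spin_trylock(&pool->lock)) {
		if (pool->count) {
			page = list_first_entry(&pool->items,
						struct page, lru);
			list_del(&page->lru);
			pool->count--;
		}
		spin_unlock(&pool->lock);
	}

	/* Fall back to the page allocator if the pool was busy or empty. */
	if (!page)
		page = alloc_pages(GFP_KERNEL, 0);

	return page;
}

The same reasoning drives the buffer-side change from one big buffer->lock to the per-concern attachment_lock, kmap_lock, and vma_lock: each lock now covers one short, independent critical section instead of serializing unrelated operations.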
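Likewise, here is a minimal sketch of the dup_sg_table speedup the commit message describes: a dedicated slab cache for struct sg_table plus a straight entry-copy loop. This is a simplified reconstruction rather than the patch's exact code (example_sg_table_cache and example_dup_sg_table are made-up names); it relies on sg_alloc_table() zeroing the new table and its entries, so the duplicate's DMA address/length fields start out clear, and it uses sg_set_page() so the destination's chain and end markers are preserved.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct kmem_cache *example_sg_table_cache;

static int __init example_cache_init(void)
{
	/* One cacheline-aligned slab cache sized for struct sg_table. */
	example_sg_table_cache = KMEM_CACHE(sg_table, SLAB_HWCACHE_ALIGN);
	return example_sg_table_cache ? 0 : -ENOMEM;
}

static struct sg_table *example_dup_sg_table(struct sg_table *table)
{
	struct scatterlist *sg, *new_sg;
	struct sg_table *new_table;
	int i;

	/* sg_alloc_table() zeroes *new_table, so no kzalloc is needed. */
	new_table = kmem_cache_alloc(example_sg_table_cache, GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(new_table, table->nents, GFP_KERNEL)) {
		kmem_cache_free(example_sg_table_cache, new_table);
		return ERR_PTR(-ENOMEM);
	}

	/* Clone page/length/offset; the fresh DMA fields stay zeroed. */
	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

A matching free path would call sg_free_table() and then kmem_cache_free(), as free_duped_table() does in the patch above.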