Commit e50f4e6

Carlos Llamas authored and Greg Kroah-Hartman (gregkh) committed
binder: reverse locking order in shrinker callback
The locking order currently requires the alloc->mutex to be acquired first followed by the mmap lock. However, the alloc->mutex is converted into a spinlock in subsequent commits so the order needs to be reversed to avoid nesting the sleeping mmap lock under the spinlock.

The shrinker's callback binder_alloc_free_page() is the only place that needs to be reordered since other functions have been refactored and no longer nest these locks.

Some minor cosmetic changes are also included in this patch.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-28-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 162c797 commit e50f4e6
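
The hazard the commit message describes is a sleeping lock nested inside a spinlock: the mmap lock is an rwsem, so once alloc->mutex becomes a spinlock the mmap lock has to sit outside it. Below is a minimal sketch of the reordered acquisition, with hypothetical demo_* names standing in for the real binder structures (illustrative only, not the driver code):

/*
 * Sketch: the sleeping mmap lock (an rwsem) is taken first and the
 * spinlock is innermost, so the task never risks sleeping while the
 * spinlock is held. demo_alloc and demo_shrink_one are placeholders.
 */
#include <linux/mmap_lock.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>

struct demo_alloc {
	spinlock_t lock;	/* what alloc->mutex becomes later in the series */
	struct mm_struct *mm;
};

static bool demo_shrink_one(struct demo_alloc *alloc)
{
	struct mm_struct *mm = alloc->mm;

	if (!mmget_not_zero(mm))	/* pin the mm so it cannot go away */
		return false;
	if (!mmap_read_trylock(mm))	/* sleeping lock: taken first */
		goto err_put_mm;
	if (!spin_trylock(&alloc->lock))	/* spinlock: innermost */
		goto err_unlock_mmap;

	/* ... reclaim work under both locks would go here ... */

	spin_unlock(&alloc->lock);
	mmap_read_unlock(mm);
	mmput_async(mm);
	return true;

err_unlock_mmap:
	mmap_read_unlock(mm);
err_put_mm:
	mmput_async(mm);
	return false;
}

The trylock variants mirror the patch: a shrinker callback runs in reclaim context, so it prefers to skip a page (returning LRU_SKIP) rather than block on a contended lock.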

File tree

1 file changed: +22 -24 lines changed

drivers/android/binder_alloc.c

Lines changed: 22 additions & 24 deletions
@@ -1061,35 +1061,39 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 					    void *cb_arg)
 	__must_hold(lock)
 {
-	struct mm_struct *mm = NULL;
-	struct binder_lru_page *page = container_of(item,
-						    struct binder_lru_page,
-						    lru);
-	struct binder_alloc *alloc;
+	struct binder_lru_page *page = container_of(item, typeof(*page), lru);
+	struct binder_alloc *alloc = page->alloc;
+	struct mm_struct *mm = alloc->mm;
+	struct vm_area_struct *vma;
+	struct page *page_to_free;
 	unsigned long page_addr;
 	size_t index;
-	struct vm_area_struct *vma;
 
-	alloc = page->alloc;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!mmap_read_trylock(mm))
+		goto err_mmap_read_lock_failed;
 	if (!mutex_trylock(&alloc->mutex))
 		goto err_get_alloc_mutex_failed;
-
 	if (!page->page_ptr)
 		goto err_page_already_freed;
 
 	index = page - alloc->pages;
 	page_addr = alloc->buffer + index * PAGE_SIZE;
 
-	mm = alloc->mm;
-	if (!mmget_not_zero(mm))
-		goto err_mmget;
-	if (!mmap_read_trylock(mm))
-		goto err_mmap_read_lock_failed;
 	vma = vma_lookup(mm, page_addr);
 	if (vma && vma != binder_alloc_get_vma(alloc))
 		goto err_invalid_vma;
 
+	trace_binder_unmap_kernel_start(alloc, index);
+
+	page_to_free = page->page_ptr;
+	page->page_ptr = NULL;
+
+	trace_binder_unmap_kernel_end(alloc, index);
+
 	list_lru_isolate(lru, item);
+	mutex_unlock(&alloc->mutex);
 	spin_unlock(lock);
 
 	if (vma) {
@@ -1099,28 +1103,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 		trace_binder_unmap_user_end(alloc, index);
 	}
+
 	mmap_read_unlock(mm);
 	mmput_async(mm);
-
-	trace_binder_unmap_kernel_start(alloc, index);
-
-	__free_page(page->page_ptr);
-	page->page_ptr = NULL;
-
-	trace_binder_unmap_kernel_end(alloc, index);
+	__free_page(page_to_free);
 
 	spin_lock(lock);
-	mutex_unlock(&alloc->mutex);
 	return LRU_REMOVED_RETRY;
 
 err_invalid_vma:
+err_page_already_freed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
 	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
 err_mmget:
-err_page_already_freed:
-	mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
 	return LRU_SKIP;
 }
 
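A detail worth noting in the diff above: __free_page() used to run on page->page_ptr while alloc->mutex was still held, whereas the new code detaches the pointer into page_to_free under the lock and frees it only after every lock is dropped, keeping the soon-to-be spinlock's critical section short. A minimal sketch of that stash-then-free pattern, assuming hypothetical demo_* names (not the binder code):

/*
 * Sketch of the stash-then-free pattern: detach the resource under the
 * lock, release the lock, then free. demo_slot is a placeholder type.
 */
#include <linux/gfp.h>
#include <linux/spinlock.h>

struct demo_slot {
	spinlock_t lock;
	struct page *page;
};

static void demo_release(struct demo_slot *slot)
{
	struct page *page_to_free;

	spin_lock(&slot->lock);
	page_to_free = slot->page;	/* detach while the lock is held */
	slot->page = NULL;		/* concurrent paths now see an empty slot */
	spin_unlock(&slot->lock);

	if (page_to_free)
		__free_page(page_to_free);	/* no locks held here */
}

Clearing the pointer under the lock means no other path can observe a page that is about to be freed, while the free itself happens with no locks held.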