Skip to content

Commit 8b52c72

Browse files
Carlos Llamas authored and gregkh (Greg Kroah-Hartman) committed
Revert "binder: switch alloc->mutex to spinlock_t"
This reverts commit 7710e2c.

In preparation for concurrent page installations, restore the original alloc->mutex which will serialize zap_page_range_single() against page installations in subsequent patches (instead of the mmap_sem).

Resolved trivial conflicts with commit 2c10a20 ("binder_alloc: Fix sleeping function called from invalid context") and commit da0c025 ("mm/list_lru: simplify the list_lru walk callback function").

Cc: Mukesh Ojha <quic_mojha@quicinc.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20241210143114.661252-2-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 94ddd8b commit 8b52c72

File tree

2 files changed

+28
-28
lines changed

2 files changed

+28
-28
lines changed

drivers/android/binder_alloc.c

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -169,9 +169,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
169169
{
170170
struct binder_buffer *buffer;
171171

172-
spin_lock(&alloc->lock);
172+
mutex_lock(&alloc->mutex);
173173
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
174-
spin_unlock(&alloc->lock);
174+
mutex_unlock(&alloc->mutex);
175175
return buffer;
176176
}
177177

@@ -597,18 +597,18 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
597597
if (!next)
598598
return ERR_PTR(-ENOMEM);
599599

600-
spin_lock(&alloc->lock);
600+
mutex_lock(&alloc->mutex);
601601
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
602602
if (IS_ERR(buffer)) {
603-
spin_unlock(&alloc->lock);
603+
mutex_unlock(&alloc->mutex);
604604
goto out;
605605
}
606606

607607
buffer->data_size = data_size;
608608
buffer->offsets_size = offsets_size;
609609
buffer->extra_buffers_size = extra_buffers_size;
610610
buffer->pid = current->tgid;
611-
spin_unlock(&alloc->lock);
611+
mutex_unlock(&alloc->mutex);
612612

613613
ret = binder_install_buffer_pages(alloc, buffer, size);
614614
if (ret) {
@@ -785,17 +785,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
785785
* We could eliminate the call to binder_alloc_clear_buf()
786786
* from binder_alloc_deferred_release() by moving this to
787787
* binder_free_buf_locked(). However, that could
788-
* increase contention for the alloc->lock if clear_on_free
789-
* is used frequently for large buffers. This lock is not
788+
* increase contention for the alloc mutex if clear_on_free
789+
* is used frequently for large buffers. The mutex is not
790790
* needed for correctness here.
791791
*/
792792
if (buffer->clear_on_free) {
793793
binder_alloc_clear_buf(alloc, buffer);
794794
buffer->clear_on_free = false;
795795
}
796-
spin_lock(&alloc->lock);
796+
mutex_lock(&alloc->mutex);
797797
binder_free_buf_locked(alloc, buffer);
798-
spin_unlock(&alloc->lock);
798+
mutex_unlock(&alloc->mutex);
799799
}
800800

801801
/**
@@ -893,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
893893
struct binder_buffer *buffer;
894894

895895
buffers = 0;
896-
spin_lock(&alloc->lock);
896+
mutex_lock(&alloc->mutex);
897897
BUG_ON(alloc->vma);
898898

899899
while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -940,7 +940,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
940940
page_count++;
941941
}
942942
}
943-
spin_unlock(&alloc->lock);
943+
mutex_unlock(&alloc->mutex);
944944
kvfree(alloc->pages);
945945
if (alloc->mm)
946946
mmdrop(alloc->mm);
@@ -964,7 +964,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
964964
struct binder_buffer *buffer;
965965
struct rb_node *n;
966966

967-
spin_lock(&alloc->lock);
967+
mutex_lock(&alloc->mutex);
968968
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
969969
buffer = rb_entry(n, struct binder_buffer, rb_node);
970970
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -974,7 +974,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
974974
buffer->extra_buffers_size,
975975
buffer->transaction ? "active" : "delivered");
976976
}
977-
spin_unlock(&alloc->lock);
977+
mutex_unlock(&alloc->mutex);
978978
}
979979

980980
/**
@@ -991,7 +991,7 @@ void binder_alloc_print_pages(struct seq_file *m,
991991
int lru = 0;
992992
int free = 0;
993993

994-
spin_lock(&alloc->lock);
994+
mutex_lock(&alloc->mutex);
995995
/*
996996
* Make sure the binder_alloc is fully initialized, otherwise we might
997997
* read inconsistent state.
@@ -1007,7 +1007,7 @@ void binder_alloc_print_pages(struct seq_file *m,
10071007
lru++;
10081008
}
10091009
}
1010-
spin_unlock(&alloc->lock);
1010+
mutex_unlock(&alloc->mutex);
10111011
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
10121012
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
10131013
}
@@ -1023,10 +1023,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
10231023
struct rb_node *n;
10241024
int count = 0;
10251025

1026-
spin_lock(&alloc->lock);
1026+
mutex_lock(&alloc->mutex);
10271027
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
10281028
count++;
1029-
spin_unlock(&alloc->lock);
1029+
mutex_unlock(&alloc->mutex);
10301030
return count;
10311031
}
10321032

@@ -1070,8 +1070,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
10701070
goto err_mmget;
10711071
if (!mmap_read_trylock(mm))
10721072
goto err_mmap_read_lock_failed;
1073-
if (!spin_trylock(&alloc->lock))
1074-
goto err_get_alloc_lock_failed;
1073+
if (!mutex_trylock(&alloc->mutex))
1074+
goto err_get_alloc_mutex_failed;
10751075
if (!page->page_ptr)
10761076
goto err_page_already_freed;
10771077

@@ -1090,7 +1090,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
10901090
trace_binder_unmap_kernel_end(alloc, index);
10911091

10921092
list_lru_isolate(lru, item);
1093-
spin_unlock(&alloc->lock);
1093+
mutex_unlock(&alloc->mutex);
10941094
spin_unlock(&lru->lock);
10951095

10961096
if (vma) {
@@ -1109,8 +1109,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
11091109

11101110
err_invalid_vma:
11111111
err_page_already_freed:
1112-
spin_unlock(&alloc->lock);
1113-
err_get_alloc_lock_failed:
1112+
mutex_unlock(&alloc->mutex);
1113+
err_get_alloc_mutex_failed:
11141114
mmap_read_unlock(mm);
11151115
err_mmap_read_lock_failed:
11161116
mmput_async(mm);
@@ -1145,7 +1145,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
11451145
alloc->pid = current->group_leader->pid;
11461146
alloc->mm = current->mm;
11471147
mmgrab(alloc->mm);
1148-
spin_lock_init(&alloc->lock);
1148+
mutex_init(&alloc->mutex);
11491149
INIT_LIST_HEAD(&alloc->buffers);
11501150
}
11511151

drivers/android/binder_alloc.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
#include <linux/rbtree.h>
1010
#include <linux/list.h>
1111
#include <linux/mm.h>
12-
#include <linux/spinlock.h>
12+
#include <linux/rtmutex.h>
1313
#include <linux/vmalloc.h>
1414
#include <linux/slab.h>
1515
#include <linux/list_lru.h>
@@ -72,7 +72,7 @@ struct binder_lru_page {
7272

7373
/**
7474
* struct binder_alloc - per-binder proc state for binder allocator
75-
* @lock: protects binder_alloc fields
75+
* @mutex: protects binder_alloc fields
7676
* @vma: vm_area_struct passed to mmap_handler
7777
* (invariant after mmap)
7878
* @mm: copy of task->mm (invariant after open)
@@ -96,7 +96,7 @@ struct binder_lru_page {
9696
* struct binder_buffer objects used to track the user buffers
9797
*/
9898
struct binder_alloc {
99-
spinlock_t lock;
99+
struct mutex mutex;
100100
struct vm_area_struct *vma;
101101
struct mm_struct *mm;
102102
unsigned long buffer;
@@ -153,9 +153,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
153153
{
154154
size_t free_async_space;
155155

156-
spin_lock(&alloc->lock);
156+
mutex_lock(&alloc->mutex);
157157
free_async_space = alloc->free_async_space;
158-
spin_unlock(&alloc->lock);
158+
mutex_unlock(&alloc->mutex);
159159
return free_async_space;
160160
}
161161

0 commit comments

Comments (0)