@@ -169,9 +169,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 {
 	struct binder_buffer *buffer;
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	return buffer;
 }
 
@@ -597,18 +597,18 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	if (!next)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
 	if (IS_ERR(buffer)) {
-		spin_unlock(&alloc->lock);
+		mutex_unlock(&alloc->mutex);
 		goto out;
 	}
 
 	buffer->data_size = data_size;
 	buffer->offsets_size = offsets_size;
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->pid = current->tgid;
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 
 	ret = binder_install_buffer_pages(alloc, buffer, size);
 	if (ret) {
@@ -785,17 +785,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 	 * We could eliminate the call to binder_alloc_clear_buf()
 	 * from binder_alloc_deferred_release() by moving this to
 	 * binder_free_buf_locked(). However, that could
-	 * increase contention for the alloc->lock if clear_on_free
-	 * is used frequently for large buffers. This lock is not
+	 * increase contention for the alloc mutex if clear_on_free
+	 * is used frequently for large buffers. The mutex is not
 	 * needed for correctness here.
 	 */
 	if (buffer->clear_on_free) {
 		binder_alloc_clear_buf(alloc, buffer);
 		buffer->clear_on_free = false;
 	}
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	binder_free_buf_locked(alloc, buffer);
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 }
 
 /**
@@ -893,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	struct binder_buffer *buffer;
 
 	buffers = 0;
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	BUG_ON(alloc->vma);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -940,7 +940,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 			page_count++;
 		}
 	}
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	kvfree(alloc->pages);
 	if (alloc->mm)
 		mmdrop(alloc->mm);
@@ -964,7 +964,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 	struct binder_buffer *buffer;
 	struct rb_node *n;
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -974,7 +974,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 			   buffer->extra_buffers_size,
 			   buffer->transaction ? "active" : "delivered");
 	}
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 }
 
 /**
@@ -991,7 +991,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 	int lru = 0;
 	int free = 0;
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	/*
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
@@ -1007,7 +1007,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 				lru++;
 		}
 	}
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }
@@ -1023,10 +1023,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 	struct rb_node *n;
 	int count = 0;
 
-	spin_lock(&alloc->lock);
+	mutex_lock(&alloc->mutex);
 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
 		count++;
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	return count;
 }
 
@@ -1070,8 +1070,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
 		goto err_mmap_read_lock_failed;
-	if (!spin_trylock(&alloc->lock))
-		goto err_get_alloc_lock_failed;
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
 	if (!page->page_ptr)
 		goto err_page_already_freed;
 
@@ -1090,7 +1090,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	trace_binder_unmap_kernel_end(alloc, index);
 
 	list_lru_isolate(lru, item);
-	spin_unlock(&alloc->lock);
+	mutex_unlock(&alloc->mutex);
 	spin_unlock(&lru->lock);
 
 	if (vma) {
@@ -1109,8 +1109,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 err_invalid_vma:
 err_page_already_freed:
-	spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
 	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
@@ -1145,7 +1145,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	alloc->pid = current->group_leader->pid;
 	alloc->mm = current->mm;
 	mmgrab(alloc->mm);
-	spin_lock_init(&alloc->lock);
+	mutex_init(&alloc->mutex);
 	INIT_LIST_HEAD(&alloc->buffers);
 }
 
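For context, the locking shape that the binder_alloc_free_buf() hunk preserves, namely doing the potentially expensive clear_on_free wipe before taking the lock and holding the lock only for the free-list bookkeeping, can be sketched in ordinary userspace C. This is a minimal illustrative analogy using pthreads, not code from drivers/android/binder_alloc.c; the buffer_pool type and helper names are made up for the example.

```c
#include <pthread.h>
#include <stddef.h>
#include <string.h>

/* Illustrative stand-ins; not the real binder structures. */
struct buf {
	struct buf *next;
	size_t size;
	int clear_on_free;
	void *data;
};

struct buffer_pool {
	pthread_mutex_t lock;	/* protects free_list only */
	struct buf *free_list;
};

/*
 * Same shape as binder_alloc_free_buf(): the large memset runs
 * without the lock held, so concurrent allocators/freers are not
 * blocked behind it; only the list update is in the critical section.
 */
static void pool_free_buf(struct buffer_pool *pool, struct buf *b)
{
	if (b->clear_on_free) {
		memset(b->data, 0, b->size);	/* expensive, lock not held */
		b->clear_on_free = 0;
	}

	pthread_mutex_lock(&pool->lock);	/* short critical section */
	b->next = pool->free_list;
	pool->free_list = b;
	pthread_mutex_unlock(&pool->lock);
}
```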