@@ -402,12 +402,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
+	mmap_read_lock(alloc->vma_vm_mm);
 	if (!binder_alloc_get_vma(alloc)) {
+		mmap_read_unlock(alloc->vma_vm_mm);
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
 		return ERR_PTR(-ESRCH);
 	}
+	mmap_read_unlock(alloc->vma_vm_mm);
 
 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
 		ALIGN(offsets_size, sizeof(void *));
@@ -929,17 +932,25 @@ void binder_alloc_print_pages(struct seq_file *m,
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
 	 */
-	if (binder_alloc_get_vma(alloc) != NULL) {
-		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-			page = &alloc->pages[i];
-			if (!page->page_ptr)
-				free++;
-			else if (list_empty(&page->lru))
-				active++;
-			else
-				lru++;
-		}
+
+	mmap_read_lock(alloc->vma_vm_mm);
+	if (binder_alloc_get_vma(alloc) == NULL) {
+		mmap_read_unlock(alloc->vma_vm_mm);
+		goto uninitialized;
 	}
+
+	mmap_read_unlock(alloc->vma_vm_mm);
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		page = &alloc->pages[i];
+		if (!page->page_ptr)
+			free++;
+		else if (list_empty(&page->lru))
+			active++;
+		else
+			lru++;
+	}
+
+uninitialized:
 	mutex_unlock(&alloc->mutex);
 	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
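Both hunks apply the same pattern: the binder_alloc_get_vma() lookup is now bracketed by mmap_read_lock()/mmap_read_unlock() on alloc->vma_vm_mm, and the lock is dropped on both the error path and the success path before any further work. A minimal sketch of that pattern, condensed from the hunks above (illustrative only, not either patched function verbatim):

	/* Sketch: bracket the vma lookup with the owning mm's mmap read lock. */
	mmap_read_lock(alloc->vma_vm_mm);
	if (!binder_alloc_get_vma(alloc)) {
		/* Error path: drop the lock before returning. */
		mmap_read_unlock(alloc->vma_vm_mm);
		return ERR_PTR(-ESRCH);
	}
	/* Success path: the lock is only needed for the lookup itself. */
	mmap_read_unlock(alloc->vma_vm_mm);
	/* ... continue with buffer allocation or page accounting ... */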