@@ -332,6 +332,35 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
332332 return vma ? - ENOMEM : - ESRCH ;
333333}
334334
335+
/*
 * binder_alloc_set_vma() - publish (or clear) the vma backing this alloc
 * @alloc: binder_alloc whose vma/vma_vm_mm fields are being updated
 * @vma:   vma to publish, or NULL to tear the mapping down
 *
 * Ordering contract: vma_vm_mm is stored *before* the smp_wmb(), and the
 * vma pointer *after* it, so any reader that observes alloc->vma != NULL
 * is guaranteed to also observe the matching alloc->vma_vm_mm.
 * Pairs with the smp_rmb() in binder_alloc_get_vma().
 */
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	/* On teardown (vma == NULL) the old vma_vm_mm is deliberately kept. */
	if (vma)
		alloc->vma_vm_mm = vma->vm_mm;
	/*
	 * If we see alloc->vma is not NULL, the buffer data structures are
	 * set up completely. Look at the smp_rmb() side in
	 * binder_alloc_get_vma(). We also want to guarantee the new
	 * alloc->vma_vm_mm is always visible if alloc->vma is set.
	 */
	smp_wmb();
	alloc->vma = vma;
}
350+
/*
 * binder_alloc_get_vma() - fetch the vma published by binder_alloc_set_vma()
 * @alloc: binder_alloc to read from
 *
 * Returns the current vma, or NULL if no mapping is published.
 * The smp_rmb() after the NULL check pairs with the smp_wmb() in
 * binder_alloc_set_vma(): once a non-NULL vma is seen, alloc->vma_vm_mm
 * (and the rest of the buffer setup) is guaranteed to be visible too.
 *
 * NOTE(review): alloc->vma is read twice with plain loads while a
 * concurrent binder_alloc_vma_close() may clear it — presumably tolerated
 * here, but READ_ONCE() would make the intent explicit; confirm against
 * the driver's locking rules.
 */
static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	struct vm_area_struct *vma = NULL;

	if (alloc->vma) {
		/* Look at description in binder_alloc_set_vma */
		smp_rmb();
		vma = alloc->vma;
	}
	return vma;
}
363+
335364static struct binder_buffer * binder_alloc_new_buf_locked (
336365 struct binder_alloc * alloc ,
337366 size_t data_size ,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
348377 size_t size , data_offsets_size ;
349378 int ret ;
350379
351- if (alloc -> vma == NULL ) {
380+ if (! binder_alloc_get_vma ( alloc ) ) {
352381 binder_alloc_debug (BINDER_DEBUG_USER_ERROR ,
353382 "%d: binder_alloc_buf, no vma\n" ,
354383 alloc -> pid );
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
723752 buffer -> free = 1 ;
724753 binder_insert_free_buffer (alloc , buffer );
725754 alloc -> free_async_space = alloc -> buffer_size / 2 ;
726- barrier ();
727- alloc -> vma = vma ;
728- alloc -> vma_vm_mm = vma -> vm_mm ;
755+ binder_alloc_set_vma (alloc , vma );
729756 mmgrab (alloc -> vma_vm_mm );
730757
731758 return 0 ;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
754781 int buffers , page_count ;
755782 struct binder_buffer * buffer ;
756783
757- BUG_ON (alloc -> vma );
758-
759784 buffers = 0 ;
760785 mutex_lock (& alloc -> mutex );
786+ BUG_ON (alloc -> vma );
787+
761788 while ((n = rb_first (& alloc -> allocated_buffers ))) {
762789 buffer = rb_entry (n , struct binder_buffer , rb_node );
763790
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
900927 */
901928void binder_alloc_vma_close (struct binder_alloc * alloc )
902929{
903- WRITE_ONCE (alloc -> vma , NULL );
930+ binder_alloc_set_vma (alloc , NULL );
904931}
905932
906933/**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
935962
936963 index = page - alloc -> pages ;
937964 page_addr = (uintptr_t )alloc -> buffer + index * PAGE_SIZE ;
938- vma = alloc -> vma ;
965+ vma = binder_alloc_get_vma ( alloc ) ;
939966 if (vma ) {
940967 if (!mmget_not_zero (alloc -> vma_vm_mm ))
941968 goto err_mmget ;
0 commit comments