Commit 0fa53349 authored by Carlos Llamas, committed by Greg Kroah-Hartman

binder: add lockless binder_alloc_(set|get)_vma()

Bring back the original lockless design in binder_alloc to determine
whether the buffer setup has been completed by the ->mmap() handler.
However, this time use smp_load_acquire() and smp_store_release() to
wrap all the ordering in a single macro call.

Also, add comments to make it evident that binder uses alloc->vma to
determine when the binder_alloc has been fully initialized. In these
scenarios, acquiring the mmap_lock is not required.
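
As an illustrative aside (not part of this patch), the release/acquire pairing
can be sketched in userspace with C11 atomics, where memory_order_release
stands in for smp_store_release() and memory_order_acquire for
smp_load_acquire(); the demo_alloc struct and function names below are
hypothetical:

#include <stdatomic.h>
#include <stdio.h>

struct demo_alloc {
        size_t buffer_size;     /* ordinary setup field */
        void *_Atomic vma;      /* published last, read locklessly */
};

/* publisher: finish all setup, then release-store the pointer */
static void demo_set_vma(struct demo_alloc *alloc, void *vma)
{
        alloc->buffer_size = 4096;      /* ordered before the release store */
        atomic_store_explicit(&alloc->vma, vma, memory_order_release);
}

/* reader: acquire-load; a non-NULL result means the setup is visible */
static int demo_ready(struct demo_alloc *alloc)
{
        return atomic_load_explicit(&alloc->vma, memory_order_acquire) != NULL;
}

int main(void)
{
        static struct demo_alloc alloc;
        static int mapping;

        demo_set_vma(&alloc, &mapping);
        printf("initialized: %d, size: %zu\n",
               demo_ready(&alloc), alloc.buffer_size);
        return 0;
}

In the kernel proper, smp_store_release()/smp_load_acquire() provide the same
guarantee directly on alloc->vma, which is what the hunks below rely on.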

Fixes: a43cfc87 ("android: binder: stop saving a pointer to the VMA")
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20230502201220.1756319-3-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c0fd2101
@@ -309,17 +309,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+					struct vm_area_struct *vma)
+{
+	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
+	smp_store_release(&alloc->vma, vma);
+}
+
 static inline struct vm_area_struct *binder_alloc_get_vma(
 		struct binder_alloc *alloc)
 {
-	struct vm_area_struct *vma = NULL;
-
-	if (alloc->vma) {
-		/* Look at description in binder_alloc_set_vma */
-		smp_rmb();
-		vma = alloc->vma;
-	}
-	return vma;
+	/* pairs with smp_store_release in binder_alloc_set_vma() */
+	return smp_load_acquire(&alloc->vma);
 }
 
 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
@@ -382,6 +383,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
+	/* Check binder_alloc is fully initialized */
 	if (!binder_alloc_get_vma(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
@@ -777,7 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	alloc->vma = vma;
+
+	/* Signal binder_alloc is fully initialized */
+	binder_alloc_set_vma(alloc, vma);
 
 	return 0;
 
@@ -959,7 +963,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	alloc->vma = 0;
+	binder_alloc_set_vma(alloc, NULL);
 }
 
 /**