Commit 37ebbb4f authored by Carlos Llamas's avatar Carlos Llamas Committed by Greg Kroah-Hartman

binder: perform page installation outside of locks

Split out the insertion of pages to be outside of the alloc->mutex in a
separate binder_install_buffer_pages() routine. Since this is no longer
serialized, we must look at the full range of pages used by the buffers.
The installation is protected with mmap_sem in write mode since multiple
tasks might race to install the same page.

Besides avoiding unnecessary nested locking this helps in preparation of
switching the alloc->mutex into a spinlock_t in subsequent patches.
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-20-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 68aef12d
...@@ -175,6 +175,21 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, ...@@ -175,6 +175,21 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
return buffer; return buffer;
} }
/*
 * binder_set_installed_page() - publish a newly installed page.
 * @lru_page: tracking entry in alloc->pages[] for this page slot
 * @page:     the freshly installed struct page (or NULL to clear)
 *
 * Publishes @page with release semantics so that any task that later
 * observes a non-NULL pointer via binder_get_installed_page() is also
 * guaranteed to see the page fully installed. This allows the installed
 * state to be checked outside of the alloc->mutex.
 */
static inline void
binder_set_installed_page(struct binder_lru_page *lru_page,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
smp_store_release(&lru_page->page_ptr, page);
}
/*
 * binder_get_installed_page() - read the installed page, if any.
 * @lru_page: tracking entry in alloc->pages[] for this page slot
 *
 * Returns the page pointer with acquire semantics: a non-NULL result
 * means the installation performed before the paired release store is
 * fully visible to this task. Returns NULL if no page is installed yet.
 */
static inline struct page *
binder_get_installed_page(struct binder_lru_page *lru_page)
{
/* Pairs with release in binder_set_installed_page() */
return smp_load_acquire(&lru_page->page_ptr);
}
static void binder_free_page_range(struct binder_alloc *alloc, static void binder_free_page_range(struct binder_alloc *alloc,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
...@@ -190,6 +205,9 @@ static void binder_free_page_range(struct binder_alloc *alloc, ...@@ -190,6 +205,9 @@ static void binder_free_page_range(struct binder_alloc *alloc,
index = (page_addr - alloc->buffer) / PAGE_SIZE; index = (page_addr - alloc->buffer) / PAGE_SIZE;
page = &alloc->pages[index]; page = &alloc->pages[index];
if (!binder_get_installed_page(page))
continue;
trace_binder_free_lru_start(alloc, index); trace_binder_free_lru_start(alloc, index);
ret = list_lru_add(&binder_alloc_lru, &page->lru); ret = list_lru_add(&binder_alloc_lru, &page->lru);
...@@ -209,7 +227,14 @@ static int binder_install_single_page(struct binder_alloc *alloc, ...@@ -209,7 +227,14 @@ static int binder_install_single_page(struct binder_alloc *alloc,
if (!mmget_not_zero(alloc->mm)) if (!mmget_not_zero(alloc->mm))
return -ESRCH; return -ESRCH;
/*
* Protected with mmap_sem in write mode as multiple tasks
* might race to install the same page.
*/
mmap_write_lock(alloc->mm); mmap_write_lock(alloc->mm);
if (binder_get_installed_page(lru_page))
goto out;
if (!alloc->vma) { if (!alloc->vma) {
pr_err("%d: %s failed, no vma\n", alloc->pid, __func__); pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
ret = -ESRCH; ret = -ESRCH;
...@@ -232,15 +257,50 @@ static int binder_install_single_page(struct binder_alloc *alloc, ...@@ -232,15 +257,50 @@ static int binder_install_single_page(struct binder_alloc *alloc,
goto out; goto out;
} }
lru_page->page_ptr = page; /* Mark page installation complete and safe to use */
binder_set_installed_page(lru_page, page);
out: out:
mmap_write_unlock(alloc->mm); mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm); mmput_async(alloc->mm);
return ret; return ret;
} }
static int binder_allocate_page_range(struct binder_alloc *alloc, static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long start, unsigned long end) struct binder_buffer *buffer,
size_t size)
{
struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
start = buffer->user_data & PAGE_MASK;
final = PAGE_ALIGN(buffer->user_data + size);
for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
unsigned long index;
int ret;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
page = &alloc->pages[index];
if (binder_get_installed_page(page))
continue;
trace_binder_alloc_page_start(alloc, index);
ret = binder_install_single_page(alloc, page, page_addr);
if (ret)
return ret;
trace_binder_alloc_page_end(alloc, index);
}
return 0;
}
/* The range of pages should exclude those shared with other buffers */
static void binder_allocate_page_range(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{ {
struct binder_lru_page *page; struct binder_lru_page *page;
unsigned long page_addr; unsigned long page_addr;
...@@ -249,15 +309,11 @@ static int binder_allocate_page_range(struct binder_alloc *alloc, ...@@ -249,15 +309,11 @@ static int binder_allocate_page_range(struct binder_alloc *alloc,
"%d: allocate pages %lx-%lx\n", "%d: allocate pages %lx-%lx\n",
alloc->pid, start, end); alloc->pid, start, end);
if (end <= start)
return 0;
trace_binder_update_page_range(alloc, true, start, end); trace_binder_update_page_range(alloc, true, start, end);
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
unsigned long index; unsigned long index;
bool on_lru; bool on_lru;
int ret;
index = (page_addr - alloc->buffer) / PAGE_SIZE; index = (page_addr - alloc->buffer) / PAGE_SIZE;
page = &alloc->pages[index]; page = &alloc->pages[index];
...@@ -272,21 +328,9 @@ static int binder_allocate_page_range(struct binder_alloc *alloc, ...@@ -272,21 +328,9 @@ static int binder_allocate_page_range(struct binder_alloc *alloc,
continue; continue;
} }
trace_binder_alloc_page_start(alloc, index);
ret = binder_install_single_page(alloc, page, page_addr);
if (ret) {
binder_free_page_range(alloc, start, page_addr);
return ret;
}
if (index + 1 > alloc->pages_high) if (index + 1 > alloc->pages_high)
alloc->pages_high = index + 1; alloc->pages_high = index + 1;
trace_binder_alloc_page_end(alloc, index);
} }
return 0;
} }
static inline void binder_alloc_set_vma(struct binder_alloc *alloc, static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
...@@ -405,7 +449,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked( ...@@ -405,7 +449,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
unsigned long has_page_addr; unsigned long has_page_addr;
unsigned long end_page_addr; unsigned long end_page_addr;
size_t buffer_size; size_t buffer_size;
int ret;
if (is_async && alloc->free_async_space < size) { if (is_async && alloc->free_async_space < size) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
...@@ -449,18 +492,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked( ...@@ -449,18 +492,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
"%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
alloc->pid, size, buffer, buffer_size); alloc->pid, size, buffer, buffer_size);
has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
WARN_ON(n && buffer_size != size); WARN_ON(n && buffer_size != size);
has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
end_page_addr = PAGE_ALIGN(buffer->user_data + size); end_page_addr = PAGE_ALIGN(buffer->user_data + size);
if (end_page_addr > has_page_addr) if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr; end_page_addr = has_page_addr;
ret = binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data), binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
end_page_addr); end_page_addr);
if (ret) {
buffer = ERR_PTR(ret);
goto out;
}
if (buffer_size != size) { if (buffer_size != size) {
new_buffer->user_data = buffer->user_data + size; new_buffer->user_data = buffer->user_data + size;
list_add(&new_buffer->entry, &buffer->entry); list_add(&new_buffer->entry, &buffer->entry);
...@@ -538,6 +577,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, ...@@ -538,6 +577,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
{ {
struct binder_buffer *buffer, *next; struct binder_buffer *buffer, *next;
size_t size; size_t size;
int ret;
/* Check binder_alloc is fully initialized */ /* Check binder_alloc is fully initialized */
if (!binder_alloc_get_vma(alloc)) { if (!binder_alloc_get_vma(alloc)) {
...@@ -574,6 +614,11 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, ...@@ -574,6 +614,11 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->pid = current->tgid; buffer->pid = current->tgid;
mutex_unlock(&alloc->mutex); mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
binder_alloc_free_buf(alloc, buffer);
buffer = ERR_PTR(ret);
}
out: out:
return buffer; return buffer;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment