Commit e66b77e5 authored by Carlos Llamas, committed by Greg Kroah-Hartman

binder: rename alloc->vma_vm_mm to alloc->mm

Rename ->vma_vm_mm to ->mm to reflect the fact that we no longer cache
this reference from vma->vm_mm but from current->mm instead.

No functional changes in this patch.
Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Acked-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20220906135948.3048225-2-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e3c9b0dd
...@@ -208,8 +208,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, ...@@ -208,8 +208,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
} }
} }
if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) if (need_mm && mmget_not_zero(alloc->mm))
mm = alloc->vma_vm_mm; mm = alloc->mm;
if (mm) { if (mm) {
mmap_read_lock(mm); mmap_read_lock(mm);
...@@ -322,9 +322,9 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc, ...@@ -322,9 +322,9 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
*/ */
if (vma) { if (vma) {
vm_start = vma->vm_start; vm_start = vma->vm_start;
mmap_assert_write_locked(alloc->vma_vm_mm); mmap_assert_write_locked(alloc->mm);
} else { } else {
mmap_assert_locked(alloc->vma_vm_mm); mmap_assert_locked(alloc->mm);
} }
alloc->vma_addr = vm_start; alloc->vma_addr = vm_start;
...@@ -336,7 +336,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma( ...@@ -336,7 +336,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
struct vm_area_struct *vma = NULL; struct vm_area_struct *vma = NULL;
if (alloc->vma_addr) if (alloc->vma_addr)
vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr); vma = vma_lookup(alloc->mm, alloc->vma_addr);
return vma; return vma;
} }
...@@ -401,15 +401,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked( ...@@ -401,15 +401,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size; size_t size, data_offsets_size;
int ret; int ret;
mmap_read_lock(alloc->vma_vm_mm); mmap_read_lock(alloc->mm);
if (!binder_alloc_get_vma(alloc)) { if (!binder_alloc_get_vma(alloc)) {
mmap_read_unlock(alloc->vma_vm_mm); mmap_read_unlock(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR, binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n", "%d: binder_alloc_buf, no vma\n",
alloc->pid); alloc->pid);
return ERR_PTR(-ESRCH); return ERR_PTR(-ESRCH);
} }
mmap_read_unlock(alloc->vma_vm_mm); mmap_read_unlock(alloc->mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) + data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *)); ALIGN(offsets_size, sizeof(void *));
...@@ -823,7 +823,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) ...@@ -823,7 +823,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0; buffers = 0;
mutex_lock(&alloc->mutex); mutex_lock(&alloc->mutex);
BUG_ON(alloc->vma_addr && BUG_ON(alloc->vma_addr &&
vma_lookup(alloc->vma_vm_mm, alloc->vma_addr)); vma_lookup(alloc->mm, alloc->vma_addr));
while ((n = rb_first(&alloc->allocated_buffers))) { while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node); buffer = rb_entry(n, struct binder_buffer, rb_node);
...@@ -873,8 +873,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) ...@@ -873,8 +873,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
kfree(alloc->pages); kfree(alloc->pages);
} }
mutex_unlock(&alloc->mutex); mutex_unlock(&alloc->mutex);
if (alloc->vma_vm_mm) if (alloc->mm)
mmdrop(alloc->vma_vm_mm); mmdrop(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n", "%s: %d buffers %d, pages %d\n",
...@@ -931,13 +931,13 @@ void binder_alloc_print_pages(struct seq_file *m, ...@@ -931,13 +931,13 @@ void binder_alloc_print_pages(struct seq_file *m,
* read inconsistent state. * read inconsistent state.
*/ */
mmap_read_lock(alloc->vma_vm_mm); mmap_read_lock(alloc->mm);
if (binder_alloc_get_vma(alloc) == NULL) { if (binder_alloc_get_vma(alloc) == NULL) {
mmap_read_unlock(alloc->vma_vm_mm); mmap_read_unlock(alloc->mm);
goto uninitialized; goto uninitialized;
} }
mmap_read_unlock(alloc->vma_vm_mm); mmap_read_unlock(alloc->mm);
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i]; page = &alloc->pages[i];
if (!page->page_ptr) if (!page->page_ptr)
...@@ -1020,7 +1020,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, ...@@ -1020,7 +1020,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages; index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
mm = alloc->vma_vm_mm; mm = alloc->mm;
if (!mmget_not_zero(mm)) if (!mmget_not_zero(mm))
goto err_mmget; goto err_mmget;
if (!mmap_read_trylock(mm)) if (!mmap_read_trylock(mm))
...@@ -1089,8 +1089,8 @@ static struct shrinker binder_shrinker = { ...@@ -1089,8 +1089,8 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc) void binder_alloc_init(struct binder_alloc *alloc)
{ {
alloc->pid = current->group_leader->pid; alloc->pid = current->group_leader->pid;
alloc->vma_vm_mm = current->mm; alloc->mm = current->mm;
mmgrab(alloc->vma_vm_mm); mmgrab(alloc->mm);
mutex_init(&alloc->mutex); mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers); INIT_LIST_HEAD(&alloc->buffers);
} }
......
...@@ -78,7 +78,7 @@ struct binder_lru_page { ...@@ -78,7 +78,7 @@ struct binder_lru_page {
* (invariant after mmap) * (invariant after mmap)
* @tsk: tid for task that called init for this proc * @tsk: tid for task that called init for this proc
* (invariant after init) * (invariant after init)
* @vma_vm_mm: copy of vma->vm_mm (invariant after mmap) * @mm: copy of task->mm (invariant after open)
* @buffer: base of per-proc address space mapped via mmap * @buffer: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc * @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation * @free_buffers: rb tree of buffers available for allocation
...@@ -101,7 +101,7 @@ struct binder_lru_page { ...@@ -101,7 +101,7 @@ struct binder_lru_page {
struct binder_alloc { struct binder_alloc {
struct mutex mutex; struct mutex mutex;
unsigned long vma_addr; unsigned long vma_addr;
struct mm_struct *vma_vm_mm; struct mm_struct *mm;
void __user *buffer; void __user *buffer;
struct list_head buffers; struct list_head buffers;
struct rb_root free_buffers; struct rb_root free_buffers;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment