Commit 64019a2e authored by Peter Xu, committed by Linus Torvalds

mm/gup: remove task_struct pointer for all gup code

After the cleanup of page fault accounting, gup does not need to pass
task_struct around any more.  Remove that parameter in the whole gup
stack.
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Link: http://lkml.kernel.org/r/20200707225021.200906-26-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a2beb5f1
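
For illustration only, here is a minimal caller sketch in the spirit of the converted call sites below; the helper name example_pin_one_page() is hypothetical and not part of the patch. It shows the new calling convention: the leading task_struct argument is gone and the mm_struct is now the first parameter, while the gup flags, pages, vmas and locked arguments are unchanged.

#include <linux/mm.h>

/*
 * Hypothetical helper, sketch only: pin one writable page of a (possibly
 * remote) mm.  Before this patch the first argument to
 * get_user_pages_remote() was a task_struct pointer (often just NULL).
 */
static int example_pin_one_page(struct mm_struct *mm, unsigned long addr,
				struct page **page)
{
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, page, NULL, NULL);
	mmap_read_unlock(mm);

	if (ret < 0)
		return ret;
	return ret == 1 ? 0 : -EFAULT;
}
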
@@ -91,7 +91,7 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 		goto fail;
 	mmap_read_lock(current->mm);
-	ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+	ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
			       FAULT_FLAG_WRITE, NULL);
 	mmap_read_unlock(current->mm);
...
@@ -2768,7 +2768,7 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
 	struct page *page = NULL;
 	mmap_read_lock(kvm->mm);
-	get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
+	get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
			      &page, NULL, NULL);
 	mmap_read_unlock(kvm->mm);
 	return page;
...
@@ -1892,7 +1892,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
 		if (r) {
-			r = fixup_user_fault(current, current->mm, hva,
+			r = fixup_user_fault(current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
 			if (r)
 				break;
...
@@ -273,7 +273,7 @@ static int handle_iske(struct kvm_vcpu *vcpu)
 	rc = get_guest_storage_key(current->mm, vmaddr, &key);
 	if (rc) {
-		rc = fixup_user_fault(current, current->mm, vmaddr,
+		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
 		if (!rc) {
 			mmap_read_unlock(current->mm);
@@ -319,7 +319,7 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
 	mmap_read_lock(current->mm);
 	rc = reset_guest_reference_bit(current->mm, vmaddr);
 	if (rc < 0) {
-		rc = fixup_user_fault(current, current->mm, vmaddr,
+		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
 		if (!rc) {
 			mmap_read_unlock(current->mm);
@@ -390,7 +390,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
					    m3 & SSKE_MC);
 	if (rc < 0) {
-		rc = fixup_user_fault(current, current->mm, vmaddr,
+		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
 		rc = !rc ? -EAGAIN : rc;
 	}
@@ -1094,7 +1094,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 	rc = cond_set_guest_storage_key(current->mm, vmaddr,
					key, NULL, nq, mr, mc);
 	if (rc < 0) {
-		rc = fixup_user_fault(current, current->mm, vmaddr,
+		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
 		rc = !rc ? -EAGAIN : rc;
 	}
...
@@ -649,7 +649,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 		rc = vmaddr;
 		goto out_up;
 	}
-	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
+	if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
 		rc = -EFAULT;
 		goto out_up;
@@ -879,7 +879,7 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 	BUG_ON(gmap_is_shadow(gmap));
 	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
-	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
+	if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
 		return -EFAULT;
 	if (unlocked)
 		/* lost mmap_lock, caller has to retry __gmap_translate */
...
@@ -469,7 +469,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 				locked = 1;
 			}
 			ret = pin_user_pages_remote
-				(work->task, mm,
+				(mm,
				 obj->userptr.ptr + pinned * PAGE_SIZE,
				 npages - pinned,
				 flags,
...
@@ -439,7 +439,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
-		npages = get_user_pages_remote(owning_process, owning_mm,
+		npages = get_user_pages_remote(owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
 		mmap_read_unlock(owning_mm);
...
@@ -425,7 +425,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 	if (ret) {
 		bool unlocked = false;
-		ret = fixup_user_fault(NULL, mm, vaddr,
+		ret = fixup_user_fault(mm, vaddr,
				       FAULT_FLAG_REMOTE |
				       (write_fault ? FAULT_FLAG_WRITE : 0),
				       &unlocked);
@@ -453,7 +453,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 		flags |= FOLL_WRITE;
 	mmap_read_lock(mm);
-	ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
+	ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM,
				    page, NULL, NULL);
 	if (ret == 1) {
 		*pfn = page_to_pfn(page[0]);
...
@@ -217,7 +217,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
	 * We are doing an exec(). 'current' is the process
	 * doing the exec and bprm->mm is the new process's mm.
	 */
-	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
+	ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
			&page, NULL, NULL);
 	if (ret <= 0)
 		return NULL;
...
@@ -1661,7 +1661,7 @@ int invalidate_inode_page(struct page *page);
 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
			unsigned long address, unsigned int flags,
			struct pt_regs *regs);
-extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+extern int fixup_user_fault(struct mm_struct *mm,
			unsigned long address, unsigned int fault_flags,
			bool *unlocked);
 void unmap_mapping_pages(struct address_space *mapping,
@@ -1677,8 +1677,7 @@ static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 	BUG();
 	return VM_FAULT_SIGBUS;
 }
-static inline int fixup_user_fault(struct task_struct *tsk,
-			struct mm_struct *mm, unsigned long address,
+static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
			unsigned int fault_flags, bool *unlocked)
 {
 	/* should never happen if there's no MMU */
@@ -1704,11 +1703,11 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);
-long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long pin_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);
...
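
To show the updated fixup_user_fault() declaration above in use, here is a hedged sketch; the wrapper name example_fault_in_writable() is made up, and the pattern simply mirrors the s390, vfio and futex call sites converted in this patch: the caller holds mmap_read_lock(), passes the mm as the first argument, and may pass an unlocked flag to learn whether the lock was dropped and re-taken internally.

#include <linux/mm.h>

/* Sketch only: fault in a user address for write on behalf of an mm. */
static int example_fault_in_writable(struct mm_struct *mm, unsigned long vaddr)
{
	bool unlocked = false;
	int rc;

	mmap_read_lock(mm);
	rc = fixup_user_fault(mm, vaddr, FAULT_FLAG_WRITE, &unlocked);
	/* If unlocked is true, mmap_lock was dropped and re-acquired inside. */
	mmap_read_unlock(mm);

	return rc;
}
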
@@ -376,7 +376,7 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
 	if (!vaddr || !d)
 		return -EINVAL;
-	ret = get_user_pages_remote(NULL, mm, vaddr, 1,
+	ret = get_user_pages_remote(mm, vaddr, 1,
			FOLL_WRITE, &page, &vma, NULL);
 	if (unlikely(ret <= 0)) {
 		/*
@@ -477,7 +477,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	if (is_register)
 		gup_flags |= FOLL_SPLIT_PMD;
 	/* Read the page with vaddr into memory */
-	ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
+	ret = get_user_pages_remote(mm, vaddr, 1, gup_flags,
				    &old_page, &vma, NULL);
 	if (ret <= 0)
 		return ret;
@@ -2029,7 +2029,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
	 * but we treat this as a 'remote' access since it is
	 * essentially a kernel access to the memory.
	 */
-	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
+	result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page,
			NULL, NULL);
 	if (result < 0)
 		return result;
...
@@ -678,7 +678,7 @@ static int fault_in_user_writeable(u32 __user *uaddr)
 	int ret;
 	mmap_read_lock(mm);
-	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
+	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
 	mmap_read_unlock(mm);
...
@@ -4742,7 +4742,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		void *maddr;
 		struct page *page = NULL;
-		ret = get_user_pages_remote(tsk, mm, addr, 1,
+		ret = get_user_pages_remote(mm, addr, 1,
				gup_flags, &page, &vma, NULL);
 		if (ret <= 0) {
 #ifndef CONFIG_HAVE_IOREMAP_PROT
...
@@ -105,7 +105,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
		 * current/current->mm
		 */
 		mmap_read_lock(mm);
-		pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages,
+		pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
						     flags, process_pages,
						     NULL, &locked);
 		if (locked)
...
@@ -914,7 +914,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
	 * (represented by bprm). 'current' is the process doing
	 * the execve().
	 */
-	if (get_user_pages_remote(current, bprm->mm, pos, 1,
+	if (get_user_pages_remote(bprm->mm, pos, 1,
				  FOLL_FORCE, &page, NULL, NULL) <= 0)
 		return false;
 #else
...
@@ -61,7 +61,7 @@ static void async_pf_execute(struct work_struct *work)
	 * access remotely.
	 */
 	mmap_read_lock(mm);
-	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
+	get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL,
			      &locked);
 	if (locked)
 		mmap_read_unlock(mm);
...
@@ -1893,7 +1893,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
	 * not call the fault handler, so do it here.
	 */
 	bool unlocked = false;
-	r = fixup_user_fault(current, current->mm, addr,
+	r = fixup_user_fault(current->mm, addr,
			     (write_fault ? FAULT_FLAG_WRITE : 0),
			     &unlocked);
 	if (unlocked)
...