Commit a0e136e5 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull get_user_pages_fast() conversion from Al Viro:
 "A bunch of places switched to get_user_pages_fast()"

* 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ceph: use get_user_pages_fast()
  pvr2fs: use get_user_pages_fast()
  atomisp: use get_user_pages_fast()
  st: use get_user_pages_fast()
  via_dmablit(): use get_user_pages_fast()
  fsl_hypervisor: switch to get_user_pages_fast()
  rapidio: switch to get_user_pages_fast()
  vchiq_2835_arm: switch to get_user_pages_fast()
parents 16382e17 77478715
...@@ -238,9 +238,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) ...@@ -238,9 +238,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages); vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
if (NULL == vsg->pages) if (NULL == vsg->pages)
return -ENOMEM; return -ENOMEM;
ret = get_user_pages_unlocked((unsigned long)xfer->mem_addr, ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
vsg->num_pages, vsg->pages, vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
(vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0); vsg->pages);
if (ret != vsg->num_pages) { if (ret != vsg->num_pages) {
if (ret < 0) if (ret < 0)
return ret; return ret;
......
...@@ -889,11 +889,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, ...@@ -889,11 +889,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
goto err_req; goto err_req;
} }
pinned = get_user_pages_unlocked( pinned = get_user_pages_fast(
(unsigned long)xfer->loc_addr & PAGE_MASK, (unsigned long)xfer->loc_addr & PAGE_MASK,
nr_pages, nr_pages, dir == DMA_FROM_DEVICE, page_list);
page_list,
dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0);
if (pinned != nr_pages) { if (pinned != nr_pages) {
if (pinned < 0) { if (pinned < 0) {
......
...@@ -4920,11 +4920,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp, ...@@ -4920,11 +4920,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
/* Try to fault in all of the necessary pages */ /* Try to fault in all of the necessary pages */
/* rw==READ means read from drive, write into memory area */ /* rw==READ means read from drive, write into memory area */
res = get_user_pages_unlocked( res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
uaddr,
nr_pages,
pages,
rw == READ ? FOLL_WRITE : 0); /* don't force */
/* Errors and no page mapped should return here */ /* Errors and no page mapped should return here */
if (res < nr_pages) if (res < nr_pages)
......
...@@ -1020,10 +1020,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo, ...@@ -1020,10 +1020,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
} else { } else {
/*Handle frame buffer allocated in user space*/ /*Handle frame buffer allocated in user space*/
mutex_unlock(&bo->mutex); mutex_unlock(&bo->mutex);
down_read(&current->mm->mmap_sem); page_nr = get_user_pages_fast((unsigned long)userptr,
page_nr = get_user_pages((unsigned long)userptr, (int)(bo->pgnr), 1, pages);
(int)(bo->pgnr), 1, pages, NULL);
up_read(&current->mm->mmap_sem);
mutex_lock(&bo->mutex); mutex_lock(&bo->mutex);
bo->mem_type = HMM_BO_MEM_TYPE_USER; bo->mem_type = HMM_BO_MEM_TYPE_USER;
} }
......
...@@ -90,8 +90,7 @@ static irqreturn_t ...@@ -90,8 +90,7 @@ static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id); vchiq_doorbell_irq(int irq, void *dev_id);
static struct vchiq_pagelist_info * static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type, create_pagelist(char __user *buf, size_t count, unsigned short type);
struct task_struct *task);
static void static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo, free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
...@@ -255,8 +254,7 @@ vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle, ...@@ -255,8 +254,7 @@ vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
pagelistinfo = create_pagelist((char __user *)offset, size, pagelistinfo = create_pagelist((char __user *)offset, size,
(dir == VCHIQ_BULK_RECEIVE) (dir == VCHIQ_BULK_RECEIVE)
? PAGELIST_READ ? PAGELIST_READ
: PAGELIST_WRITE, : PAGELIST_WRITE);
current);
if (!pagelistinfo) if (!pagelistinfo)
return VCHIQ_ERROR; return VCHIQ_ERROR;
...@@ -395,8 +393,7 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo) ...@@ -395,8 +393,7 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
*/ */
static struct vchiq_pagelist_info * static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type, create_pagelist(char __user *buf, size_t count, unsigned short type)
struct task_struct *task)
{ {
PAGELIST_T *pagelist; PAGELIST_T *pagelist;
struct vchiq_pagelist_info *pagelistinfo; struct vchiq_pagelist_info *pagelistinfo;
...@@ -476,14 +473,11 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, ...@@ -476,14 +473,11 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
} }
/* do not try and release vmalloc pages */ /* do not try and release vmalloc pages */
} else { } else {
down_read(&task->mm->mmap_sem); actual_pages = get_user_pages_fast(
actual_pages = get_user_pages( (unsigned long)buf & PAGE_MASK,
(unsigned long)buf & PAGE_MASK,
num_pages, num_pages,
(type == PAGELIST_READ) ? FOLL_WRITE : 0, type == PAGELIST_READ,
pages, pages);
NULL /*vmas */);
up_read(&task->mm->mmap_sem);
if (actual_pages != num_pages) { if (actual_pages != num_pages) {
vchiq_log_info(vchiq_arm_log_level, vchiq_log_info(vchiq_arm_log_level,
......
...@@ -686,9 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf, ...@@ -686,9 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages) if (!pages)
return -ENOMEM; return -ENOMEM;
ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages, ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages);
FOLL_WRITE);
if (ret < nr_pages) { if (ret < nr_pages) {
nr_pages = ret; nr_pages = ret;
ret = -EINVAL; ret = -EINVAL;
......
...@@ -243,8 +243,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) ...@@ -243,8 +243,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list)); sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
/* Get the physical addresses of the source buffer */ /* Get the physical addresses of the source buffer */
num_pinned = get_user_pages_unlocked(param.local_vaddr - lb_offset, num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
num_pages, pages, (param.source == -1) ? 0 : FOLL_WRITE); num_pages, param.source != -1, pages);
if (num_pinned != num_pages) { if (num_pinned != num_pages) {
/* get_user_pages() failed */ /* get_user_pages() failed */
......
...@@ -25,9 +25,9 @@ struct page **ceph_get_direct_page_vector(const void __user *data, ...@@ -25,9 +25,9 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
while (got < num_pages) { while (got < num_pages) {
rc = get_user_pages_unlocked( rc = get_user_pages_fast(
(unsigned long)data + ((unsigned long)got * PAGE_SIZE), (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
num_pages - got, pages + got, write_page ? FOLL_WRITE : 0); num_pages - got, write_page, pages + got);
if (rc < 0) if (rc < 0)
break; break;
BUG_ON(rc == 0); BUG_ON(rc == 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment