Commit 14a638ab authored by Mauro Carvalho Chehab

media: atomisp: use pin_user_pages() for memory allocation

Instead of using a hacked version of an old copy of
get_user_pages(), use pin_user_pages().
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
parent 19ae0855
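
Background note (not part of the commit message): pin_user_pages() takes a FOLL_PIN reference rather than the plain FOLL_GET reference that get_user_pages()-style code takes, which lets the mm layer track pages that are long-lived DMA targets. The sketch below is illustrative only — the helper names pin_user_buffer()/unpin_user_buffer() are made up, and it uses pin_user_pages_fast(), a variant that handles mmap locking internally, whereas the commit itself calls pin_user_pages() directly:

```c
#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative helpers only -- names are not from the atomisp driver. */
static long pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
			    struct page ***pagesp)
{
	struct page **pages;
	long pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * FOLL_LONGTERM: the pin outlives this call (e.g. a DMA buffer),
	 * so the core takes care not to pin pages it may need to move.
	 */
	pinned = pin_user_pages_fast(uaddr, nr_pages,
				     FOLL_LONGTERM | FOLL_WRITE, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}
	if (pinned != nr_pages) {
		unpin_user_pages(pages, pinned);	/* partial pin: undo */
		kfree(pages);
		return -EFAULT;
	}

	*pagesp = pages;
	return pinned;
}

static void unpin_user_buffer(struct page **pages, unsigned long nr_pages)
{
	/*
	 * Pages acquired with pin_user_pages*() must be released with
	 * unpin_user_pages*(), never with put_page().
	 */
	unpin_user_pages(pages, nr_pages);
	kfree(pages);
}
```

The key rule the commit adopts is that symmetry: every FOLL_PIN acquire must be paired with an unpin_user_pages*() release.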
@@ -130,6 +130,8 @@ struct hmm_buffer_object {
 	struct list_head list;
 	struct kref kref;
 
+	struct page **pages;
+
 	/* mutex protecting this BO */
 	struct mutex mutex;
 	enum hmm_bo_type type;
@@ -854,109 +854,20 @@ static void free_private_pages(struct hmm_buffer_object *bo,
 	kfree(bo->page_obj);
 }
 
-/*
- * Hacked from kernel function __get_user_pages in mm/memory.c
- *
- * Handle buffers allocated by other kernel space driver and mmaped into user
- * space, function Ignore the VM_PFNMAP and VM_IO flag in VMA structure
- *
- * Get physical pages from user space virtual address and update into page list
- */
-static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
-			      unsigned long start, int nr_pages,
-			      unsigned int gup_flags, struct page **pages,
-			      struct vm_area_struct **vmas)
-{
-	int i, ret;
-	unsigned long vm_flags;
-
-	if (nr_pages <= 0)
-		return 0;
-
-	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
-
-	/*
-	 * Require read or write permissions.
-	 * If FOLL_FORCE is set, we only require the "MAY" flags.
-	 */
-	vm_flags  = (gup_flags & FOLL_WRITE) ?
-		    (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= (gup_flags & FOLL_FORCE) ?
-		    (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-	i = 0;
-
-	do {
-		struct vm_area_struct *vma;
-
-		vma = find_vma(mm, start);
-		if (!vma) {
-			dev_err(atomisp_dev, "find_vma failed\n");
-			return i ? : -EFAULT;
-		}
-
-		if (is_vm_hugetlb_page(vma)) {
-			/*
-			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &nr_pages, i, gup_flags);
-			*/
-			continue;
-		}
-
-		do {
-			struct page *page;
-			unsigned long pfn;
-
-			/*
-			 * If we have a pending SIGKILL, don't keep faulting
-			 * pages and potentially allocating memory.
-			 */
-			if (unlikely(fatal_signal_pending(current))) {
-				dev_err(atomisp_dev,
-					"fatal_signal_pending in %s\n",
-					__func__);
-				return i ? i : -ERESTARTSYS;
-			}
-
-			ret = follow_pfn(vma, start, &pfn);
-			if (ret) {
-				dev_err(atomisp_dev, "follow_pfn() failed\n");
-				return i ? : -EFAULT;
-			}
-
-			page = pfn_to_page(pfn);
-			if (IS_ERR(page))
-				return i ? i : PTR_ERR(page);
-
-			if (pages) {
-				pages[i] = page;
-				get_page(page);
-				flush_anon_page(vma, page, start);
-				flush_dcache_page(page);
-			}
-
-			if (vmas)
-				vmas[i] = vma;
-
-			i++;
-			start += PAGE_SIZE;
-			nr_pages--;
-		} while (nr_pages && start < vma->vm_end);
-	} while (nr_pages);
-
-	return i;
-}
-
-static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
-			    unsigned long start, int nr_pages, int write, int force,
-			    struct page **pages, struct vm_area_struct **vmas)
-{
-	int flags = FOLL_TOUCH;
-
-	if (pages)
-		flags |= FOLL_GET;
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
-
-	return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
-}
-
+static void free_user_pages(struct hmm_buffer_object *bo)
+{
+	int i;
+
+	hmm_mem_stat.usr_size -= bo->pgnr;
+
+	if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
+		unpin_user_pages(bo->pages, bo->pgnr);
+	} else {
+		for (i = 0; i < bo->pgnr; i++)
+			put_page(bo->pages[i]);
+	}
+
+	kfree(bo->pages);
+	kfree(bo->page_obj);
+}
+
 /*
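
Note that the new free_user_pages() must distinguish the two acquisition paths: the PFN-mapped branch is now pinned with FOLL_PIN and released via unpin_user_pages(), while the regular userptr branch still takes plain page references and keeps the put_page() loop. Had the device written into the pages, the dirty-aware release variant would apply; a hedged one-liner, not part of this commit, with a made-up function name:

```c
#include <linux/mm.h>

/*
 * Sketch only: release pinned pages after a device may have written to
 * them. unpin_user_pages_dirty_lock(..., true) marks each page dirty
 * before dropping the FOLL_PIN reference.
 */
static void release_dma_target_pages(struct page **pages, unsigned long nr,
				     bool device_wrote)
{
	if (device_wrote)
		unpin_user_pages_dirty_lock(pages, nr, true);
	else
		unpin_user_pages(pages, nr);
}
```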
@@ -1000,10 +911,11 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 	userptr = untagged_addr(userptr);
 
+	bo->pages = pages;
+
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
-		page_nr = get_pfnmap_pages(current, current->mm,
-					   (unsigned long)userptr,
-					   (int)(bo->pgnr), 1, 0,
-					   pages, NULL);
+		page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
+					 FOLL_LONGTERM | FOLL_WRITE,
+					 pages, NULL);
 		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
 	} else {
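
The deleted wrapper mapped its (write, force) integer arguments onto FOLL_* bits; the replacement passes the flags explicitly and adds FOLL_LONGTERM, since the ISP holds the pages across DMA. A small reference sketch of both flag sets, reconstructed from the deleted code above (the helper names are illustrative, not driver code):

```c
#include <linux/mm.h>

/*
 * What the deleted get_pfnmap_pages(..., write = 1, force = 0, pages, ...)
 * amounted to.
 */
static unsigned int old_gup_flags(int write, int force, bool have_pages)
{
	unsigned int flags = FOLL_TOUCH;

	if (have_pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;
	return flags;	/* FOLL_TOUCH | FOLL_GET | FOLL_WRITE in this driver */
}

/* What the new call requests instead: a writable, long-lived FOLL_PIN. */
static unsigned int new_pin_flags(void)
{
	return FOLL_LONGTERM | FOLL_WRITE;
}
```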
@@ -1020,6 +932,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 		bo->pgnr,
 		bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr);
 
+	hmm_mem_stat.usr_size += bo->pgnr;
+
 	/* can be written by caller, not forced */
 	if (page_nr != bo->pgnr) {
 		dev_err(atomisp_dev,
@@ -1032,29 +946,14 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 		bo->page_obj[i].page = pages[i];
 		bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
 	}
 
-	hmm_mem_stat.usr_size += bo->pgnr;
 	kfree(pages);
 
 	return 0;
 
 out_of_mem:
-	for (i = 0; i < page_nr; i++)
-		put_page(pages[i]);
-	kfree(pages);
-	kfree(bo->page_obj);
+	free_user_pages(bo);
 
 	return -ENOMEM;
 }
 
-static void free_user_pages(struct hmm_buffer_object *bo)
-{
-	int i;
-
-	for (i = 0; i < bo->pgnr; i++)
-		put_page(bo->page_obj[i].page);
-
-	hmm_mem_stat.usr_size -= bo->pgnr;
-	kfree(bo->page_obj);
-}
-
 /*
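
Two consequences of this last hunk are worth spelling out. The usr_size increment moves earlier (previous hunk) so that the decrement inside free_user_pages() stays balanced when the error path runs, and the open-coded out_of_mem cleanup collapses into a single free_user_pages() call — possible only because bo->pages and bo->mem_type are recorded on the object before the pin is attempted. A minimal sketch of that idiom under hypothetical names (demo_bo, demo_alloc, demo_free are not driver code, and it substitutes pin_user_pages_fast() for brevity):

```c
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical object mirroring the relevant hmm_buffer_object fields. */
struct demo_bo {
	unsigned long	uaddr;
	unsigned long	pgnr;
	struct page	**pages;
	long		pinned;
};

static void demo_free(struct demo_bo *bo)
{
	/* Releases exactly what demo_alloc() recorded on the object. */
	if (bo->pinned > 0)
		unpin_user_pages(bo->pages, bo->pinned);
	kfree(bo->pages);
	bo->pages = NULL;
	bo->pinned = 0;
}

static int demo_alloc(struct demo_bo *bo)
{
	bo->pages = kcalloc(bo->pgnr, sizeof(*bo->pages), GFP_KERNEL);
	if (!bo->pages)
		return -ENOMEM;

	/* Record state on the object *before* the fallible call ... */
	bo->pinned = pin_user_pages_fast(bo->uaddr, bo->pgnr,
					 FOLL_LONGTERM | FOLL_WRITE,
					 bo->pages);
	if (bo->pinned != bo->pgnr) {
		/* ... so one teardown helper serves every exit. */
		demo_free(bo);
		return -ENOMEM;
	}
	return 0;
}
```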