Commit e913a8cd authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'topic/iomem-mmap-vs-gup-2021-02-22' of git://anongit.freedesktop.org/drm/drm

Pull follow_pfn() updates from Daniel Vetter:
 "Fixes around VM_FPNMAP and follow_pfn:

   - replace mm/frame_vector.c by get_user_pages in misc/habana and
     drm/exynos drivers, then move that into media as it's sole user

   - close race in generic_access_phys

   - s390 pci ioctl fix of this series landed in 5.11 already

   - properly revoke iomem mappings (/dev/mem, pci files)"

* tag 'topic/iomem-mmap-vs-gup-2021-02-22' of git://anongit.freedesktop.org/drm/drm:
  PCI: Revoke mappings like devmem
  PCI: Also set up legacy files only after sysfs init
  sysfs: Support zapping of binary attr mmaps
  resource: Move devmem revoke code to resource framework
  /dev/mem: Only set filp->f_mapping
  PCI: Obey iomem restrictions for procfs mmap
  mm: Close race in generic_access_phys
  media: videobuf2: Move frame_vector into media subsystem
  mm/frame-vector: Use FOLL_LONGTERM
  misc/habana: Use FOLL_LONGTERM for userptr
  misc/habana: Stop using frame_vector helpers
  drm/exynos: Use FOLL_LONGTERM for g2d cmdlists
  drm/exynos: Stop using frame_vector helpers
parents 4b5f9254 636b21b5
...@@ -31,9 +31,6 @@ ...@@ -31,9 +31,6 @@
#include <linux/uio.h> #include <linux/uio.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/security.h> #include <linux/security.h>
#include <linux/pseudo_fs.h>
#include <uapi/linux/magic.h>
#include <linux/mount.h>
#ifdef CONFIG_IA64 #ifdef CONFIG_IA64
# include <linux/efi.h> # include <linux/efi.h>
...@@ -829,42 +826,6 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) ...@@ -829,42 +826,6 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret; return ret;
} }
static struct inode *devmem_inode;
#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
/* pairs with smp_store_release() in devmem_init_inode() */
struct inode *inode = smp_load_acquire(&devmem_inode);
/*
* Check that the initialization has completed. Losing the race
* is ok because it means drivers are claiming resources before
* the fs_initcall level of init and prevent /dev/mem from
* establishing mappings.
*/
if (!inode)
return;
/*
* The expectation is that the driver has successfully marked
* the resource busy by this point, so devmem_is_allowed()
* should start returning false, however for performance this
* does not iterate the entire resource range.
*/
if (devmem_is_allowed(PHYS_PFN(res->start)) &&
devmem_is_allowed(PHYS_PFN(res->end))) {
/*
* *cringe* iomem=relaxed says "go ahead, what's the
* worst that can happen?"
*/
return;
}
unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#endif
static int open_port(struct inode *inode, struct file *filp) static int open_port(struct inode *inode, struct file *filp)
{ {
int rc; int rc;
...@@ -884,8 +845,7 @@ static int open_port(struct inode *inode, struct file *filp) ...@@ -884,8 +845,7 @@ static int open_port(struct inode *inode, struct file *filp)
* revocations when drivers want to take over a /dev/mem mapped * revocations when drivers want to take over a /dev/mem mapped
* range. * range.
*/ */
inode->i_mapping = devmem_inode->i_mapping; filp->f_mapping = iomem_get_mapping();
filp->f_mapping = inode->i_mapping;
return 0; return 0;
} }
...@@ -1017,48 +977,6 @@ static char *mem_devnode(struct device *dev, umode_t *mode) ...@@ -1017,48 +977,6 @@ static char *mem_devnode(struct device *dev, umode_t *mode)
static struct class *mem_class; static struct class *mem_class;
static int devmem_fs_init_fs_context(struct fs_context *fc)
{
return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}
static struct file_system_type devmem_fs_type = {
.name = "devmem",
.owner = THIS_MODULE,
.init_fs_context = devmem_fs_init_fs_context,
.kill_sb = kill_anon_super,
};
static int devmem_init_inode(void)
{
static struct vfsmount *devmem_vfs_mount;
static int devmem_fs_cnt;
struct inode *inode;
int rc;
rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
if (rc < 0) {
pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
return rc;
}
inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
if (IS_ERR(inode)) {
rc = PTR_ERR(inode);
pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
return rc;
}
/*
* Publish /dev/mem initialized.
* Pairs with smp_load_acquire() in revoke_devmem().
*/
smp_store_release(&devmem_inode, inode);
return 0;
}
static int __init chr_dev_init(void) static int __init chr_dev_init(void)
{ {
int minor; int minor;
...@@ -1080,8 +998,6 @@ static int __init chr_dev_init(void) ...@@ -1080,8 +998,6 @@ static int __init chr_dev_init(void)
*/ */
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port()) if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue; continue;
if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
continue;
device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name); NULL, devlist[minor].name);
......
...@@ -89,7 +89,6 @@ comment "Sub-drivers" ...@@ -89,7 +89,6 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D config DRM_EXYNOS_G2D
bool "G2D" bool "G2D"
depends on VIDEO_SAMSUNG_S5P_G2D=n || COMPILE_TEST depends on VIDEO_SAMSUNG_S5P_G2D=n || COMPILE_TEST
select FRAME_VECTOR
help help
Choose this option if you want to use Exynos G2D for DRM. Choose this option if you want to use Exynos G2D for DRM.
......
...@@ -205,7 +205,8 @@ struct g2d_cmdlist_userptr { ...@@ -205,7 +205,8 @@ struct g2d_cmdlist_userptr {
dma_addr_t dma_addr; dma_addr_t dma_addr;
unsigned long userptr; unsigned long userptr;
unsigned long size; unsigned long size;
struct frame_vector *vec; struct page **pages;
unsigned int npages;
struct sg_table *sgt; struct sg_table *sgt;
atomic_t refcount; atomic_t refcount;
bool in_pool; bool in_pool;
...@@ -378,7 +379,6 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d, ...@@ -378,7 +379,6 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
bool force) bool force)
{ {
struct g2d_cmdlist_userptr *g2d_userptr = obj; struct g2d_cmdlist_userptr *g2d_userptr = obj;
struct page **pages;
if (!obj) if (!obj)
return; return;
...@@ -398,15 +398,9 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d, ...@@ -398,15 +398,9 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt, dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
DMA_BIDIRECTIONAL, 0); DMA_BIDIRECTIONAL, 0);
pages = frame_vector_pages(g2d_userptr->vec); unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages,
if (!IS_ERR(pages)) { true);
int i; kvfree(g2d_userptr->pages);
for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
set_page_dirty_lock(pages[i]);
}
put_vaddr_frames(g2d_userptr->vec);
frame_vector_destroy(g2d_userptr->vec);
if (!g2d_userptr->out_of_list) if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list); list_del_init(&g2d_userptr->list);
...@@ -474,35 +468,35 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d, ...@@ -474,35 +468,35 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
offset = userptr & ~PAGE_MASK; offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size); end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT; npages = (end - start) >> PAGE_SHIFT;
g2d_userptr->vec = frame_vector_create(npages); g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages),
if (!g2d_userptr->vec) { GFP_KERNEL);
if (!g2d_userptr->pages) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_free; goto err_free;
} }
ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, ret = pin_user_pages_fast(start, npages,
g2d_userptr->vec); FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
g2d_userptr->pages);
if (ret != npages) { if (ret != npages) {
DRM_DEV_ERROR(g2d->dev, DRM_DEV_ERROR(g2d->dev,
"failed to get user pages from userptr.\n"); "failed to get user pages from userptr.\n");
if (ret < 0) if (ret < 0)
goto err_destroy_framevec; goto err_destroy_pages;
ret = -EFAULT; npages = ret;
goto err_put_framevec;
}
if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
ret = -EFAULT; ret = -EFAULT;
goto err_put_framevec; goto err_unpin_pages;
} }
g2d_userptr->npages = npages;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) { if (!sgt) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_put_framevec; goto err_unpin_pages;
} }
ret = sg_alloc_table_from_pages(sgt, ret = sg_alloc_table_from_pages(sgt,
frame_vector_pages(g2d_userptr->vec), g2d_userptr->pages,
npages, offset, size, GFP_KERNEL); npages, offset, size, GFP_KERNEL);
if (ret < 0) { if (ret < 0) {
DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n"); DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
...@@ -538,11 +532,11 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d, ...@@ -538,11 +532,11 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
err_free_sgt: err_free_sgt:
kfree(sgt); kfree(sgt);
err_put_framevec: err_unpin_pages:
put_vaddr_frames(g2d_userptr->vec); unpin_user_pages(g2d_userptr->pages, npages);
err_destroy_framevec: err_destroy_pages:
frame_vector_destroy(g2d_userptr->vec); kvfree(g2d_userptr->pages);
err_free: err_free:
kfree(g2d_userptr); kfree(g2d_userptr);
......
...@@ -9,7 +9,6 @@ config VIDEOBUF2_V4L2 ...@@ -9,7 +9,6 @@ config VIDEOBUF2_V4L2
config VIDEOBUF2_MEMOPS config VIDEOBUF2_MEMOPS
tristate tristate
select FRAME_VECTOR
config VIDEOBUF2_DMA_CONTIG config VIDEOBUF2_DMA_CONTIG
tristate tristate
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
videobuf2-common-objs := videobuf2-core.o videobuf2-common-objs := videobuf2-core.o
videobuf2-common-objs += frame_vector.o
ifeq ($(CONFIG_TRACEPOINTS),y) ifeq ($(CONFIG_TRACEPOINTS),y)
videobuf2-common-objs += vb2-trace.o videobuf2-common-objs += vb2-trace.o
......
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <media/frame_vector.h>
/** /**
* get_vaddr_frames() - map virtual addresses to pfns * get_vaddr_frames() - map virtual addresses to pfns
* @start: starting user address * @start: starting user address
...@@ -32,13 +34,12 @@ ...@@ -32,13 +34,12 @@
* This function takes care of grabbing mmap_lock as necessary. * This function takes care of grabbing mmap_lock as necessary.
*/ */
int get_vaddr_frames(unsigned long start, unsigned int nr_frames, int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
unsigned int gup_flags, struct frame_vector *vec) struct frame_vector *vec)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
int ret = 0; int ret = 0;
int err; int err;
int locked;
if (nr_frames == 0) if (nr_frames == 0)
return 0; return 0;
...@@ -48,40 +49,26 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, ...@@ -48,40 +49,26 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
start = untagged_addr(start); start = untagged_addr(start);
mmap_read_lock(mm); ret = pin_user_pages_fast(start, nr_frames,
locked = 1; FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
vma = find_vma_intersection(mm, start, start + 1); (struct page **)(vec->ptrs));
if (!vma) { if (ret > 0) {
ret = -EFAULT;
goto out;
}
/*
* While get_vaddr_frames() could be used for transient (kernel
* controlled lifetime) pinning of memory pages all current
* users establish long term (userspace controlled lifetime)
* page pinning. Treat get_vaddr_frames() like
* get_user_pages_longterm() and disallow it for filesystem-dax
* mappings.
*/
if (vma_is_fsdax(vma)) {
ret = -EOPNOTSUPP;
goto out;
}
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
vec->got_ref = true; vec->got_ref = true;
vec->is_pfns = false; vec->is_pfns = false;
ret = pin_user_pages_locked(start, nr_frames, goto out_unlocked;
gup_flags, (struct page **)(vec->ptrs), &locked);
goto out;
} }
mmap_read_lock(mm);
vec->got_ref = false; vec->got_ref = false;
vec->is_pfns = true; vec->is_pfns = true;
ret = 0;
do { do {
unsigned long *nums = frame_vector_pfns(vec); unsigned long *nums = frame_vector_pfns(vec);
vma = find_vma_intersection(mm, start, start + 1);
if (!vma)
break;
while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) {
err = follow_pfn(vma, start, &nums[ret]); err = follow_pfn(vma, start, &nums[ret]);
if (err) { if (err) {
...@@ -92,17 +79,13 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, ...@@ -92,17 +79,13 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
start += PAGE_SIZE; start += PAGE_SIZE;
ret++; ret++;
} }
/* /* Bail out if VMA doesn't completely cover the tail page. */
* We stop if we have enough pages or if VMA doesn't completely if (start < vma->vm_end)
* cover the tail page.
*/
if (ret >= nr_frames || start < vma->vm_end)
break; break;
vma = find_vma_intersection(mm, start, start + 1); } while (ret < nr_frames);
} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
out: out:
if (locked)
mmap_read_unlock(mm); mmap_read_unlock(mm);
out_unlocked:
if (!ret) if (!ret)
ret = -EFAULT; ret = -EFAULT;
if (ret > 0) if (ret > 0)
......
...@@ -40,7 +40,6 @@ struct frame_vector *vb2_create_framevec(unsigned long start, ...@@ -40,7 +40,6 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
unsigned long first, last; unsigned long first, last;
unsigned long nr; unsigned long nr;
struct frame_vector *vec; struct frame_vector *vec;
unsigned int flags = FOLL_FORCE | FOLL_WRITE;
first = start >> PAGE_SHIFT; first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT; last = (start + length - 1) >> PAGE_SHIFT;
...@@ -48,7 +47,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start, ...@@ -48,7 +47,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr); vec = frame_vector_create(nr);
if (!vec) if (!vec)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec); ret = get_vaddr_frames(start & PAGE_MASK, nr, vec);
if (ret < 0) if (ret < 0)
goto out_destroy; goto out_destroy;
/* We accept only complete set of PFNs */ /* We accept only complete set of PFNs */
......
...@@ -12,6 +12,5 @@ config VIDEO_OMAP2_VOUT ...@@ -12,6 +12,5 @@ config VIDEO_OMAP2_VOUT
depends on VIDEO_V4L2 depends on VIDEO_V4L2
select VIDEOBUF2_DMA_CONTIG select VIDEOBUF2_DMA_CONTIG
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3 select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
select FRAME_VECTOR
help help
V4L2 Display driver support for OMAP2/3 based boards. V4L2 Display driver support for OMAP2/3 based boards.
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
config HABANA_AI config HABANA_AI
tristate "HabanaAI accelerators (habanalabs)" tristate "HabanaAI accelerators (habanalabs)"
depends on PCI && HAS_IOMEM depends on PCI && HAS_IOMEM
select FRAME_VECTOR
select GENERIC_ALLOCATOR select GENERIC_ALLOCATOR
select HWMON select HWMON
help help
......
...@@ -1089,7 +1089,8 @@ struct hl_ctx_mgr { ...@@ -1089,7 +1089,8 @@ struct hl_ctx_mgr {
* struct hl_userptr - memory mapping chunk information * struct hl_userptr - memory mapping chunk information
* @vm_type: type of the VM. * @vm_type: type of the VM.
* @job_node: linked-list node for hanging the object on the Job's list. * @job_node: linked-list node for hanging the object on the Job's list.
* @vec: pointer to the frame vector. * @pages: pointer to struct page array
* @npages: size of @pages array
* @sgt: pointer to the scatter-gather table that holds the pages. * @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it. * @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions. * @debugfs_list: node in debugfs list of command submissions.
...@@ -1100,7 +1101,8 @@ struct hl_ctx_mgr { ...@@ -1100,7 +1101,8 @@ struct hl_ctx_mgr {
struct hl_userptr { struct hl_userptr {
enum vm_type_t vm_type; /* must be first */ enum vm_type_t vm_type; /* must be first */
struct list_head job_node; struct list_head job_node;
struct frame_vector *vec; struct page **pages;
unsigned int npages;
struct sg_table *sgt; struct sg_table *sgt;
enum dma_data_direction dir; enum dma_data_direction dir;
struct list_head debugfs_list; struct list_head debugfs_list;
......
...@@ -1436,45 +1436,40 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size, ...@@ -1436,45 +1436,40 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EFAULT; return -EFAULT;
} }
userptr->vec = frame_vector_create(npages); userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
if (!userptr->vec) { GFP_KERNEL);
dev_err(hdev->dev, "Failed to create frame vector\n"); if (!userptr->pages)
return -ENOMEM; return -ENOMEM;
}
rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, rc = pin_user_pages_fast(start, npages,
userptr->vec); FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
userptr->pages);
if (rc != npages) { if (rc != npages) {
dev_err(hdev->dev, dev_err(hdev->dev,
"Failed to map host memory, user ptr probably wrong\n"); "Failed to map host memory, user ptr probably wrong\n");
if (rc < 0) if (rc < 0)
goto destroy_framevec; goto destroy_pages;
rc = -EFAULT; npages = rc;
goto put_framevec;
}
if (frame_vector_to_pages(userptr->vec) < 0) {
dev_err(hdev->dev,
"Failed to translate frame vector to pages\n");
rc = -EFAULT; rc = -EFAULT;
goto put_framevec; goto put_pages;
} }
userptr->npages = npages;
rc = sg_alloc_table_from_pages(userptr->sgt, rc = sg_alloc_table_from_pages(userptr->sgt,
frame_vector_pages(userptr->vec), userptr->pages,
npages, offset, size, GFP_ATOMIC); npages, offset, size, GFP_ATOMIC);
if (rc < 0) { if (rc < 0) {
dev_err(hdev->dev, "failed to create SG table from pages\n"); dev_err(hdev->dev, "failed to create SG table from pages\n");
goto put_framevec; goto put_pages;
} }
return 0; return 0;
put_framevec: put_pages:
put_vaddr_frames(userptr->vec); unpin_user_pages(userptr->pages, npages);
destroy_framevec: destroy_pages:
frame_vector_destroy(userptr->vec); kvfree(userptr->pages);
return rc; return rc;
} }
...@@ -1560,8 +1555,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, ...@@ -1560,8 +1555,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
*/ */
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{ {
struct page **pages;
hl_debugfs_remove_userptr(hdev, userptr); hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped) if (userptr->dma_mapped)
...@@ -1569,15 +1562,8 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) ...@@ -1569,15 +1562,8 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
userptr->sgt->nents, userptr->sgt->nents,
userptr->dir); userptr->dir);
pages = frame_vector_pages(userptr->vec); unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
if (!IS_ERR(pages)) { kvfree(userptr->pages);
int i;
for (i = 0; i < frame_vector_count(userptr->vec); i++)
set_page_dirty_lock(pages[i]);
}
put_vaddr_frames(userptr->vec);
frame_vector_destroy(userptr->vec);
list_del(&userptr->job_node); list_del(&userptr->job_node);
......
...@@ -927,6 +927,9 @@ void pci_create_legacy_files(struct pci_bus *b) ...@@ -927,6 +927,9 @@ void pci_create_legacy_files(struct pci_bus *b)
{ {
int error; int error;
if (!sysfs_initialized)
return;
b->legacy_io = kcalloc(2, sizeof(struct bin_attribute), b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
GFP_ATOMIC); GFP_ATOMIC);
if (!b->legacy_io) if (!b->legacy_io)
...@@ -939,6 +942,7 @@ void pci_create_legacy_files(struct pci_bus *b) ...@@ -939,6 +942,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->read = pci_read_legacy_io; b->legacy_io->read = pci_read_legacy_io;
b->legacy_io->write = pci_write_legacy_io; b->legacy_io->write = pci_write_legacy_io;
b->legacy_io->mmap = pci_mmap_legacy_io; b->legacy_io->mmap = pci_mmap_legacy_io;
b->legacy_io->mapping = iomem_get_mapping();
pci_adjust_legacy_attr(b, pci_mmap_io); pci_adjust_legacy_attr(b, pci_mmap_io);
error = device_create_bin_file(&b->dev, b->legacy_io); error = device_create_bin_file(&b->dev, b->legacy_io);
if (error) if (error)
...@@ -951,6 +955,7 @@ void pci_create_legacy_files(struct pci_bus *b) ...@@ -951,6 +955,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->size = 1024*1024; b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = 0600; b->legacy_mem->attr.mode = 0600;
b->legacy_mem->mmap = pci_mmap_legacy_mem; b->legacy_mem->mmap = pci_mmap_legacy_mem;
b->legacy_io->mapping = iomem_get_mapping();
pci_adjust_legacy_attr(b, pci_mmap_mem); pci_adjust_legacy_attr(b, pci_mmap_mem);
error = device_create_bin_file(&b->dev, b->legacy_mem); error = device_create_bin_file(&b->dev, b->legacy_mem);
if (error) if (error)
...@@ -1166,6 +1171,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) ...@@ -1166,6 +1171,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
res_attr->mmap = pci_mmap_resource_uc; res_attr->mmap = pci_mmap_resource_uc;
} }
} }
if (res_attr->mmap)
res_attr->mapping = iomem_get_mapping();
res_attr->attr.name = res_attr_name; res_attr->attr.name = res_attr_name;
res_attr->attr.mode = 0600; res_attr->attr.mode = 0600;
res_attr->size = pci_resource_len(pdev, num); res_attr->size = pci_resource_len(pdev, num);
...@@ -1448,6 +1455,7 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev) ...@@ -1448,6 +1455,7 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
static int __init pci_sysfs_init(void) static int __init pci_sysfs_init(void)
{ {
struct pci_dev *pdev = NULL; struct pci_dev *pdev = NULL;
struct pci_bus *pbus = NULL;
int retval; int retval;
sysfs_initialized = 1; sysfs_initialized = 1;
...@@ -1459,6 +1467,9 @@ static int __init pci_sysfs_init(void) ...@@ -1459,6 +1467,9 @@ static int __init pci_sysfs_init(void)
} }
} }
while ((pbus = pci_find_next_bus(pbus)))
pci_create_legacy_files(pbus);
return 0; return 0;
} }
late_initcall(pci_sysfs_init); late_initcall(pci_sysfs_init);
......
...@@ -274,6 +274,11 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -274,6 +274,11 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
else else
return -EINVAL; return -EINVAL;
} }
if (dev->resource[i].flags & IORESOURCE_MEM &&
iomem_is_exclusive(dev->resource[i].start))
return -EINVAL;
ret = pci_mmap_page_range(dev, i, vma, ret = pci_mmap_page_range(dev, i, vma,
fpriv->mmap_state, write_combine); fpriv->mmap_state, write_combine);
if (ret < 0) if (ret < 0)
...@@ -293,6 +298,7 @@ static int proc_bus_pci_open(struct inode *inode, struct file *file) ...@@ -293,6 +298,7 @@ static int proc_bus_pci_open(struct inode *inode, struct file *file)
fpriv->write_combine = 0; fpriv->write_combine = 0;
file->private_data = fpriv; file->private_data = fpriv;
file->f_mapping = iomem_get_mapping();
return 0; return 0;
} }
......
...@@ -170,6 +170,16 @@ static int sysfs_kf_bin_mmap(struct kernfs_open_file *of, ...@@ -170,6 +170,16 @@ static int sysfs_kf_bin_mmap(struct kernfs_open_file *of,
return battr->mmap(of->file, kobj, battr, vma); return battr->mmap(of->file, kobj, battr, vma);
} }
static int sysfs_kf_bin_open(struct kernfs_open_file *of)
{
struct bin_attribute *battr = of->kn->priv;
if (battr->mapping)
of->file->f_mapping = battr->mapping;
return 0;
}
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr)
{ {
struct kernfs_node *kn = kobj->sd, *tmp; struct kernfs_node *kn = kobj->sd, *tmp;
...@@ -241,6 +251,7 @@ static const struct kernfs_ops sysfs_bin_kfops_mmap = { ...@@ -241,6 +251,7 @@ static const struct kernfs_ops sysfs_bin_kfops_mmap = {
.read = sysfs_kf_bin_read, .read = sysfs_kf_bin_read,
.write = sysfs_kf_bin_write, .write = sysfs_kf_bin_write,
.mmap = sysfs_kf_bin_mmap, .mmap = sysfs_kf_bin_mmap,
.open = sysfs_kf_bin_open,
}; };
int sysfs_add_file_mode_ns(struct kernfs_node *parent, int sysfs_add_file_mode_ns(struct kernfs_node *parent,
......
...@@ -334,11 +334,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq) ...@@ -334,11 +334,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
} }
#ifdef CONFIG_IO_STRICT_DEVMEM extern struct address_space *iomem_get_mapping(void);
void revoke_devmem(struct resource *res);
#else
static inline void revoke_devmem(struct resource *res) { };
#endif
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */ #endif /* _LINUX_IOPORT_H */
...@@ -593,7 +593,8 @@ struct vm_operations_struct { ...@@ -593,7 +593,8 @@ struct vm_operations_struct {
vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically /* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware * for use by special VMAs. See also generic_access_phys() for a generic
* implementation useful for any iomem mapping.
*/ */
int (*access)(struct vm_area_struct *vma, unsigned long addr, int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write); void *buf, int len, int write);
...@@ -1761,48 +1762,6 @@ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); ...@@ -1761,48 +1762,6 @@ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim); struct task_struct *task, bool bypass_rlim);
/* Container for pinned pfns / pages */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
unsigned int nr_frames; /* Number of frames stored in ptrs array */
bool got_ref; /* Did we pin pages by getting page ref? */
bool is_pfns; /* Does array contain pages or pfns? */
void *ptrs[]; /* Array of pinned pfns / pages. Use
* pfns_vector_pages() or pfns_vector_pfns()
* for access */
};
struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);
static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
return vec->nr_frames;
}
static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
if (vec->is_pfns) {
int err = frame_vector_to_pages(vec);
if (err)
return ERR_PTR(err);
}
return (struct page **)(vec->ptrs);
}
static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
if (!vec->is_pfns)
frame_vector_to_pfns(vec);
return (unsigned long *)(vec->ptrs);
}
struct kvec; struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages); struct page **pages);
......
...@@ -164,11 +164,13 @@ __ATTRIBUTE_GROUPS(_name) ...@@ -164,11 +164,13 @@ __ATTRIBUTE_GROUPS(_name)
struct file; struct file;
struct vm_area_struct; struct vm_area_struct;
struct address_space;
struct bin_attribute { struct bin_attribute {
struct attribute attr; struct attribute attr;
size_t size; size_t size;
void *private; void *private;
struct address_space *mapping;
ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t); char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
......
// SPDX-License-Identifier: GPL-2.0
#ifndef _MEDIA_FRAME_VECTOR_H
#define _MEDIA_FRAME_VECTOR_H
/* Container for pinned pfns / pages in frame_vector.c */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
unsigned int nr_frames; /* Number of frames stored in ptrs array */
bool got_ref; /* Did we pin pages by getting page ref? */
bool is_pfns; /* Does array contain pages or pfns? */
void *ptrs[]; /* Array of pinned pfns / pages. Use
* pfns_vector_pages() or pfns_vector_pfns()
* for access */
};
struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);
static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
return vec->nr_frames;
}
static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
if (vec->is_pfns) {
int err = frame_vector_to_pages(vec);
if (err)
return ERR_PTR(err);
}
return (struct page **)(vec->ptrs);
}
static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
if (!vec->is_pfns)
frame_vector_to_pfns(vec);
return (unsigned long *)(vec->ptrs);
}
#endif /* _MEDIA_FRAME_VECTOR_H */
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <media/media-request.h> #include <media/media-request.h>
#include <media/frame_vector.h>
#define VB2_MAX_FRAME (32) #define VB2_MAX_FRAME (32)
#define VB2_MAX_PLANES (8) #define VB2_MAX_PLANES (8)
......
...@@ -18,12 +18,15 @@ ...@@ -18,12 +18,15 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/pfn.h> #include <linux/pfn.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h> #include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <asm/io.h> #include <asm/io.h>
...@@ -1119,6 +1122,55 @@ resource_size_t resource_alignment(struct resource *res) ...@@ -1119,6 +1122,55 @@ resource_size_t resource_alignment(struct resource *res)
static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait); static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
/* Inode whose i_mapping tracks all iomem mappings; published by iomem_init_inode(). */
static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
/*
 * Unmap any existing userspace mappings of @res so that a driver that has
 * just claimed the region gets exclusive access.  Mappings are tracked
 * against iomem_inode->i_mapping (see iomem_get_mapping()).
 */
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init and prevent iomem_get_mapping users
	 * from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
/* Without CONFIG_IO_STRICT_DEVMEM, existing mappings are left intact. */
static void revoke_iomem(struct resource *res) {}
#endif
struct address_space *iomem_get_mapping(void)
{
/*
* This function is only called from file open paths, hence guaranteed
* that fs_initcalls have completed and no need to check for NULL. But
* since revoke_iomem can be called before the initcall we still need
* the barrier to appease checkers.
*/
return smp_load_acquire(&iomem_inode)->i_mapping;
}
/** /**
* __request_region - create a new busy resource region * __request_region - create a new busy resource region
* @parent: parent resource descriptor * @parent: parent resource descriptor
...@@ -1186,7 +1238,7 @@ struct resource * __request_region(struct resource *parent, ...@@ -1186,7 +1238,7 @@ struct resource * __request_region(struct resource *parent,
write_unlock(&resource_lock); write_unlock(&resource_lock);
if (res && orig_parent == &iomem_resource) if (res && orig_parent == &iomem_resource)
revoke_devmem(res); revoke_iomem(res);
return res; return res;
} }
...@@ -1786,4 +1838,48 @@ static int __init strict_iomem(char *str) ...@@ -1786,4 +1838,48 @@ static int __init strict_iomem(char *str)
return 1; return 1;
} }
/*
 * Initialize the filesystem context for the anonymous "iomem" pseudo fs
 * that hosts the revocation inode.  Returns 0 on success, -ENOMEM if the
 * pseudo superblock context could not be set up.
 */
static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	if (!init_pseudo(fc, DEVMEM_MAGIC))
		return -ENOMEM;
	return 0;
}
/* Internal pseudo filesystem hosting the single iomem revocation inode. */
static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
/*
 * Create the anonymous inode used to track iomem userspace mappings so
 * revoke_iomem() can unmap them.  Runs at fs_initcall time; until it has
 * completed, revoke_iomem() sees a NULL iomem_inode and does nothing.
 */
static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	/* Pin a mount of the pseudo fs that will host the inode. */
	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		/* Drop the mount pin taken above before bailing out. */
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish iomem revocation inode initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);
__setup("iomem=", strict_iomem); __setup("iomem=", strict_iomem);
...@@ -804,9 +804,6 @@ config DEVICE_PRIVATE ...@@ -804,9 +804,6 @@ config DEVICE_PRIVATE
config VMAP_PFN config VMAP_PFN
bool bool
config FRAME_VECTOR
bool
config ARCH_USES_HIGH_VMA_FLAGS config ARCH_USES_HIGH_VMA_FLAGS
bool bool
config ARCH_HAS_PKEYS config ARCH_HAS_PKEYS
......
...@@ -110,7 +110,6 @@ obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o ...@@ -110,7 +110,6 @@ obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
......
...@@ -4798,28 +4798,68 @@ int follow_phys(struct vm_area_struct *vma, ...@@ -4798,28 +4798,68 @@ int follow_phys(struct vm_area_struct *vma,
return ret; return ret;
} }
/**
 * generic_access_phys - generic implementation for iomem mmap access
 * @vma: the vma to access
 * @addr: userspace address, not relative offset within @vma
 * @buf: buffer to read/write
 * @len: length of transfer
 * @write: set to FOLL_WRITE when writing, otherwise reading
 *
 * This is a generic implementation for &vm_operations_struct.access for an
 * iomem mapping. This callback is used by access_process_vm() when the @vma is
 * not page based.
 */
/*
 * NOTE(review): the lines below interleave the pre- and post-patch versions
 * of this function as rendered by the diff viewer; the right-hand (new)
 * version is authoritative.  Left byte-for-byte; comments only added.
 */
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write) void *buf, int len, int write)
{ {
resource_size_t phys_addr; resource_size_t phys_addr;
unsigned long prot = 0; unsigned long prot = 0;
void __iomem *maddr; void __iomem *maddr;
int offset = addr & (PAGE_SIZE-1); pte_t *ptep, pte;
spinlock_t *ptl;
int offset = offset_in_page(addr);
int ret = -EINVAL;
/* Only VM_IO/VM_PFNMAP vmas may carry a non-page-based mapping here. */
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
return -EINVAL;
/*
 * Snapshot the PTE under its lock, then drop the lock before ioremap;
 * the snapshot is re-validated after the mapping is set up below.
 */
retry:
if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
return -EINVAL;
pte = *ptep;
pte_unmap_unlock(ptep, ptl);
prot = pgprot_val(pte_pgprot(pte));
phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
if (follow_phys(vma, addr, write, &prot, &phys_addr)) if ((write & FOLL_WRITE) && !pte_write(pte))
return -EINVAL; return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (!maddr) if (!maddr)
return -ENOMEM; return -ENOMEM;
/* Re-check the PTE; if it changed under us (racing unmap), start over. */
if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
goto out_unmap;
if (!pte_same(pte, *ptep)) {
pte_unmap_unlock(ptep, ptl);
iounmap(maddr);
goto retry;
}
if (write) if (write)
memcpy_toio(maddr + offset, buf, len); memcpy_toio(maddr + offset, buf, len);
else else
memcpy_fromio(buf, maddr + offset, len); memcpy_fromio(buf, maddr + offset, len);
ret = len;
pte_unmap_unlock(ptep, ptl);
out_unmap:
iounmap(maddr); iounmap(maddr);
return len; return ret;
} }
EXPORT_SYMBOL_GPL(generic_access_phys); EXPORT_SYMBOL_GPL(generic_access_phys);
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment