Commit ca0b07d9 authored by Nick Piggin, committed by Dave Airlie

drm: convert drm from nopage to fault.

Remove redundant vma range checks.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent b39d50e5
...@@ -66,7 +66,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) ...@@ -66,7 +66,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
} }
/** /**
* \c nopage method for AGP virtual memory. * \c fault method for AGP virtual memory.
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
...@@ -76,8 +76,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) ...@@ -76,8 +76,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
* map, get the page, increment the use count and return it. * map, get the page, increment the use count and return it.
*/ */
#if __OS_HAS_AGP #if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
struct drm_file *priv = vma->vm_file->private_data; struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->head->dev; struct drm_device *dev = priv->head->dev;
...@@ -89,19 +88,24 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, ...@@ -89,19 +88,24 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
* Find the right map * Find the right map
*/ */
if (!drm_core_has_AGP(dev)) if (!drm_core_has_AGP(dev))
goto vm_nopage_error; goto vm_fault_error;
if (!dev->agp || !dev->agp->cant_use_aperture) if (!dev->agp || !dev->agp->cant_use_aperture)
goto vm_nopage_error; goto vm_fault_error;
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
goto vm_nopage_error; goto vm_fault_error;
r_list = drm_hash_entry(hash, struct drm_map_list, hash); r_list = drm_hash_entry(hash, struct drm_map_list, hash);
map = r_list->map; map = r_list->map;
if (map && map->type == _DRM_AGP) { if (map && map->type == _DRM_AGP) {
unsigned long offset = address - vma->vm_start; /*
* Using vm_pgoff as a selector forces us to use this unusual
* addressing scheme.
*/
unsigned long offset = (unsigned long)vmf->virtual_address -
vma->vm_start;
unsigned long baddr = map->offset + offset; unsigned long baddr = map->offset + offset;
struct drm_agp_mem *agpmem; struct drm_agp_mem *agpmem;
struct page *page; struct page *page;
...@@ -123,7 +127,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, ...@@ -123,7 +127,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
} }
if (!agpmem) if (!agpmem)
goto vm_nopage_error; goto vm_fault_error;
/* /*
* Get the page, inc the use count, and return it * Get the page, inc the use count, and return it
...@@ -131,22 +135,21 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, ...@@ -131,22 +135,21 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
offset = (baddr - agpmem->bound) >> PAGE_SHIFT; offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
page = virt_to_page(__va(agpmem->memory->memory[offset])); page = virt_to_page(__va(agpmem->memory->memory[offset]));
get_page(page); get_page(page);
vmf->page = page;
DRM_DEBUG DRM_DEBUG
("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
baddr, __va(agpmem->memory->memory[offset]), offset, baddr, __va(agpmem->memory->memory[offset]), offset,
page_count(page)); page_count(page));
return 0;
return page;
} }
vm_nopage_error: vm_fault_error:
return NOPAGE_SIGBUS; /* Disallow mremap */ return VM_FAULT_SIGBUS; /* Disallow mremap */
} }
#else /* __OS_HAS_AGP */ #else /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
return NOPAGE_SIGBUS; return VM_FAULT_SIGBUS;
} }
#endif /* __OS_HAS_AGP */ #endif /* __OS_HAS_AGP */
...@@ -160,28 +163,26 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, ...@@ -160,28 +163,26 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
* Get the mapping, find the real physical page to map, get the page, and * Get the mapping, find the real physical page to map, get the page, and
* return it. * return it.
*/ */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
struct drm_map *map = (struct drm_map *) vma->vm_private_data; struct drm_map *map = (struct drm_map *) vma->vm_private_data;
unsigned long offset; unsigned long offset;
unsigned long i; unsigned long i;
struct page *page; struct page *page;
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map) if (!map)
return NOPAGE_SIGBUS; /* Nothing allocated */ return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start; offset = (unsigned long)vmf->virtual_address - vma->vm_start;
i = (unsigned long)map->handle + offset; i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i); page = vmalloc_to_page((void *)i);
if (!page) if (!page)
return NOPAGE_SIGBUS; return VM_FAULT_SIGBUS;
get_page(page); get_page(page);
vmf->page = page;
DRM_DEBUG("0x%lx\n", address); DRM_DEBUG("shm_fault 0x%lx\n", offset);
return page; return 0;
} }
/** /**
...@@ -263,7 +264,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) ...@@ -263,7 +264,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
} }
/** /**
* \c nopage method for DMA virtual memory. * \c fault method for DMA virtual memory.
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
...@@ -271,8 +272,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) ...@@ -271,8 +272,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
* *
* Determine the page number from the page offset and get it from drm_device_dma::pagelist. * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
*/ */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
struct drm_file *priv = vma->vm_file->private_data; struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->head->dev; struct drm_device *dev = priv->head->dev;
...@@ -282,24 +282,23 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, ...@@ -282,24 +282,23 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
struct page *page; struct page *page;
if (!dma) if (!dma)
return NOPAGE_SIGBUS; /* Error */ return VM_FAULT_SIGBUS; /* Error */
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!dma->pagelist) if (!dma->pagelist)
return NOPAGE_SIGBUS; /* Nothing allocated */ return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT; page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
get_page(page); get_page(page);
vmf->page = page;
DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr); DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
return page; return 0;
} }
/** /**
* \c nopage method for scatter-gather virtual memory. * \c fault method for scatter-gather virtual memory.
* *
* \param vma virtual memory area. * \param vma virtual memory area.
* \param address access address. * \param address access address.
...@@ -307,8 +306,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, ...@@ -307,8 +306,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
* *
* Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/ */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address)
{ {
struct drm_map *map = (struct drm_map *) vma->vm_private_data; struct drm_map *map = (struct drm_map *) vma->vm_private_data;
struct drm_file *priv = vma->vm_file->private_data; struct drm_file *priv = vma->vm_file->private_data;
...@@ -320,77 +318,64 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, ...@@ -320,77 +318,64 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
struct page *page; struct page *page;
if (!entry) if (!entry)
return NOPAGE_SIGBUS; /* Error */ return VM_FAULT_SIGBUS; /* Error */
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!entry->pagelist) if (!entry->pagelist)
return NOPAGE_SIGBUS; /* Nothing allocated */ return VM_FAULT_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start; offset = (unsigned long)vmf->virtual_address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual; map_offset = map->offset - (unsigned long)dev->sg->virtual;
page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
page = entry->pagelist[page_offset]; page = entry->pagelist[page_offset];
get_page(page); get_page(page);
vmf->page = page;
return page; return 0;
} }
static struct page *drm_vm_nopage(struct vm_area_struct *vma, static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address, int *type)
{ {
if (type) return drm_do_vm_fault(vma, vmf);
*type = VM_FAULT_MINOR;
return drm_do_vm_nopage(vma, address);
} }
static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address, int *type)
{ {
if (type) return drm_do_vm_shm_fault(vma, vmf);
*type = VM_FAULT_MINOR;
return drm_do_vm_shm_nopage(vma, address);
} }
static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address, int *type)
{ {
if (type) return drm_do_vm_dma_fault(vma, vmf);
*type = VM_FAULT_MINOR;
return drm_do_vm_dma_nopage(vma, address);
} }
static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address, int *type)
{ {
if (type) return drm_do_vm_sg_fault(vma, vmf);
*type = VM_FAULT_MINOR;
return drm_do_vm_sg_nopage(vma, address);
} }
/** AGP virtual memory operations */ /** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = { static struct vm_operations_struct drm_vm_ops = {
.nopage = drm_vm_nopage, .fault = drm_vm_fault,
.open = drm_vm_open, .open = drm_vm_open,
.close = drm_vm_close, .close = drm_vm_close,
}; };
/** Shared virtual memory operations */ /** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = { static struct vm_operations_struct drm_vm_shm_ops = {
.nopage = drm_vm_shm_nopage, .fault = drm_vm_shm_fault,
.open = drm_vm_open, .open = drm_vm_open,
.close = drm_vm_shm_close, .close = drm_vm_shm_close,
}; };
/** DMA virtual memory operations */ /** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = { static struct vm_operations_struct drm_vm_dma_ops = {
.nopage = drm_vm_dma_nopage, .fault = drm_vm_dma_fault,
.open = drm_vm_open, .open = drm_vm_open,
.close = drm_vm_close, .close = drm_vm_close,
}; };
/** Scatter-gather virtual memory operations */ /** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = { static struct vm_operations_struct drm_vm_sg_ops = {
.nopage = drm_vm_sg_nopage, .fault = drm_vm_sg_fault,
.open = drm_vm_open, .open = drm_vm_open,
.close = drm_vm_close, .close = drm_vm_close,
}; };
...@@ -604,7 +589,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) ...@@ -604,7 +589,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
/* /*
* On some platforms we can't talk to bus dma address from the CPU, so for * On some platforms we can't talk to bus dma address from the CPU, so for
* memory of type DRM_AGP, we'll deal with sorting out the real physical * memory of type DRM_AGP, we'll deal with sorting out the real physical
* pages and mappings in nopage() * pages and mappings in fault()
*/ */
#if defined(__powerpc__) #if defined(__powerpc__)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
...@@ -634,7 +619,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) ...@@ -634,7 +619,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
break; break;
case _DRM_CONSISTENT: case _DRM_CONSISTENT:
/* Consistent memory is really like shared memory. But /* Consistent memory is really like shared memory. But
* it's allocated in a different way, so avoid nopage */ * it's allocated in a different way, so avoid fault */
if (remap_pfn_range(vma, vma->vm_start, if (remap_pfn_range(vma, vma->vm_start,
page_to_pfn(virt_to_page(map->handle)), page_to_pfn(virt_to_page(map->handle)),
vma->vm_end - vma->vm_start, vma->vm_page_prot)) vma->vm_end - vma->vm_start, vma->vm_page_prot))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment