Commit edee06b6 authored by Dave Airlie

Merge branch 'drm/next' of git://linuxtv.org/pinchartl/fbdev into drm-next

GEM CMA PRIME support from Laurent.

* 'drm/next' of git://linuxtv.org/pinchartl/fbdev:
  drm: GEM CMA: Add DRM PRIME support
  drm: GEM CMA: Split object mapping into GEM mapping and CMA mapping
  drm: GEM CMA: Split object creation into object alloc and DMA memory alloc
  drm/omap: Use drm_gem_mmap_obj() to implement dma-buf mmap
  drm/gem: Split drm_gem_mmap() into object search and object mapping
parents d20d3174 71d7282a
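
Not part of the commit itself, but for context: once a driver gains PRIME export plus a dma-buf mmap implementation (which is what this series wires up for GEM CMA and omapdrm), userspace can turn a GEM handle into a dma-buf fd and map it directly. The sketch below assumes a GEM handle obtained elsewhere (for example a dumb buffer); map_gem_handle() is an illustrative helper name, while drmPrimeHandleToFD() is the existing libdrm call.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

static void *map_gem_handle(int drm_fd, uint32_t handle, size_t size)
{
	int prime_fd;
	void *ptr;

	/* Export the GEM object as a dma-buf file descriptor. */
	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd) < 0) {
		perror("drmPrimeHandleToFD");
		return NULL;
	}

	/* mmap() on the dma-buf fd ends up in the driver's dma-buf mmap op. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, prime_fd, 0);
	close(prime_fd);	/* the mapping keeps the buffer alive */
	return ptr == MAP_FAILED ? NULL : ptr;
}
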
@@ -644,6 +644,55 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
+/**
+ * drm_gem_mmap_obj - memory map a GEM object
+ * @obj: the GEM object to map
+ * @obj_size: the object size to be mapped, in bytes
+ * @vma: VMA for the area to be mapped
+ *
+ * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
+ * provided by the driver. Depending on their requirements, drivers can either
+ * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * the object will be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring), or mmap the buffer memory
+ * synchronously after calling drm_gem_mmap_obj.
+ *
+ * This function is mainly intended to implement the DMABUF mmap operation, when
+ * the GEM object is not looked up based on its fake offset. To implement the
+ * DRM mmap operation, drivers should use the drm_gem_mmap() function.
+ *
+ * Return 0 on success or -EINVAL if the object size is smaller than the VMA
+ * size, or if no gem_vm_ops are provided.
+ */
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma)
+{
+	struct drm_device *dev = obj->dev;
+	/* Check for valid size. */
+	if (obj_size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+	if (!dev->driver->gem_vm_ops)
+		return -EINVAL;
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = dev->driver->gem_vm_ops;
+	vma->vm_private_data = obj;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+	drm_vm_open_locked(dev, vma);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_mmap_obj);
 /**
  * drm_gem_mmap - memory map routine for GEM objects
@@ -653,11 +702,9 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * If a driver supports GEM object mapping, mmap calls on the DRM file
  * descriptor will end up here.
  *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
- * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object) and map it with a call to drm_gem_mmap_obj().
  */
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -665,7 +712,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_local_map *map = NULL;
-	struct drm_gem_object *obj;
 	struct drm_hash_item *hash;
 	int ret = 0;
@@ -686,32 +732,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		goto out_unlock;
 	}
-	/* Check for valid size. */
-	if (map->size < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-	obj = map->handle;
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = map->handle;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	drm_gem_object_reference(obj);
-	drm_vm_open_locked(dev, vma);
+	ret = drm_gem_mmap_obj(map->handle, map->size, vma);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
...
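
To illustrate the intended use of the new helper (mirroring the omapdrm change further down), a hypothetical exporter's dma-buf mmap callback can now be reduced to a single drm_gem_mmap_obj() call. foo_gem_dmabuf_mmap() is an invented name, not part of this commit, and the sketch assumes the exporter stored its GEM object in dma_buf->priv, as the in-tree users in this series do.

#include <linux/dma-buf.h>
#include <linux/mm.h>
#include <drm/drmP.h>

static int foo_gem_dmabuf_mmap(struct dma_buf *buffer,
			       struct vm_area_struct *vma)
{
	/* The exporter stashed its GEM object in the dma-buf private data. */
	struct drm_gem_object *obj = buffer->priv;

	/*
	 * drm_gem_mmap_obj() validates the object size against the VMA,
	 * checks for gem_vm_ops, sets up vm_flags/vm_ops/page protection and
	 * takes a reference that the driver's vm_close drops again.
	 */
	return drm_gem_mmap_obj(obj, obj->size, vma);
}
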
(A further file's diff in this commit is collapsed and not shown here.)
@@ -136,10 +136,6 @@ static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
 	kunmap(pages[page_num]);
 }
-/*
- * TODO maybe we can split up drm_gem_mmap to avoid duplicating
- * some here.. or at least have a drm_dmabuf_mmap helper.
- */
 static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 		struct vm_area_struct *vma)
 {
@@ -149,31 +145,9 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 	if (WARN_ON(!obj->filp))
 		return -EINVAL;
-	/* Check for valid size. */
-	if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = obj;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	vma->vm_ops->open(vma);
-out_unlock:
+	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
+	if (ret < 0)
+		return ret;
 	return omap_gem_mmap_obj(obj, vma);
 }
...
@@ -1616,6 +1616,8 @@ int drm_gem_private_object_init(struct drm_device *dev,
 void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 #include <drm/drm_global.h>
...
@@ -4,6 +4,9 @@
 struct drm_gem_cma_object {
 	struct drm_gem_object base;
 	dma_addr_t paddr;
+	struct sg_table *sgt;
+	/* For objects with DMA memory allocated by GEM CMA */
 	void *vaddr;
 };
@@ -45,4 +48,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
 void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 #endif
+struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
+					   struct drm_gem_object *obj,
+					   int flags);
+struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
+						  struct dma_buf *dma_buf);
 #endif /* __DRM_GEM_CMA_HELPER_H__ */
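
Finally, a sketch of how a CMA-based KMS driver might hook the new export/import helpers into its driver structure; this is not taken from this commit. foo_drm_driver is a placeholder name, only the GEM/PRIME-related fields are filled in, and the exact struct drm_driver layout is kernel-version dependent.

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static struct drm_driver foo_drm_driver = {
	/* Only the GEM/PRIME-related hooks are shown here; a real driver
	 * also fills in fops, mode-setting callbacks, name/desc, etc.
	 */
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.gem_free_object	= drm_gem_cma_free_object,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_cma_dmabuf_export,
	.gem_prime_import	= drm_gem_cma_dmabuf_import,
	.dumb_create		= drm_gem_cma_dumb_create,
	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
	.dumb_destroy		= drm_gem_cma_dumb_destroy,
};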