Commit 3cbd0c58 authored by Laurent Pinchart, committed by Tomi Valkeinen

drm/omap: gem: Replace struct_mutex usage with omap_obj private lock

The DRM device struct_mutex is used to protect against concurrent GEM
object operations that deal with memory allocation and pinning. All
those operations are local to a GEM object and don't need to be
serialized across different GEM objects. Replace the struct_mutex with
a local omap_obj.lock or drop it altogether where not needed.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
parent dc8c9aee
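The pattern the patch applies is worth spelling out before the diff: a per-object mutex serializes allocation and pinning state for a single GEM object, so operations on two different objects no longer contend on a device-wide lock. Below is a minimal, self-contained sketch of that pattern in kernel C, as a reader's aid only; foo_obj, foo_attach_pages, and foo_pin are hypothetical names standing in for omap_gem_object, omap_gem_attach_pages, and omap_gem_pin, and the sketch is not the driver's actual code.

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Hypothetical stand-in for omap_gem_object: one lock per object.
 * Initialize with mutex_init(&foo->lock) at object creation time.
 */
struct foo_obj {
	struct mutex lock;	/* protects pin_cnt and pages */
	unsigned int pin_cnt;
	struct page **pages;
};

/* Internal helper: the caller must hold foo->lock; lockdep documents
 * and enforces that contract on every path.
 */
static int foo_attach_pages(struct foo_obj *foo)
{
	lockdep_assert_held(&foo->lock);
	/* ... allocate foo->pages here ... */
	return 0;
}

/* Public entry point: takes the per-object lock, not a device-wide one. */
int foo_pin(struct foo_obj *foo)
{
	int ret = 0;

	mutex_lock(&foo->lock);
	if (foo->pin_cnt == 0) {
		ret = foo_attach_pages(foo);
		if (ret)
			goto unlock;
	}
	foo->pin_cnt++;
unlock:
	mutex_unlock(&foo->lock);
	return ret;
}

Pinning two different objects now serializes on nothing shared; only concurrent operations on the same object take the same lock.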
diff --git a/drivers/gpu/drm/omap/omap_debugfs.c b/drivers/gpu/drm/omap/omap_debugfs.c
@@ -30,17 +30,10 @@ static int gem_show(struct seq_file *m, void *arg)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct omap_drm_private *priv = dev->dev_private;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(m, "All Objects:\n");
 	omap_gem_describe_objects(&priv->obj_list, m);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	return 0;
 }
diff --git a/drivers/gpu/drm/omap/omap_fbdev.c b/drivers/gpu/drm/omap/omap_fbdev.c
@@ -170,13 +170,11 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 		goto fail;
 	}
 
-	mutex_lock(&dev->struct_mutex);
-
 	fbi = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
 		ret = PTR_ERR(fbi);
-		goto fail_unlock;
+		goto fail;
 	}
 
 	DBG("fbi=%p, dev=%p", fbi, dev);
@@ -212,12 +210,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
 	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	return 0;
 
-fail_unlock:
-	mutex_unlock(&dev->struct_mutex);
 fail:
 
 	if (ret) {
diff --git a/drivers/gpu/drm/omap/omap_gem.c b/drivers/gpu/drm/omap/omap_gem.c
@@ -47,6 +47,9 @@ struct omap_gem_object {
 	/** roll applied when mapping to DMM */
 	u32 roll;
 
+	/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
+	struct mutex lock;
+
 	/**
 	 * dma_addr contains the buffer DMA address. It is valid for
 	 *
@@ -220,7 +223,10 @@ static void omap_gem_evict(struct drm_gem_object *obj)
  * Page Management
  */
 
-/* Ensure backing pages are allocated. */
+/*
+ * Ensure backing pages are allocated. Must be called with the omap_obj.lock
+ * held.
+ */
 static int omap_gem_attach_pages(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
@@ -230,6 +236,8 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 	int i, ret;
 	dma_addr_t *addrs;
 
+	lockdep_assert_held(&omap_obj->lock);
+
 	/*
 	 * If not using shmem (in which case backing pages don't need to be
 	 * allocated) or if pages are already allocated we're done.
@@ -291,13 +299,15 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
 	return ret;
 }
 
-/** release backing pages */
+/* Release backing pages. Must be called with the omap_obj.lock held. */
 static void omap_gem_detach_pages(struct drm_gem_object *obj)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	unsigned int npages = obj->size >> PAGE_SHIFT;
 	unsigned int i;
 
+	lockdep_assert_held(&omap_obj->lock);
+
 	for (i = 0; i < npages; i++) {
 		if (omap_obj->dma_addrs[i])
 			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
@@ -491,14 +501,13 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	struct drm_device *dev = obj->dev;
 	int err;
 	vm_fault_t ret;
 
 	/* Make sure we don't parallel update on a fault, nor move or remove
 	 * something from beneath our feet
 	 */
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&omap_obj->lock);
 
 	/* if a shmem backed object, make sure we have pages attached now */
 	err = omap_gem_attach_pages(obj);
@@ -520,7 +529,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
 
 fail:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&omap_obj->lock);
 	return ret;
 }
@@ -654,7 +663,7 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 
 	omap_obj->roll = roll;
 
-	mutex_lock(&obj->dev->struct_mutex);
+	mutex_lock(&omap_obj->lock);
 
 	/* if we aren't mapped yet, we don't need to do anything */
 	if (omap_obj->block) {
@@ -669,7 +678,7 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 	}
 
 fail:
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&omap_obj->lock);
 
 	return ret;
 }
@@ -770,7 +779,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	int ret = 0;
 
-	mutex_lock(&obj->dev->struct_mutex);
+	mutex_lock(&omap_obj->lock);
 
 	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
 		if (omap_obj->dma_addr_cnt == 0) {
@@ -826,7 +835,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
 	}
 
 fail:
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&omap_obj->lock);
 
 	return ret;
 }
@@ -844,7 +853,8 @@ void omap_gem_unpin(struct drm_gem_object *obj)
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	int ret;
 
-	mutex_lock(&obj->dev->struct_mutex);
+	mutex_lock(&omap_obj->lock);
+
 	if (omap_obj->dma_addr_cnt > 0) {
 		omap_obj->dma_addr_cnt--;
 		if (omap_obj->dma_addr_cnt == 0) {
@@ -863,7 +873,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
 		}
 	}
 
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&omap_obj->lock);
 }
 
 /* Get rotated scanout address (only valid if already pinned), at the
@@ -876,13 +886,16 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	int ret = -EINVAL;
 
-	mutex_lock(&obj->dev->struct_mutex);
+	mutex_lock(&omap_obj->lock);
+
 	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
 			(omap_obj->flags & OMAP_BO_TILED)) {
 		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
 		ret = 0;
 	}
-	mutex_unlock(&obj->dev->struct_mutex);
+
+	mutex_unlock(&omap_obj->lock);
+
 	return ret;
 }
@@ -910,18 +923,26 @@ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
 		bool remap)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret;
+	int ret = 0;
 
-	if (!remap) {
-		if (!omap_obj->pages)
-			return -ENOMEM;
-		*pages = omap_obj->pages;
-		return 0;
+	mutex_lock(&omap_obj->lock);
+
+	if (remap) {
+		ret = omap_gem_attach_pages(obj);
+		if (ret)
+			goto unlock;
 	}
 
-	mutex_lock(&obj->dev->struct_mutex);
-	ret = omap_gem_attach_pages(obj);
+	if (!omap_obj->pages) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
 	*pages = omap_obj->pages;
-	mutex_unlock(&obj->dev->struct_mutex);
+
+unlock:
+	mutex_unlock(&omap_obj->lock);
 
 	return ret;
 }
@@ -936,24 +957,34 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
 }
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-/* Get kernel virtual address for CPU access.. this more or less only
- * exists for omap_fbdev. This should be called with struct_mutex
- * held.
+/*
+ * Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev.
  */
 void *omap_gem_vaddr(struct drm_gem_object *obj)
 {
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-	if (!omap_obj->vaddr) {
-		int ret;
+	void *vaddr;
+	int ret;
+
+	mutex_lock(&omap_obj->lock);
 
+	if (!omap_obj->vaddr) {
 		ret = omap_gem_attach_pages(obj);
-		if (ret)
-			return ERR_PTR(ret);
+		if (ret) {
+			vaddr = ERR_PTR(ret);
+			goto unlock;
+		}
 
 		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 	}
-	return omap_obj->vaddr;
+
+	vaddr = omap_obj->vaddr;
+
+unlock:
+	mutex_unlock(&omap_obj->lock);
+
+	return vaddr;
 }
 #endif
@@ -1001,6 +1032,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
 	off = drm_vma_node_start(&obj->vma_node);
 
+	mutex_lock(&omap_obj->lock);
+
 	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
 			omap_obj->flags, obj->name, kref_read(&obj->refcount),
 			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
@@ -1018,6 +1051,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		seq_printf(m, " %zu", obj->size);
 	}
 
+	mutex_unlock(&omap_obj->lock);
+
 	seq_printf(m, "\n");
 }
@@ -1051,15 +1086,19 @@ void omap_gem_free_object(struct drm_gem_object *obj)
 
 	omap_gem_evict(obj);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	spin_lock(&priv->list_lock);
 	list_del(&omap_obj->mm_list);
 	spin_unlock(&priv->list_lock);
 
-	/* this means the object is still pinned.. which really should
-	 * not happen. I think..
+	/*
+	 * We own the sole reference to the object at this point, but to keep
+	 * lockdep happy, we must still take the omap_obj_lock to call
+	 * omap_gem_detach_pages(). This should hardly make any difference as
+	 * there can't be any lock contention.
 	 */
+	mutex_lock(&omap_obj->lock);
+
+	/* The object should not be pinned. */
 	WARN_ON(omap_obj->dma_addr_cnt > 0);
 
 	if (omap_obj->pages) {
@@ -1078,8 +1117,12 @@ void omap_gem_free_object(struct drm_gem_object *obj)
 		drm_prime_gem_destroy(obj, omap_obj->sgt);
 	}
 
+	mutex_unlock(&omap_obj->lock);
+
 	drm_gem_object_release(obj);
 
+	mutex_destroy(&omap_obj->lock);
+
 	kfree(omap_obj);
 }
@@ -1135,6 +1178,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 
 	obj = &omap_obj->base;
 	omap_obj->flags = flags;
+	mutex_init(&omap_obj->lock);
 
 	if (flags & OMAP_BO_TILED) {
 		/*
@@ -1199,16 +1243,15 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
 	if (sgt->orig_nents != 1 && !priv->has_dmm)
 		return ERR_PTR(-EINVAL);
 
-	mutex_lock(&dev->struct_mutex);
-
 	gsize.bytes = PAGE_ALIGN(size);
 	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
-	if (!obj) {
-		obj = ERR_PTR(-ENOMEM);
-		goto done;
-	}
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
 
 	omap_obj = to_omap_bo(obj);
+
+	mutex_lock(&omap_obj->lock);
+
 	omap_obj->sgt = sgt;
 
 	if (sgt->orig_nents == 1) {
@@ -1244,7 +1287,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
 	}
 
 done:
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&omap_obj->lock);
 	return obj;
 }
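The free path above is the one subtle spot: by the time omap_gem_free_object() runs, the object is unreachable and the lock cannot be contended, yet the patch still takes it so the lockdep_assert_held() in omap_gem_detach_pages() holds on every path, and only destroys the mutex after releasing it. A sketch of that teardown ordering, continuing the hypothetical foo_obj example from above (illustrative only, not the driver code):

#include <linux/slab.h>

/* Also asserts foo->lock, so even the final teardown must hold it. */
static void foo_detach_pages(struct foo_obj *foo)
{
	lockdep_assert_held(&foo->lock);
	/* ... release foo->pages here ... */
}

void foo_free(struct foo_obj *foo)
{
	/* Sole reference: no contention possible, but lockdep still
	 * requires the lock to be held across the helper's assert.
	 */
	mutex_lock(&foo->lock);
	foo_detach_pages(foo);
	mutex_unlock(&foo->lock);

	/* A mutex must never be destroyed while held. */
	mutex_destroy(&foo->lock);
	kfree(foo);
}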