Commit 32ecd242 authored by Dave Airlie's avatar Dave Airlie Committed by Daniel Vetter

drm/udl: call begin/end cpu access at more appropriate time

We need to call these before we transfer the damaged areas to the device,
not before/after we set up the long-lived vmaps.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ec6f1bb9
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/fb.h> #include <linux/fb.h>
#include <linux/dma-buf.h>
#include "drmP.h" #include "drmP.h"
#include "drm.h" #include "drm.h"
...@@ -377,16 +378,33 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb, ...@@ -377,16 +378,33 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
{ {
struct udl_framebuffer *ufb = to_udl_fb(fb); struct udl_framebuffer *ufb = to_udl_fb(fb);
int i; int i;
int ret = 0;
if (!ufb->active_16) if (!ufb->active_16)
return 0; return 0;
if (ufb->obj->base.import_attach) {
ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
0, ufb->obj->base.size,
DMA_FROM_DEVICE);
if (ret)
return ret;
}
for (i = 0; i < num_clips; i++) { for (i = 0; i < num_clips; i++) {
udl_handle_damage(ufb, clips[i].x1, clips[i].y1, ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
clips[i].x2 - clips[i].x1, clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1); clips[i].y2 - clips[i].y1);
if (ret)
break;
} }
return 0;
if (ufb->obj->base.import_attach) {
dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
0, ufb->obj->base.size,
DMA_FROM_DEVICE);
}
return ret;
} }
static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb) static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
......
...@@ -181,11 +181,6 @@ int udl_gem_vmap(struct udl_gem_object *obj) ...@@ -181,11 +181,6 @@ int udl_gem_vmap(struct udl_gem_object *obj)
int ret; int ret;
if (obj->base.import_attach) { if (obj->base.import_attach) {
ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
0, obj->base.size, DMA_BIDIRECTIONAL);
if (ret)
return -EINVAL;
obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf); obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
if (!obj->vmapping) if (!obj->vmapping)
return -ENOMEM; return -ENOMEM;
...@@ -206,8 +201,6 @@ void udl_gem_vunmap(struct udl_gem_object *obj) ...@@ -206,8 +201,6 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
{ {
if (obj->base.import_attach) { if (obj->base.import_attach) {
dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping); dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
obj->base.size, DMA_BIDIRECTIONAL);
return; return;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment