Commit 5a13761f authored by Christian König's avatar Christian König Committed by Alex Deucher

drm/amdgpu: implement amdgpu_gem_map_(attach/detach)

Instead of the pin/unpin callback implement the attach/detach ones.

Functionally identical, but allows us access to the attachment.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d8217921
...@@ -393,8 +393,6 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, ...@@ -393,8 +393,6 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
int flags); int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf); struct dma_buf *dma_buf);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
......
...@@ -885,8 +885,6 @@ static struct drm_driver kms_driver = { ...@@ -885,8 +885,6 @@ static struct drm_driver kms_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export, .gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import, .gem_prime_import = amdgpu_gem_prime_import,
.gem_prime_pin = amdgpu_gem_prime_pin,
.gem_prime_unpin = amdgpu_gem_prime_unpin,
.gem_prime_res_obj = amdgpu_gem_prime_res_obj, .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table, .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
......
...@@ -113,49 +113,65 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, ...@@ -113,49 +113,65 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
return &bo->gem_base; return &bo->gem_base;
} }
int amdgpu_gem_prime_pin(struct drm_gem_object *obj) static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct device *target_dev,
struct dma_buf_attachment *attach)
{ {
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
long ret = 0; long r;
ret = amdgpu_bo_reserve(bo, false); r = drm_gem_map_attach(dma_buf, target_dev, attach);
if (unlikely(ret != 0)) if (r)
return ret; return r;
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
goto error_detach;
/* /*
* Wait for all shared fences to complete before we switch to future * Wait for all shared fences to complete before we switch to future
* use of exclusive fence on this prime shared bo. * use of exclusive fence on this prime shared bo.
*/ */
ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
if (unlikely(ret < 0)) { if (unlikely(r < 0)) {
DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
amdgpu_bo_unreserve(bo); goto error_unreserve;
return ret;
} }
/* pin buffer into GTT */ /* pin buffer into GTT */
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0)) if (likely(r == 0))
bo->prime_shared_count++; bo->prime_shared_count++;
error_unreserve:
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(bo);
return ret;
error_detach:
if (r)
drm_gem_map_detach(dma_buf, attach);
return r;
} }
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{ {
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret = 0; int ret = 0;
ret = amdgpu_bo_reserve(bo, true); ret = amdgpu_bo_reserve(bo, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return; goto error;
amdgpu_bo_unpin(bo); amdgpu_bo_unpin(bo);
if (bo->prime_shared_count) if (bo->prime_shared_count)
bo->prime_shared_count--; bo->prime_shared_count--;
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(bo);
error:
drm_gem_map_detach(dma_buf, attach);
} }
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
...@@ -194,8 +210,8 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf, ...@@ -194,8 +210,8 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
} }
static const struct dma_buf_ops amdgpu_dmabuf_ops = { static const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = drm_gem_map_attach, .attach = amdgpu_gem_map_attach,
.detach = drm_gem_map_detach, .detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf, .map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf, .unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release, .release = drm_gem_dmabuf_release,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment