Commit 660e8558 authored by Dave Airlie, committed by Alex Deucher

amdgpu: use drm sync objects for shared semaphores (v6)

This creates new command submission chunks for amdgpu
to attach in and out sync objects to a submission.

Sync objects are managed via the drm syncobj ioctls.

The command submission interface gains two new chunk
types: one for syncobj pre-submission dependencies,
and one for post-submission syncobj signalling.
Each takes a list of syncobj handles.
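
For illustration, a minimal userspace sketch of filling one of the
new chunks follows; the fill_syncobj_chunk() helper is hypothetical,
while the chunk IDs and structures are the ones added by this patch:

#include <stdint.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical helper: point one CS chunk at an array of syncobj handles. */
static void fill_syncobj_chunk(struct drm_amdgpu_cs_chunk *chunk,
			       uint32_t chunk_id,
			       struct drm_amdgpu_cs_chunk_sem *sems,
			       unsigned int count)
{
	/* AMDGPU_CHUNK_ID_SYNCOBJ_IN or AMDGPU_CHUNK_ID_SYNCOBJ_OUT */
	chunk->chunk_id = chunk_id;
	/* length_dw counts dwords; each entry is a single __u32 handle */
	chunk->length_dw = count * sizeof(struct drm_amdgpu_cs_chunk_sem) / 4;
	chunk->chunk_data = (uint64_t)(uintptr_t)sems;
}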

This is based on work originally done by David Zhou at AMD,
with input from Christian König on what things should look like.

In theory, VkFences could be backed by sync objects and
simply passed into the CS as syncobj handles as well.

NOTE: this interface addition needs a version bump to expose
it to userspace.

TODO: update to dep_sync when rebasing onto amdgpu master.
(with this - r-b from Christian)

v1.1: keep file reference on import.
v2: move to using syncobjs
v2.1: change some APIs to just use p pointer.
v3: make more robust against CS failures, we now add the
wait sems but only remove them once the CS job has been
submitted.
v4: rewrite names of API and base on new syncobj code.
v5: move post deps earlier, rename some apis
v6: lookup post deps earlier, and just replace fences
in post deps stage (Christian)
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6f0308eb
@@ -1159,6 +1159,9 @@ struct amdgpu_cs_parser {
 	/* user fence */
 	struct amdgpu_bo_list_entry uf_entry;
+
+	unsigned num_post_dep_syncobjs;
+	struct drm_syncobj **post_dep_syncobjs;
 };

 #define AMDGPU_PREAMBLE_IB_PRESENT	(1 << 0) /* bit set means command submit involves a preamble IB */
...
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_syncobj.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
@@ -154,6 +155,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			break;

 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 			break;

 		default:
@@ -682,6 +685,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
+
+	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
+		drm_syncobj_put(parser->post_dep_syncobjs[i]);
+	kfree(parser->post_dep_syncobjs);
+
 	dma_fence_put(parser->fence);

 	if (parser->ctx)
@@ -971,6 +979,64 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 	return 0;
 }

+static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
+						 uint32_t handle)
+{
+	int r;
+	struct dma_fence *fence;
+
+	r = drm_syncobj_fence_get(p->filp, handle, &fence);
+	if (r)
+		return r;
+
+	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
+	dma_fence_put(fence);
+	return r;
+}
+
+static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
+					    struct amdgpu_cs_chunk *chunk)
+{
+	unsigned num_deps;
+	int i, r;
+	struct drm_amdgpu_cs_chunk_sem *deps;
+
+	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+	for (i = 0; i < num_deps; ++i) {
+		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
+static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
+					     struct amdgpu_cs_chunk *chunk)
+{
+	unsigned num_deps;
+	int i;
+	struct drm_amdgpu_cs_chunk_sem *deps;
+
+	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+	p->post_dep_syncobjs = kmalloc_array(num_deps,
+					     sizeof(struct drm_syncobj *),
+					     GFP_KERNEL);
+	p->num_post_dep_syncobjs = 0;
+
+	for (i = 0; i < num_deps; ++i) {
+		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
+		if (!p->post_dep_syncobjs[i])
+			return -EINVAL;
+		p->num_post_dep_syncobjs++;
+	}
+	return 0;
+}
+
 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 				  struct amdgpu_cs_parser *p)
 {
@@ -985,12 +1051,30 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			r = amdgpu_cs_process_fence_dep(p, chunk);
 			if (r)
 				return r;
+		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
+			if (r)
+				return r;
+		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
+			if (r)
+				return r;
 		}
 	}

 	return 0;
 }

+static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+{
+	int i;
+
+	for (i = 0; i < p->num_post_dep_syncobjs; ++i) {
+		drm_syncobj_replace_fence(p->filp, p->post_dep_syncobjs[i],
+					  p->fence);
+	}
+}
+
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			    union drm_amdgpu_cs *cs)
 {
@@ -1011,6 +1095,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job->owner = p->filp;
 	job->fence_ctx = entity->fence_context;
 	p->fence = dma_fence_get(&job->base.s_fence->finished);
+
+	amdgpu_cs_post_dependencies(p);
+
 	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
 	job->uf_sequence = cs->out.handle;
 	amdgpu_job_free_resources(job);
@@ -1018,7 +1105,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	trace_amdgpu_cs_ioctl(job);
 	amd_sched_entity_push_job(&job->base);

 	return 0;
 }
...
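
To show the two chunk types cooperating end to end, here is a hedged
usage sketch: submit_cs() is a hypothetical wrapper around
DRM_IOCTL_AMDGPU_CS that attaches a single sem chunk, while
drmSyncobjCreate() is the libdrm wrapper for the syncobj create ioctl.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical CS wrapper, not part of this patch. */
extern int submit_cs(int fd, uint32_t chunk_id,
		     struct drm_amdgpu_cs_chunk_sem *sems, unsigned int count);

/* Chain two submissions through one syncobj: the first installs its job
 * fence in the syncobj, the second waits on that fence before scheduling. */
int chain_submissions(int fd)
{
	struct drm_amdgpu_cs_chunk_sem sem;
	uint32_t syncobj;
	int r;

	r = drmSyncobjCreate(fd, 0, &syncobj);
	if (r)
		return r;
	sem.handle = syncobj;

	r = submit_cs(fd, AMDGPU_CHUNK_ID_SYNCOBJ_OUT, &sem, 1);
	if (r)
		return r;
	return submit_cs(fd, AMDGPU_CHUNK_ID_SYNCOBJ_IN, &sem, 1);
}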
@@ -782,7 +782,7 @@ static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP |
 	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-	    DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
+	    DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
 	.load = amdgpu_driver_load_kms,
 	.open = amdgpu_driver_open_kms,
 	.postclose = amdgpu_driver_postclose_kms,
...
@@ -440,6 +440,8 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_IB		0x01
 #define AMDGPU_CHUNK_ID_FENCE		0x02
 #define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
+#define AMDGPU_CHUNK_ID_SYNCOBJ_IN	0x04
+#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT	0x05

 struct drm_amdgpu_cs_chunk {
 	__u32		chunk_id;
@@ -507,6 +509,10 @@ struct drm_amdgpu_cs_chunk_fence {
 	__u32 offset;
 };

+struct drm_amdgpu_cs_chunk_sem {
+	__u32 handle;
+};
+
 struct drm_amdgpu_cs_chunk_data {
 	union {
 		struct drm_amdgpu_cs_chunk_ib ib_data;
...
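
Sizing note: struct drm_amdgpu_cs_chunk_sem is a single __u32, so a chunk
carrying N handles has length_dw == N. The parser recovers the count as
chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_sem), which stays
correct if the struct ever grows.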