Commit 5e8ec0d9 authored by Deepak Rawat, committed by Roland Scheidegger

drm/vmwgfx: Add support for UA view commands

The virtual device now supports new commands to manage unordered access
views. Allow them as part of the user-space command buffer. This involves
adding a UA view cotable, binding tracker info, a new view type, and
command verifier functions.

v2: fix comment typo
v3: style fixes (don't use deprecated PTR_RET)
Signed-off-by: Deepak Rawat <drawat.floss@gmail.com>
Signed-off-by: Neha Bhende <bhenden@vmware.com>
Reviewed-by: Thomas Hellström (VMware) <thomas_os@shipmail.org>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Signed-off-by: Roland Scheidegger <sroland@vmware.com>
parent d2e90ab3
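
For orientation before the diff: the UA view set commands are variable-length,
with a fixed header and body followed by an array of view ids, and the kernel
derives the view count from header.size. Below is a minimal user-space encoding
sketch using simplified stand-in types rather than the real device headers
(encode_set_ua_views and the struct layouts here are illustrative assumptions):

    #include <stdint.h>
    #include <string.h>

    typedef uint32_t SVGA3dUAViewId;

    struct SVGA3dCmdHeader {
            uint32_t id;   /* e.g. SVGA_3D_CMD_DX_SET_UA_VIEWS */
            uint32_t size; /* body size in bytes, header excluded */
    };

    struct SVGA3dCmdDXSetUAViews {
            uint32_t uavSpliceIndex; /* consumed by vmw_cmd_set_uav() below */
    };

    /* Append one SET_UA_VIEWS command to buf; returns bytes written. */
    static size_t encode_set_ua_views(uint8_t *buf, uint32_t cmd_id,
                                      uint32_t splice_index,
                                      const SVGA3dUAViewId *ids, uint32_t n)
    {
            struct SVGA3dCmdHeader hdr = {
                    .id = cmd_id,
                    .size = (uint32_t)(sizeof(struct SVGA3dCmdDXSetUAViews) +
                                       n * sizeof(SVGA3dUAViewId)),
            };
            struct SVGA3dCmdDXSetUAViews body = {
                    .uavSpliceIndex = splice_index,
            };

            memcpy(buf, &hdr, sizeof(hdr));
            memcpy(buf + sizeof(hdr), &body, sizeof(body));
            memcpy(buf + sizeof(hdr) + sizeof(body), ids,
                   n * sizeof(SVGA3dUAViewId));
            return sizeof(hdr) + hdr.size;
    }

The verifiers added in this patch recover the count as
(header.size - sizeof(body)) / sizeof(SVGA3dUAViewId) and reject anything
above SVGA3D_MAX_UAVIEWS.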
@@ -59,7 +59,9 @@
 #define VMW_BINDING_PS_BIT 1
 #define VMW_BINDING_SO_BIT 2
 #define VMW_BINDING_VB_BIT 3
-#define VMW_BINDING_NUM_BITS 4
+#define VMW_BINDING_UAV_BIT 4
+#define VMW_BINDING_CS_UAV_BIT 5
+#define VMW_BINDING_NUM_BITS 6

 #define VMW_BINDING_PS_SR_BIT 0
@@ -75,6 +77,7 @@
  * @vertex_buffers: Vertex buffer bindings.
  * @index_buffer: Index buffer binding.
  * @per_shader: Per shader-type bindings.
+ * @ua_views: UAV bindings.
  * @dirty: Bitmap tracking per binding-type changes that have not yet
  * been emitted to the device.
  * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
@@ -99,6 +102,7 @@ struct vmw_ctx_binding_state {
 	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
 	struct vmw_ctx_bindinfo_ib index_buffer;
 	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
+	struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
 	unsigned long dirty;
 	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
@@ -121,6 +125,9 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
 				       bool rebind);
 static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
 static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);

 static void vmw_binding_build_asserts(void) __attribute__ ((unused));

 typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
@@ -189,6 +196,12 @@ static const size_t vmw_binding_vb_offsets[] = {
 static const size_t vmw_binding_ib_offsets[] = {
 	offsetof(struct vmw_ctx_binding_state, index_buffer),
 };
+static const size_t vmw_binding_uav_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
+};
+static const size_t vmw_binding_cs_uav_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
+};

 static const struct vmw_binding_info vmw_binding_infos[] = {
 	[vmw_ctx_binding_shader] = {
@@ -235,6 +248,14 @@ static const struct vmw_binding_info vmw_binding_infos[] = {
 		.size = sizeof(struct vmw_ctx_bindinfo_ib),
 		.offsets = vmw_binding_ib_offsets,
 		.scrub_func = vmw_binding_scrub_ib},
+	[vmw_ctx_binding_uav] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_uav_offsets,
+		.scrub_func = vmw_binding_scrub_uav},
+	[vmw_ctx_binding_cs_uav] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_cs_uav_offsets,
+		.scrub_func = vmw_binding_scrub_cs_uav},
 };

 /**
@@ -320,6 +341,18 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
 	INIT_LIST_HEAD(&loc->res_list);
 }

+/**
+ * vmw_binding_add_uav_index - Add a UAV index for tracking.
+ * @cbs: Pointer to the context binding state tracker.
+ * @slot: UAV type slot to which this index is bound.
+ * @index: The splice index to track.
+ */
+void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
+			       uint32 index)
+{
+	cbs->ua_views[slot].index = index;
+}
+
 /**
  * vmw_binding_transfer: Transfer a context binding tracking entry.
  *
@@ -459,6 +492,10 @@ void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
 		vmw_binding_transfer(to, from, entry);
 		vmw_binding_drop(entry);
 	}
+
+	/* Also transfer uav splice indices */
+	to->ua_views[0].index = from->ua_views[0].index;
+	to->ua_views[1].index = from->ua_views[1].index;
 }

 /**
@@ -1014,6 +1051,66 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
 	return 0;
 }

+static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetUAViews body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	/* Splice index is specified by user-space */
+	cmd->body.uavSpliceIndex = cbs->ua_views[0].index;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
+static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCSUAViews body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	/* Start index is specified by user-space */
+	cmd->body.startIndex = cbs->ua_views[1].index;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
 /**
  * vmw_binding_emit_dirty - Issue delayed binding commands
  *
@@ -1045,6 +1142,12 @@ static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
 		case VMW_BINDING_VB_BIT:
 			ret = vmw_emit_set_vb(cbs);
 			break;
+		case VMW_BINDING_UAV_BIT:
+			ret = vmw_emit_set_uav(cbs);
+			break;
+		case VMW_BINDING_CS_UAV_BIT:
+			ret = vmw_emit_set_cs_uav(cbs);
+			break;
 		default:
 			BUG();
 		}
@@ -1171,6 +1274,22 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
 	return 0;
 }

+static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
+	return 0;
+}
+
+static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
+	return 0;
+}
+
 /**
  * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
  * memory accounting.
@@ -1257,8 +1376,8 @@ void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
  * Each time a resource is put on the validation list as the result of a
  * context binding referencing it, we need to determine whether that resource
  * will be dirtied (written to by the GPU) as a result of the corresponding
- * GPU operation. Currently rendertarget-, depth-stencil-, and
- * stream-output-target bindings are capable of dirtying its resource.
+ * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target
+ * and unordered access view bindings are capable of dirtying its resource.
  *
  * Return: Whether the binding type dirties the resource its binding points to.
  */
@@ -1269,10 +1388,12 @@ u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
 		[vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
 		[vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
 		[vmw_ctx_binding_so] = VMW_RES_DIRTY_SET,
+		[vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
+		[vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
 	};

 	/* Review this function as new bindings are added. */
-	BUILD_BUG_ON(vmw_ctx_binding_max != 11);
+	BUILD_BUG_ON(vmw_ctx_binding_max != 13);

 	return is_binding_dirtying[binding_type];
 }
...
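
Note the two-phase pattern this file now extends to UAVs: scrubbing a binding
only sets a dirty bit, and vmw_binding_emit_dirty() later emits one
SET_UA_VIEWS / SET_CS_UA_VIEWS command per dirty type. A self-contained sketch
of that deferred dispatch, with stand-in names (not the verbatim kernel code):

    struct ctx_binding_state {
            unsigned long dirty; /* one bit per VMW_BINDING_*_BIT */
    };

    enum { BINDING_UAV_BIT = 4, BINDING_CS_UAV_BIT = 5 };

    static int emit_set_uav(struct ctx_binding_state *cbs)
    { (void)cbs; return 0; }
    static int emit_set_cs_uav(struct ctx_binding_state *cbs)
    { (void)cbs; return 0; }

    /* Flush dirty binding types; each type is emitted at most once. */
    static int emit_dirty(struct ctx_binding_state *cbs)
    {
            int ret = 0;

            while (cbs->dirty && ret == 0) {
                    /* index of the lowest set bit */
                    unsigned int bit = __builtin_ctzl(cbs->dirty);

                    switch (bit) {
                    case BINDING_UAV_BIT:
                            ret = emit_set_uav(cbs);
                            break;
                    case BINDING_CS_UAV_BIT:
                            ret = emit_set_cs_uav(cbs);
                            break;
                    default:
                            break;
                    }
                    cbs->dirty &= ~(1UL << bit);
            }
            return ret;
    }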
@@ -33,6 +33,8 @@
 #define VMW_MAX_VIEW_BINDINGS 128

+#define VMW_MAX_UAV_BIND_TYPE 2
+
 struct vmw_private;
 struct vmw_ctx_binding_state;
@@ -51,6 +53,8 @@ enum vmw_ctx_binding_type {
 	vmw_ctx_binding_so,
 	vmw_ctx_binding_vb,
 	vmw_ctx_binding_ib,
+	vmw_ctx_binding_uav,
+	vmw_ctx_binding_cs_uav,
 	vmw_ctx_binding_max
 };
@@ -189,9 +193,21 @@ struct vmw_dx_shader_bindings {
 	unsigned long dirty;
 };

+/**
+ * struct vmw_ctx_bindinfo_uav - UAV context binding state.
+ * @views: UAV view bindings.
+ * @index: The device splice index set by user-space.
+ */
+struct vmw_ctx_bindinfo_uav {
+	struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS];
+	uint32 index;
+};
+
 extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
 			    const struct vmw_ctx_bindinfo *ci,
 			    u32 shader_slot, u32 slot);
+extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
+				      uint32 slot, uint32 splice_index);
 extern void
 vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
 			 struct vmw_ctx_binding_state *from);
...
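
The VMW_MAX_UAV_BIND_TYPE slots correspond to the two UAV binding points:
ua_views[0] for the graphics pipeline (splice index) and ua_views[1] for
compute (start index), which is exactly what the two offset tables in
vmwgfx_binding.c encode. A sketch of how an offsetof()-based table can resolve
a slot to its tracking entry (simplified stand-ins; uav_loc() is hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_UAVIEWS 8 /* stand-in for SVGA3D_MAX_UAVIEWS */

    struct bindinfo_view { uint32_t view_id; };

    struct bindinfo_uav {
            struct bindinfo_view views[MAX_UAVIEWS];
            uint32_t index; /* splice/start index from user-space */
    };

    struct ctx_binding_state {
            struct bindinfo_uav ua_views[2]; /* [0] graphics, [1] compute */
    };

    /* Offset tables mirroring vmw_binding_uav_offsets[] and friends. */
    static const size_t uav_offsets[] = {
            offsetof(struct ctx_binding_state, ua_views[0].views),
    };
    static const size_t cs_uav_offsets[] = {
            offsetof(struct ctx_binding_state, ua_views[1].views),
    };

    /* Resolve UAV binding slot i to its tracking entry. */
    static struct bindinfo_view *uav_loc(struct ctx_binding_state *cbs,
                                         int compute, uint32_t i)
    {
            const size_t *off = compute ? cs_uav_offsets : uav_offsets;

            return (struct bindinfo_view *)((uint8_t *)cbs + off[0]) + i;
    }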
@@ -36,7 +36,7 @@ struct vmw_user_context {
 	struct vmw_resource res;
 	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
-	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
 	spinlock_t cotable_lock;
 	struct vmw_buffer_object *dx_query_mob;
 };
@@ -116,12 +116,15 @@ static const struct vmw_res_func vmw_dx_context_func = {
  * Context management:
  */

-static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
+				       struct vmw_user_context *uctx)
 {
 	struct vmw_resource *res;
 	int i;
+	u32 cotable_max = has_sm5_context(dev_priv) ?
+		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

-	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+	for (i = 0; i < cotable_max; ++i) {
 		spin_lock(&uctx->cotable_lock);
 		res = uctx->cotables[i];
 		uctx->cotables[i] = NULL;
@@ -155,7 +158,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
-		vmw_context_cotables_unref(uctx);
+		vmw_context_cotables_unref(dev_priv, uctx);
 		return;
 	}
@@ -208,7 +211,9 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 	spin_lock_init(&uctx->cotable_lock);

 	if (dx) {
-		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		u32 cotable_max = has_sm5_context(dev_priv) ?
+			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
+		for (i = 0; i < cotable_max; ++i) {
 			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
 							      &uctx->res, i);
 			if (IS_ERR(uctx->cotables[i])) {
@@ -222,7 +227,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 	return 0;

 out_cotables:
-	vmw_context_cotables_unref(uctx);
+	vmw_context_cotables_unref(dev_priv, uctx);
 out_err:
 	if (res_free)
 		res_free(res);
@@ -545,10 +550,12 @@ void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
 {
 	struct vmw_user_context *uctx =
 		container_of(ctx, struct vmw_user_context, res);
+	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
 	int i;

 	vmw_binding_state_scrub(uctx->cbs);
-	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+	for (i = 0; i < cotable_max; ++i) {
 		struct vmw_resource *res;

 		/* Avoid racing with ongoing cotable destruction. */
@@ -839,7 +846,10 @@ struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
 struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
 					 SVGACOTableType cotable_type)
 {
-	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
+
+	if (cotable_type >= cotable_max)
 		return ERR_PTR(-EINVAL);

 	return container_of(ctx, struct vmw_user_context, res)->
...
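
All of the cotable iteration bounds above now depend on has_sm5_context(): a
device without Shader Model 5 keeps the old SVGA_COTABLE_DX10_MAX limit, so
the UA view cotable is never touched there. A hedged sketch of what such a
capability gate presumably looks like (the real helper lives in the driver's
private header and may differ in detail):

    /* Hypothetical shader-model levels, lowest to highest. */
    enum sm_type {
            SM_LEGACY = 0,
            SM_4,
            SM_4_1,
            SM_5,
    };

    struct private_sketch {
            enum sm_type sm_type; /* highest level the device exposes */
    };

    static inline int has_sm5_context_sketch(const struct private_sketch *p)
    {
            return p->sm_type >= SM_5;
    }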
@@ -82,7 +82,8 @@ static const struct vmw_cotable_info co_info[] = {
 	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
 	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
 	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
-	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
+	{1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
 };

 /*
@@ -102,6 +103,7 @@ const SVGACOTableType vmw_cotable_scrub_order[] = {
 	SVGA_COTABLE_SAMPLER,
 	SVGA_COTABLE_STREAMOUTPUT,
 	SVGA_COTABLE_DXQUERY,
+	SVGA_COTABLE_UAVIEW,
 };

 static int vmw_cotable_bind(struct vmw_resource *res,
...
@@ -459,11 +459,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	int ret = 0;
 	struct vmw_resource *res;
 	u32 i;
+	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

 	/* Add all cotables to the validation list. */
 	if (has_sm4_context(dev_priv) &&
 	    vmw_res_type(ctx) == vmw_res_dx_context) {
-		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		for (i = 0; i < cotable_max; ++i) {
 			res = vmw_context_cotable(ctx, i);
 			if (IS_ERR(res))
 				continue;
@@ -2814,6 +2816,128 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
 				   &cmd->body.surface.sid, NULL);
 }

+static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
+}
+
+static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
+}
+
+static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearUAViewUint body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	struct vmw_resource *ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
+				  cmd->body.uaViewId);
+
+	return PTR_ERR_OR_ZERO(ret);
+}
+
+static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearUAViewFloat body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	struct vmw_resource *ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
+				  cmd->body.uaViewId);
+
+	return PTR_ERR_OR_ZERO(ret);
+}
+
+static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
+			   struct vmw_sw_context *sw_context,
+			   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetUAViews body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dUAViewId);
+	int ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	if (num_uav > SVGA3D_MAX_UAVIEWS) {
+		VMW_DEBUG_USER("Invalid UAV binding.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
+				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
+				    num_uav, 0);
+	if (ret)
+		return ret;
+
+	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
+				  cmd->body.uavSpliceIndex);
+
+	return ret;
+}
+
+static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCSUAViews body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dUAViewId);
+	int ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	if (num_uav > SVGA3D_MAX_UAVIEWS) {
+		VMW_DEBUG_USER("Invalid UAV binding.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
+				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
+				    num_uav, 0);
+	if (ret)
+		return ret;
+
+	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
+				  cmd->body.startIndex);
+
+	return ret;
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -3163,6 +3287,24 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
 		    true, false, true),
+
+	/*
+	 * SM5 commands
+	 */
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
+		    &vmw_cmd_clear_uav_float, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
+		    false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
+		    true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
+		    false, true),
 };

 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
...
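
The VMW_CMD_DEF entries register each verifier in a table indexed by command
id, which the execbuf code consults for every command in a submitted buffer;
note that SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT is mapped to vmw_cmd_invalid and
therefore rejected. A toy version of that table-dispatch pattern (hypothetical
ids, flags and names, not the driver's actual macro):

    #include <stdint.h>

    typedef int (*cmd_func)(const void *body, uint32_t size);

    struct cmd_entry {
            cmd_func func;
            int user_allow; /* may user-space submit this id? */
    };

    enum { CMD_SET_UA_VIEWS, CMD_COPY_STRUCTURE_COUNT, CMD_MAX };

    static int check_set_ua_views(const void *body, uint32_t size)
    {
            (void)body; (void)size;
            return 0; /* verify view count, add bindings, ... */
    }

    static const struct cmd_entry cmd_entries[CMD_MAX] = {
            [CMD_SET_UA_VIEWS]         = { check_set_ua_views, 1 },
            [CMD_COPY_STRUCTURE_COUNT] = { 0 }, /* rejected, like vmw_cmd_invalid */
    };

    static int dispatch(uint32_t id, const void *body, uint32_t size)
    {
            if (id >= CMD_MAX || !cmd_entries[id].func ||
                !cmd_entries[id].user_allow)
                    return -1; /* unknown or forbidden command */
            return cmd_entries[id].func(body, size);
    }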
@@ -319,7 +319,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
 	static const size_t vmw_view_define_sizes[] = {
 		[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
 		[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
-		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView),
+		[vmw_view_ua] = sizeof(SVGA3dCmdDXDefineUAView)
 	};

 	struct vmw_private *dev_priv = ctx->dev_priv;
@@ -499,8 +500,8 @@ struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
 * Each time a resource is put on the validation list as the result of a
 * view pointing to it, we need to determine whether that resource will
 * be dirtied (written to by the GPU) as a result of the corresponding
- * GPU operation. Currently only rendertarget- and depth-stencil views are
- * capable of dirtying its resource.
+ * GPU operation. Currently only rendertarget-, depth-stencil and unordered
+ * access views are capable of dirtying its resource.
 *
 * Return: Whether the view type of @res dirties the resource it points to.
 */
@@ -509,10 +510,11 @@ u32 vmw_view_dirtying(struct vmw_resource *res)
 	static u32 view_is_dirtying[vmw_view_max] = {
 		[vmw_view_rt] = VMW_RES_DIRTY_SET,
 		[vmw_view_ds] = VMW_RES_DIRTY_SET,
+		[vmw_view_ua] = VMW_RES_DIRTY_SET,
 	};

 	/* Update this function as we add more view types */
-	BUILD_BUG_ON(vmw_view_max != 3);
+	BUILD_BUG_ON(vmw_view_max != 4);

 	return view_is_dirtying[vmw_view(res)->view_type];
 }
@@ -520,12 +522,14 @@ const u32 vmw_view_destroy_cmds[] = {
 	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
 	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
 	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+	[vmw_view_ua] = SVGA_3D_CMD_DX_DESTROY_UA_VIEW,
 };

 const SVGACOTableType vmw_view_cotables[] = {
 	[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
 	[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
 	[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+	[vmw_view_ua] = SVGA_COTABLE_UAVIEW,
 };

 const SVGACOTableType vmw_so_cotables[] = {
...
@@ -30,6 +30,7 @@ enum vmw_view_type {
 	vmw_view_sr,
 	vmw_view_rt,
 	vmw_view_ds,
+	vmw_view_ua,
 	vmw_view_max,
 };
@@ -61,6 +62,7 @@ union vmw_view_destroy {
 	struct SVGA3dCmdDXDestroyRenderTargetView rtv;
 	struct SVGA3dCmdDXDestroyShaderResourceView srv;
 	struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+	struct SVGA3dCmdDXDestroyUAView uav;
 	u32 view_id;
 };
@@ -87,6 +89,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
 {
 	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;

+	if (id == SVGA_3D_CMD_DX_DEFINE_UA_VIEW ||
+	    id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW)
+		return vmw_view_ua;
+
 	if (tmp > (u32)vmw_view_max)
 		return vmw_view_max;
...
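
The special case added to vmw_view_cmd_to_type() is needed because the
pre-existing define/destroy ids are allocated as adjacent pairs, letting
(id - first_define_id) / 2 map each pair to one enum vmw_view_type value; the
UA view ids were assigned elsewhere in the command space, so the pair
arithmetic cannot reach them. A toy illustration with hypothetical id values:

    /* Hypothetical, non-contiguous command ids for illustration only. */
    enum { DEF_SR = 100, DST_SR, DEF_RT, DST_RT, DEF_DS, DST_DS,
           DEF_UA = 200, DST_UA };
    enum view_type { VIEW_SR, VIEW_RT, VIEW_DS, VIEW_UA, VIEW_MAX };

    static enum view_type cmd_to_type(unsigned int id)
    {
            unsigned int tmp = (id - DEF_SR) / 2; /* pair index for SR/RT/DS */

            /* UA ids sit outside the contiguous pair block. */
            if (id == DEF_UA || id == DST_UA)
                    return VIEW_UA;
            if (tmp >= VIEW_MAX)
                    return VIEW_MAX; /* not a view command we know */
            return (enum view_type)tmp;
    }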