Commit d5bde956 authored by Thomas Hellstrom

drm/vmwgfx: Emulate legacy shaders on guest-backed devices v2

Legacy shader creation and destruction commands are replaced by NOPs in
the command stream; instead, guest-backed shaders are created and
destroyed as part of the command validation process.

v2: Removed some stray debug messages.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
parent c1a21373
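The key mechanism is visible in the vmwgfx_execbuf.c hunks below: when a legacy SVGA_3D_CMD_SHADER_DEFINE or SVGA_3D_CMD_SHADER_DESTROY is parsed on a guest-backed device, the validator records a resource relocation with a NULL resource, and vmw_resource_relocations_apply() later overwrites the command id with SVGA_3D_CMD_NOP instead of a resource id. The following standalone sketch models just that patching step; the NOP value and the fake_* names are illustrative stand-ins, not the driver's definitions (the real SVGA_3D_CMD_NOP lives in svga3d_reg.h).

#include <stdint.h>
#include <stdio.h>

#define FAKE_SVGA_3D_CMD_NOP 0xffffffffu /* stand-in; real value in svga3d_reg.h */

struct fake_resource { uint32_t id; };

struct fake_relocation {
	const struct fake_resource *res; /* NULL means: NOP out the command */
	uint32_t offset;                 /* index into the command buffer */
};

/* Mirrors the patched vmw_resource_relocations_apply() loop. */
static void relocations_apply(uint32_t *cb, const struct fake_relocation *rel,
			      unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i) {
		if (rel[i].res != NULL)
			cb[rel[i].offset] = rel[i].res->id;       /* normal fixup */
		else
			cb[rel[i].offset] = FAKE_SVGA_3D_CMD_NOP; /* legacy shader cmd */
	}
}

int main(void)
{
	uint32_t cb[4] = { 0, 0, 0, 0 };        /* toy command buffer */
	struct fake_resource shader = { 42 };
	struct fake_relocation rels[2] = {
		{ &shader, 1 },  /* ordinary resource id patch */
		{ NULL, 3 }      /* SHADER_DEFINE replaced by a NOP */
	};

	relocations_apply(cb, rels, 2);
	printf("cb[1]=%u, cb[3]=0x%x\n", cb[1], cb[3]);
	return 0;
}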
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
+	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
+	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+	if (IS_ERR(vmw_fp->shman))
+		goto out_no_shman;
+
 	file_priv->driver_priv = vmw_fp;
 	dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
 	return 0;
 
+out_no_shman:
+	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
...
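Note that vmw_driver_open() tests the new manager with IS_ERR() rather than comparing against NULL, because vmw_compat_shader_man_create() (see vmwgfx_shader.c below) returns ERR_PTR(ret) on failure. A minimal userspace model of that kernel idiom, with a hypothetical fake_man_create() stub standing in for the real constructor:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Same pattern as the kernel's include/linux/err.h helpers: an errno is
 * encoded in the pointer value itself. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for vmw_compat_shader_man_create(). */
static void *fake_man_create(int fail)
{
	static int manager; /* placeholder object */

	return fail ? ERR_PTR(-ENOMEM) : (void *)&manager;
}

int main(void)
{
	void *man = fake_man_create(1);

	if (IS_ERR(man)) /* the check vmw_driver_open() performs */
		printf("creation failed: %ld\n", PTR_ERR(man));
	return 0;
}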
@@ -75,10 +75,14 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
+	bool gb_aware;
+	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -318,7 +322,7 @@ struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct ttm_object_file *tfile;
+	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
@@ -336,6 +340,7 @@ struct vmw_sw_context{
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
+	struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -991,6 +996,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+				    SVGA3dShaderType shader_type,
+				    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+				    u32 user_key,
+				    SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct ttm_object_file *tfile,
+				 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
 
 /**
  * Inline helper functions
...
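These declarations identify a compat shader by a (user_key, shader_type) pair. In the vmwgfx_shader.c implementation further down, the pair is folded into a single hash key as user_key | (shader_type << 24), which is why vmw_compat_shader_add() rejects user keys that do not fit in 24 bits. A standalone model of that packing; pack_shader_key() is a made-up name for illustration, not a driver function:

#include <stdint.h>
#include <stdio.h>

/* Fold a user key and shader type into one hash key, mirroring the
 * "user_key | (shader_type << 24)" expression in vmwgfx_shader.c. */
static int pack_shader_key(uint32_t user_key, unsigned int shader_type,
			   unsigned long *key)
{
	if (user_key > ((1 << 24) - 1) || shader_type > 16)
		return -1; /* the driver returns -EINVAL here */

	*key = user_key | ((unsigned long)shader_type << 24);
	return 0;
}

int main(void)
{
	unsigned long key;

	if (pack_shader_key(0x1234, 2, &key) == 0)
		printf("hash key = 0x%lx\n", key); /* prints 0x2001234 */
	return 0;
}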
@@ -235,8 +235,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
 	struct vmw_resource_relocation *rel;
 
-	list_for_each_entry(rel, list, head)
-		cb[rel->offset] = rel->res->id;
+	list_for_each_entry(rel, list, head) {
+		if (likely(rel->res != NULL))
+			cb[rel->offset] = rel->res->id;
+		else
+			cb[rel->offset] = SVGA_3D_CMD_NOP;
+	}
 }
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -381,22 +385,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visisble type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
 */
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
-			     struct vmw_sw_context *sw_context,
-			     enum vmw_res_type res_type,
-			     const struct vmw_user_resource_conv *converter,
-			     uint32_t *id,
-			     struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+			 struct vmw_sw_context *sw_context,
+			 enum vmw_res_type res_type,
+			 const struct vmw_user_resource_conv *converter,
+			 uint32_t id,
+			 uint32_t *id_loc,
+			 struct vmw_resource_val_node **p_val)
 {
 	struct vmw_res_cache_entry *rcache =
 		&sw_context->res_cache[res_type];
@@ -404,7 +413,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID) {
+	if (id == SVGA3D_INVALID_ID) {
 		if (p_val)
 			*p_val = NULL;
 		if (res_type == vmw_res_context) {
@@ -419,7 +428,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	 * resource
 	 */
 
-	if (likely(rcache->valid && *id == rcache->handle)) {
+	if (likely(rcache->valid && id == rcache->handle)) {
 		const struct vmw_resource *res = rcache->res;
 
 		rcache->node->first_usage = false;
@@ -428,28 +437,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id - sw_context->buf_start);
+			 id_loc - sw_context->buf_start);
 	}
 
 	ret = vmw_user_resource_lookup_handle(dev_priv,
-					      sw_context->tfile,
-					      *id,
+					      sw_context->fp->tfile,
+					      id,
 					      converter,
 					      &res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
-			  (unsigned) *id);
+			  (unsigned) id);
 		dump_stack();
 		return ret;
 	}
 
 	rcache->valid = true;
 	rcache->res = res;
-	rcache->handle = *id;
+	rcache->handle = id;
 
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
-					  id - sw_context->buf_start);
+					  id_loc - sw_context->buf_start);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
@@ -482,6 +491,31 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	return ret;
 }
 
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visisble type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+		  struct vmw_sw_context *sw_context,
+		  enum vmw_res_type res_type,
+		  const struct vmw_user_resource_conv *converter,
+		  uint32_t *id_loc,
+		  struct vmw_resource_val_node **p_val)
+{
+	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+					converter, *id_loc, id_loc, p_val);
+}
+
 /**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
@@ -769,7 +803,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		return -EINVAL;
@@ -830,7 +864,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		return -EINVAL;
@@ -1129,7 +1163,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+			     header);
 
 out_no_surface:
 	vmw_dmabuf_unreference(&vmw_bo);
@@ -1480,6 +1515,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
 				 &cmd->body.sid, NULL);
 }
 
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_define_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineShader body;
+	} *cmd;
+	int ret;
+	size_t size;
+
+	cmd = container_of(header, struct vmw_shader_define_cmd,
+			   header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	size = cmd->header.size - sizeof(cmd->body);
+	ret = vmw_compat_shader_add(sw_context->fp->shman,
+				    cmd->body.shid, cmd + 1,
+				    cmd->body.type, size,
+				    sw_context->fp->tfile,
+				    &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_destroy_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyShader body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_shader_destroy_cmd,
+			   header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	ret = vmw_compat_shader_remove(sw_context->fp->shman,
+				       cmd->body.shid,
+				       cmd->body.type,
+				       &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
@@ -1511,10 +1638,17 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 	if (dev_priv->has_mob) {
 		struct vmw_ctx_bindinfo bi;
 		struct vmw_resource_val_node *res_node;
+		u32 shid = cmd->body.shid;
 
+		(void) vmw_compat_shader_lookup(sw_context->fp->shman,
+						cmd->body.type,
+						&shid);
+
-		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-					user_shader_converter,
-					&cmd->body.shid, &res_node);
+		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+					       vmw_res_shader,
+					       user_shader_converter,
+					       shid,
+					       &cmd->body.shid, &res_node);
 		if (unlikely(ret != 0))
 			return ret;
@@ -1669,10 +1803,10 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
 		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
-		    true, true, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
-		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
@@ -2206,7 +2340,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	} else
 		sw_context->kernel = true;
 
-	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	sw_context->fence_flags = 0;
@@ -2223,6 +2357,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			goto out_unlock;
 		sw_context->res_ht_initialized = true;
 	}
+	INIT_LIST_HEAD(&sw_context->staged_shaders);
 
 	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
@@ -2311,6 +2446,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	}
 
 	list_splice_init(&sw_context->resource_list, &resource_list);
+	vmw_compat_shaders_commit(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
@@ -2337,6 +2474,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	list_splice_init(&sw_context->resource_list, &resource_list);
 	error_resource = sw_context->error_resource;
 	sw_context->error_resource = NULL;
+	vmw_compat_shaders_revert(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
...
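The two execbuf hunks above are the transaction boundary: vmw_compat_shaders_commit() runs once the fifo contents have been committed to the device, and vmw_compat_shaders_revert() runs on the error path before the command stream is abandoned. The standalone model below captures that ADD/DEL/COMMITTED staging discipline with a toy fixed-size table in place of the driver's hash table and list heads; names and sizes are illustrative only.

#include <stdio.h>

enum state { COMMITTED, STAGED_ADD, STAGED_DEL };

struct entry { unsigned int key; enum state st; int live; };

static struct entry table[8];

static void stage_add(unsigned int key)
{
	for (int i = 0; i < 8; ++i)
		if (!table[i].live) {
			table[i] = (struct entry){ key, STAGED_ADD, 1 };
			return;
		}
}

static void stage_del(unsigned int key)
{
	for (int i = 0; i < 8; ++i)
		if (table[i].live && table[i].key == key) {
			if (table[i].st == STAGED_ADD)
				table[i].live = 0;  /* never committed: drop now */
			else
				table[i].st = STAGED_DEL;
			return;
		}
}

static void commit(void)  /* cf. vmw_compat_shaders_commit() */
{
	for (int i = 0; i < 8; ++i) {
		if (!table[i].live)
			continue;
		if (table[i].st == STAGED_DEL)
			table[i].live = 0;          /* removal becomes final */
		else
			table[i].st = COMMITTED;    /* addition becomes final */
	}
}

static void revert(void)  /* cf. vmw_compat_shaders_revert() */
{
	for (int i = 0; i < 8; ++i) {
		if (!table[i].live)
			continue;
		if (table[i].st == STAGED_ADD)
			table[i].live = 0;          /* undo the addition */
		else
			table[i].st = COMMITTED;    /* resurrect the entry */
	}
}

int main(void)
{
	stage_add(1); stage_add(2); commit();  /* 1 and 2 now committed */
	stage_del(1); stage_add(3); revert();  /* failed submission: back to 1, 2 */
	for (int i = 0; i < 8; ++i)
		if (table[i].live)
			printf("key %u state %d\n", table[i].key, table[i].st);
	return 0;
}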
@@ -29,6 +29,8 @@
 #include "vmwgfx_resource_priv.h"
 #include "ttm/ttm_placement.h"
 
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
 struct vmw_shader {
 	struct vmw_resource res;
 	SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
 	struct vmw_shader shader;
 };
 
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+	VMW_COMPAT_COMMITED,
+	VMW_COMPAT_ADD,
+	VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+	u32 handle;
+	struct ttm_object_file *tfile;
+	struct drm_hash_item hash;
+	struct list_head head;
+	enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and commited compat shaders
+ * @list: List of commited shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+	struct drm_open_hash shaders;
+	struct list_head list;
+	struct vmw_private *dev_priv;
+};
+
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				  TTM_REF_USAGE);
 }
 
+int vmw_shader_alloc(struct vmw_private *dev_priv,
+		     struct vmw_dma_buffer *buffer,
+		     size_t shader_size,
+		     size_t offset,
+		     SVGA3dShaderType shader_type,
+		     struct ttm_object_file *tfile,
+		     u32 *handle)
+{
+	struct vmw_user_shader *ushader;
+	struct vmw_resource *res, *tmp;
+	int ret;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by maximum number_of shaders anyway.
+	 */
+	if (unlikely(vmw_user_shader_size == 0))
+		vmw_user_shader_size =
+			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_shader_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		goto out;
+	}
+
+	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+	if (unlikely(ushader == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_shader_size);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	res = &ushader->shader.res;
+	ushader->base.shareable = false;
+	ushader->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+				 offset, shader_type, buffer,
+				 vmw_user_shader_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_base_object_init(tfile, &ushader->base, false,
+				   VMW_RES_SHADER,
+				   &vmw_user_shader_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	if (handle)
+		*handle = ushader->base.hash.key;
+out_err:
+	vmw_resource_unreference(&res);
+out:
+	return ret;
+}
+
 int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_shader *ushader;
-	struct vmw_resource *res;
-	struct vmw_resource *tmp;
 	struct drm_vmw_shader_create_arg *arg =
 		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 		goto out_bad_arg;
 	}
 
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of shaders anyway.
-	 */
-	if (unlikely(vmw_user_shader_size == 0))
-		vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
-			+ 128;
-
-	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   vmw_user_shader_size,
-				   false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for shader"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
-	if (unlikely(ushader == NULL)) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_user_shader_size);
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	res = &ushader->shader.res;
-	ushader->base.shareable = false;
-	ushader->base.tfile = NULL;
-
-	/*
-	 * From here on, the destructor takes over resource freeing.
-	 */
-
-	ret = vmw_gb_shader_init(dev_priv, res, arg->size,
-				 arg->offset, shader_type, buffer,
-				 vmw_user_shader_free);
-	if (unlikely(ret != 0))
-		goto out_unlock;
-
-	tmp = vmw_resource_reference(res);
-	ret = ttm_base_object_init(tfile, &ushader->base, false,
-				   VMW_RES_SHADER,
-				   &vmw_user_shader_base_release, NULL);
-
-	if (unlikely(ret != 0)) {
-		vmw_resource_unreference(&tmp);
-		goto out_err;
-	}
-
-	arg->shader_handle = ushader->base.hash.key;
-out_err:
-	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&vmaster->lock);
-out_bad_arg:
-	vmw_dmabuf_unreference(&buffer);
-
-	return ret;
-}
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		goto out_bad_arg;
+
+	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+			       shader_type, tfile, &arg->shader_handle);
+
+	ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+	vmw_dmabuf_unreference(&buffer);
+	return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, this should be a pointer to the user_key.
+ * On successful exit, it will contain the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+			     SVGA3dShaderType shader_type,
+			     u32 *user_key)
+{
+	struct drm_hash_item *hash;
+	int ret;
+	unsigned long key = *user_key | (shader_type << 24);
+
+	ret = drm_ht_find_item(&man->shaders, key, &hash);
+	if (unlikely(ret != 0))
+		return ret;
+
+	*user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+				   hash)->handle;
+
+	return 0;
+}
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shder entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+				   struct vmw_compat_shader *entry)
+{
+	list_del(&entry->head);
+	WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+	WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+					  TTM_REF_USAGE));
+	kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has commited the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+			       struct list_head *list)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		list_del(&entry->head);
+		switch (entry->state) {
+		case VMW_COMPAT_ADD:
+			entry->state = VMW_COMPAT_COMMITED;
+			list_add_tail(&entry->head, &man->list);
+			break;
+		case VMW_COMPAT_DEL:
+			ttm_ref_object_base_unref(entry->tfile, entry->handle,
+						  TTM_REF_USAGE);
+			kfree(entry);
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+			       struct list_head *list)
+{
+	struct vmw_compat_shader *entry, *next;
+	int ret;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		switch (entry->state) {
+		case VMW_COMPAT_ADD:
+			vmw_compat_shader_free(man, entry);
+			break;
+		case VMW_COMPAT_DEL:
+			ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+			list_del(&entry->head);
+			list_add_tail(&entry->head, &man->list);
+			entry->state = VMW_COMPAT_COMMITED;
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (But the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously commited to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list)
+{
+	struct vmw_compat_shader *entry;
+	struct drm_hash_item *hash;
+	int ret;
+
+	ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+			       &hash);
+	if (likely(ret != 0))
+		return -EINVAL;
+
+	entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+	switch (entry->state) {
+	case VMW_COMPAT_ADD:
+		vmw_compat_shader_free(man, entry);
+		break;
+	case VMW_COMPAT_COMMITED:
+		(void) drm_ht_remove_item(&man->shaders, &entry->hash);
+		list_del(&entry->head);
+		entry->state = VMW_COMPAT_DEL;
+		list_add_tail(&entry->head, list);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+			  u32 user_key, const void *bytecode,
+			  SVGA3dShaderType shader_type,
+			  size_t size,
+			  struct ttm_object_file *tfile,
+			  struct list_head *list)
+{
+	struct vmw_dma_buffer *buf;
+	struct ttm_bo_kmap_obj map;
+	bool is_iomem;
+	struct vmw_compat_shader *compat;
+	u32 handle;
+	int ret;
+
+	if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+		return -EINVAL;
+
+	/* Allocate and pin a DMA buffer */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (unlikely(buf == NULL))
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+			      true, vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+
+	/* Map and copy shader bytecode. */
+	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+			  &map);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(&buf->base);
+		goto no_reserve;
+	}
+
+	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+	WARN_ON(is_iomem);
+
+	ttm_bo_kunmap(&map);
+	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+	WARN_ON(ret != 0);
+	ttm_bo_unreserve(&buf->base);
+
+	/* Create a guest-backed shader container backed by the dma buffer */
+	ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+			       tfile, &handle);
+	vmw_dmabuf_unreference(&buf);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+
+	/*
+	 * Create a compat shader structure and stage it for insertion
+	 * in the manager
+	 */
+	compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+	if (compat == NULL)
+		goto no_compat;
+
+	compat->hash.key = user_key | (shader_type << 24);
+	ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+	if (unlikely(ret != 0))
+		goto out_invalid_key;
+
+	compat->state = VMW_COMPAT_ADD;
+	compat->handle = handle;
+	compat->tfile = tfile;
+	list_add_tail(&compat->head, list);
+
+	return 0;
+
+out_invalid_key:
+	kfree(compat);
+no_compat:
+	ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
+	return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_compat_shader_manager *man;
+	int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+
+	man->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&man->list);
+	ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+	if (ret == 0)
+		return man;
+
+	kfree(man);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	mutex_lock(&man->dev_priv->cmdbuf_mutex);
+	list_for_each_entry_safe(entry, next, &man->list, head)
+		vmw_compat_shader_free(man, entry);
+	mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+	kfree(man);
+}