Commit b139d43d authored by Thomas Hellstrom's avatar Thomas Hellstrom

drm/vmwgfx: Make buffer object lookups reference-free during validation

Make the process of looking up a buffer object and adding it to the
validation list reference-free, except when the object is actually added
to the validation list, at which point a single reference is taken.
This saves two locked atomic operations per command stream buffer object
handle lookup.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent b733bc2e
...@@ -1137,7 +1137,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ...@@ -1137,7 +1137,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command batch validation. * @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated. * @id: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry * @vmw_bo_p: Points to a location that, on successful return will carry
* a reference-counted pointer to the DMA buffer identified by the * a non-reference-counted pointer to the buffer object identified by the
* user-space handle in @id. * user-space handle in @id.
* *
* This function saves information needed to translate a user-space buffer * This function saves information needed to translate a user-space buffer
...@@ -1152,38 +1152,34 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1152,38 +1152,34 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
SVGAMobId *id, SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p) struct vmw_buffer_object **vmw_bo_p)
{ {
struct vmw_buffer_object *vmw_bo = NULL; struct vmw_buffer_object *vmw_bo;
uint32_t handle = *id; uint32_t handle = *id;
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL); vmw_validation_preload_bo(sw_context->ctx);
if (unlikely(ret != 0)) { vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
DRM_ERROR("Could not find or use MOB buffer.\n"); DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL; return PTR_ERR(vmw_bo);
goto out_no_reloc;
} }
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
vmw_user_bo_noref_release();
if (unlikely(ret != 0))
return ret;
reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc)); reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
if (!reloc) if (!reloc)
goto out_no_reloc; return -ENOMEM;
reloc->mob_loc = id; reloc->mob_loc = id;
reloc->vbo = vmw_bo; reloc->vbo = vmw_bo;
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
if (unlikely(ret != 0))
goto out_no_reloc;
*vmw_bo_p = vmw_bo; *vmw_bo_p = vmw_bo;
list_add_tail(&reloc->head, &sw_context->bo_relocations); list_add_tail(&reloc->head, &sw_context->bo_relocations);
return 0; return 0;
out_no_reloc:
vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
} }
/** /**
...@@ -1194,7 +1190,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1194,7 +1190,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command batch validation. * @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated. * @ptr: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry * @vmw_bo_p: Points to a location that, on successful return will carry
* a reference-counted pointer to the DMA buffer identified by the * a non-reference-counted pointer to the DMA buffer identified by the
* user-space handle in @id. * user-space handle in @id.
* *
* This function saves information needed to translate a user-space buffer * This function saves information needed to translate a user-space buffer
...@@ -1210,38 +1206,33 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ...@@ -1210,38 +1206,33 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
SVGAGuestPtr *ptr, SVGAGuestPtr *ptr,
struct vmw_buffer_object **vmw_bo_p) struct vmw_buffer_object **vmw_bo_p)
{ {
struct vmw_buffer_object *vmw_bo = NULL; struct vmw_buffer_object *vmw_bo;
uint32_t handle = ptr->gmrId; uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL); vmw_validation_preload_bo(sw_context->ctx);
if (unlikely(ret != 0)) { vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
DRM_ERROR("Could not find or use GMR region.\n"); DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL; return PTR_ERR(vmw_bo);
goto out_no_reloc;
} }
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
vmw_user_bo_noref_release();
if (unlikely(ret != 0))
return ret;
reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc)); reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
if (!reloc) if (!reloc)
goto out_no_reloc; return -ENOMEM;
reloc->location = ptr; reloc->location = ptr;
reloc->vbo = vmw_bo; reloc->vbo = vmw_bo;
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
if (unlikely(ret != 0))
goto out_no_reloc;
*vmw_bo_p = vmw_bo; *vmw_bo_p = vmw_bo;
list_add_tail(&reloc->head, &sw_context->bo_relocations); list_add_tail(&reloc->head, &sw_context->bo_relocations);
return 0; return 0;
out_no_reloc:
vmw_bo_unreference(&vmw_bo);
*vmw_bo_p = NULL;
return ret;
} }
...@@ -1328,10 +1319,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, ...@@ -1328,10 +1319,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
sw_context->dx_query_mob = vmw_bo; sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx; sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
return 0;
vmw_bo_unreference(&vmw_bo);
return ret;
} }
...@@ -1432,7 +1420,6 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, ...@@ -1432,7 +1420,6 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
...@@ -1486,7 +1473,6 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, ...@@ -1486,7 +1473,6 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context); ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
...@@ -1519,7 +1505,6 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, ...@@ -1519,7 +1505,6 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
vmw_bo_unreference(&vmw_bo);
return 0; return 0;
} }
...@@ -1571,7 +1556,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, ...@@ -1571,7 +1556,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
vmw_bo_unreference(&vmw_bo);
return 0; return 0;
} }
...@@ -1622,7 +1606,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, ...@@ -1622,7 +1606,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS)) if (unlikely(ret != -ERESTARTSYS))
DRM_ERROR("could not find surface for DMA.\n"); DRM_ERROR("could not find surface for DMA.\n");
goto out_no_surface; return ret;
} }
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
...@@ -1630,9 +1614,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, ...@@ -1630,9 +1614,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
header); header);
out_no_surface: return 0;
vmw_bo_unreference(&vmw_bo);
return ret;
} }
static int vmw_cmd_draw(struct vmw_private *dev_priv, static int vmw_cmd_draw(struct vmw_private *dev_priv,
...@@ -1763,14 +1745,9 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, ...@@ -1763,14 +1745,9 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
SVGAFifoCmdDefineGMRFB body; SVGAFifoCmdDefineGMRFB body;
} *cmd = buf; } *cmd = buf;
ret = vmw_translate_guest_ptr(dev_priv, sw_context, return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr, &cmd->body.ptr,
&vmw_bo); &vmw_bo);
if (unlikely(ret != 0))
return ret;
vmw_bo_unreference(&vmw_bo);
return ret; return ret;
} }
...@@ -1810,8 +1787,6 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, ...@@ -1810,8 +1787,6 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
vmw_validation_res_switch_backup(sw_context->ctx, info, vbo, vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
backup_offset); backup_offset);
vmw_bo_unreference(&vbo);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment