Commit d80efd5c authored by Thomas Hellstrom

drm/vmwgfx: Initial DX support

Initial DX support.
Co-authored with Sinclair Yeh, Charmaine Lee and Jakob Bornecrantz.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Charmaine Lee <charmainel@vmware.com>
parent 8ce75f8a
@@ -8,5 +8,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
This diff is collapsed.
/**************************************************************************
*
* Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _VMWGFX_BINDING_H_
#define _VMWGFX_BINDING_H_
#include "device_include/svga3d_reg.h"
#include <linux/list.h>
#define VMW_MAX_VIEW_BINDINGS 128
struct vmw_private;
struct vmw_ctx_binding_state;
/*
* enum vmw_ctx_binding_type - abstract resource to context binding types
*/
enum vmw_ctx_binding_type {
vmw_ctx_binding_shader,
vmw_ctx_binding_rt,
vmw_ctx_binding_tex,
vmw_ctx_binding_cb,
vmw_ctx_binding_dx_shader,
vmw_ctx_binding_dx_rt,
vmw_ctx_binding_sr,
vmw_ctx_binding_ds,
vmw_ctx_binding_so,
vmw_ctx_binding_vb,
vmw_ctx_binding_ib,
vmw_ctx_binding_max
};
/**
* struct vmw_ctx_bindinfo - single binding metadata
*
* @ctx_list: List head for the context's list of bindings.
* @res_list: List head for a resource's list of bindings.
* @ctx: Non-refcounted pointer to the context that owns the binding. NULL
* indicates no binding present.
* @res: Non-refcounted pointer to the resource the binding points to. This
* is typically a surface or a view.
* @bt: Binding type.
* @scrubbed: Whether the binding has been scrubbed from the context.
*/
struct vmw_ctx_bindinfo {
struct list_head ctx_list;
struct list_head res_list;
struct vmw_resource *ctx;
struct vmw_resource *res;
enum vmw_ctx_binding_type bt;
bool scrubbed;
};
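/*
 * Illustrative sketch, not part of this patch: the derived binding types
 * below embed struct vmw_ctx_bindinfo as a member, so generic code that
 * walks @ctx_list or @res_list can recover the containing binding with
 * container_of(). The helper name is hypothetical.
 */
#if 0
static inline struct vmw_ctx_bindinfo_tex *
vmw_ctx_bindinfo_to_tex(struct vmw_ctx_bindinfo *bi)
{
	return container_of(bi, struct vmw_ctx_bindinfo_tex, bi);
}
#endif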
/**
* struct vmw_ctx_bindinfo_tex - texture stage binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @texture_stage: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_tex {
struct vmw_ctx_bindinfo bi;
uint32 texture_stage;
};
/**
* struct vmw_ctx_bindinfo_shader - Shader binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @shader_slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_shader {
struct vmw_ctx_bindinfo bi;
SVGA3dShaderType shader_slot;
};
/**
* struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @shader_slot: Device data used to reconstruct binding command.
* @offset: Device data used to reconstruct binding command.
* @size: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_cb {
struct vmw_ctx_bindinfo bi;
SVGA3dShaderType shader_slot;
uint32 offset;
uint32 size;
uint32 slot;
};
/**
* struct vmw_ctx_bindinfo_view - View binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @shader_slot: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_view {
struct vmw_ctx_bindinfo bi;
SVGA3dShaderType shader_slot;
uint32 slot;
};
/**
* struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @offset: Device data used to reconstruct binding command.
* @size: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_so {
struct vmw_ctx_bindinfo bi;
uint32 offset;
uint32 size;
uint32 slot;
};
/**
* struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @offset: Device data used to reconstruct binding command.
* @stride: Device data used to reconstruct binding command.
* @slot: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_vb {
struct vmw_ctx_bindinfo bi;
uint32 offset;
uint32 stride;
uint32 slot;
};
/**
* struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
*
* @bi: struct vmw_ctx_bindinfo we derive from.
* @offset: Device data used to reconstruct binding command.
* @format: Device data used to reconstruct binding command.
*/
struct vmw_ctx_bindinfo_ib {
struct vmw_ctx_bindinfo bi;
uint32 offset;
uint32 format;
};
/**
* struct vmw_dx_shader_bindings - per shader type context binding state
*
* @shader: The shader binding for this shader type
* @const_buffers: Const buffer bindings for this shader type.
* @shader_res: Shader resource view bindings for this shader type.
* @dirty_sr: Bitmap tracking individual shader resource bindings changes
* that have not yet been emitted to the device.
* @dirty: Bitmap tracking per-binding type binding changes that have not
* yet been emitted to the device.
*/
struct vmw_dx_shader_bindings {
struct vmw_ctx_bindinfo_shader shader;
struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
unsigned long dirty;
};
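/*
 * Illustrative sketch, not part of this patch: a rebind of shader-resource
 * slot @slot would typically set that slot's bit in @dirty_sr and a
 * per-binding-type bit in @dirty, deferring command emission until the next
 * submission. VMW_BINDING_SR_BIT and the helper name are hypothetical.
 */
#if 0
#define VMW_BINDING_SR_BIT 0
static inline void vmw_dx_shader_mark_sr_dirty(struct vmw_dx_shader_bindings *sb,
					       u32 slot)
{
	__set_bit(slot, sb->dirty_sr);
	__set_bit(VMW_BINDING_SR_BIT, &sb->dirty);
}
#endif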
extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
const struct vmw_ctx_bindinfo *ci,
u32 shader_slot, u32 slot);
extern void
vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
struct vmw_ctx_binding_state *from);
extern void vmw_binding_res_list_kill(struct list_head *head);
extern void vmw_binding_res_list_scrub(struct list_head *head);
extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
extern struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv);
extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
extern struct list_head *
vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
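/*
 * Illustrative usage sketch, not part of this patch: during command stream
 * validation, bindings are staged into a scratch state object and only
 * committed to the context's own state once the stream is accepted. The
 * function and variable names below are hypothetical.
 */
#if 0
static void example_stage_and_commit(struct vmw_ctx_binding_state *staged,
				     struct vmw_ctx_binding_state *ctx_state,
				     const struct vmw_ctx_bindinfo_view *view)
{
	/* Stage the view at shader slot 0, view slot 2. */
	vmw_binding_add(staged, &view->bi, 0, 2);
	/* Transfer all staged bindings to the context's state. */
	vmw_binding_state_commit(ctx_state, staged);
}
#endif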
#endif
@@ -916,8 +916,7 @@ static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
 	cur = man->cur;
 	if (cur && (size + man->cur_pos > cur->size ||
-		    (ctx_id != SVGA3D_INVALID_ID &&
-		     (cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
 		     ctx_id != cur->cb_header->dxContext)))
 		__vmw_cmdbuf_cur_flush(man);
...
@@ -26,15 +26,10 @@
 **************************************************************************/
 
 #include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
 
 #define VMW_CMDBUF_RES_MAN_HT_ORDER 12
 
-enum vmw_cmdbuf_res_state {
-	VMW_CMDBUF_RES_COMMITED,
-	VMW_CMDBUF_RES_ADD,
-	VMW_CMDBUF_RES_DEL
-};
-
 /**
  * struct vmw_cmdbuf_res - Command buffer managed resource entry.
  *
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
 	list_for_each_entry_safe(entry, next, list, head) {
 		list_del(&entry->head);
+		if (entry->res->func->commit_notify)
+			entry->res->func->commit_notify(entry->res,
+							entry->state);
 		switch (entry->state) {
 		case VMW_CMDBUF_RES_ADD:
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			list_add_tail(&entry->head, &entry->man->list);
 			break;
 		case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
 						    &entry->hash);
 			list_del(&entry->head);
 			list_add_tail(&entry->head, &entry->man->list);
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			break;
 		default:
 			BUG();
@@ -231,6 +229,9 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
  * @res_type: The resource type.
  * @user_key: The user-space id of the resource.
  * @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
  *
  * This function looks up the struct vmw_cmdbuf_res entry from the manager
  * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 			  enum vmw_cmdbuf_res_type res_type,
 			  u32 user_key,
-			  struct list_head *list)
+			  struct list_head *list,
+			  struct vmw_resource **res_p)
 {
 	struct vmw_cmdbuf_res *entry;
 	struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 	switch (entry->state) {
 	case VMW_CMDBUF_RES_ADD:
 		vmw_cmdbuf_res_free(man, entry);
+		*res_p = NULL;
 		break;
-	case VMW_CMDBUF_RES_COMMITED:
+	case VMW_CMDBUF_RES_COMMITTED:
 		(void) drm_ht_remove_item(&man->resources, &entry->hash);
 		list_del(&entry->head);
 		entry->state = VMW_CMDBUF_RES_DEL;
 		list_add_tail(&entry->head, list);
+		*res_p = entry->res;
 		break;
 	default:
 		BUG();
...
This diff is collapsed.
@@ -28,6 +28,7 @@
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
@@ -127,6 +128,9 @@
 #define DRM_IOCTL_VMW_SYNCCPU \
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
 		 struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
+		struct drm_vmw_context_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
 		      vmw_user_dmabuf_synccpu_ioctl,
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+		      vmw_extended_context_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -390,8 +397,10 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 	}
 	vmw_fence_fifo_up(dev_priv->fman);
 	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
-	if (IS_ERR(dev_priv->cman))
+	if (IS_ERR(dev_priv->cman)) {
 		dev_priv->cman = NULL;
+		dev_priv->has_dx = false;
+	}
 
 	ret = vmw_request_device_late(dev_priv);
 	if (ret)
@@ -848,6 +857,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		}
 	}
 
+	if (dev_priv->has_mob) {
+		spin_lock(&dev_priv->cap_lock);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		spin_unlock(&dev_priv->cap_lock);
+	}
+
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
@@ -857,6 +874,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (ret)
 		goto out_no_fifo;
 
+	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
 	if (dev_priv->enable_fb) {
 		vmw_fifo_resource_inc(dev_priv);
 		vmw_svga_enable(dev_priv);
@@ -900,6 +919,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
 
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	kfree(dev_priv);
 	return ret;
 }
@@ -945,6 +966,8 @@ static int vmw_driver_unload(struct drm_device *dev)
 	iounmap(dev_priv->mmio_virt);
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -1082,11 +1105,21 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 		const struct drm_ioctl_desc *ioctl =
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-		if (unlikely(ioctl->cmd != cmd)) {
-			DRM_ERROR("Invalid command format, ioctl %d\n",
-				  nr - DRM_COMMAND_BASE);
-			return -EINVAL;
+		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+			if (unlikely(ret != 0))
+				return ret;
+
+			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+				goto out_io_encoding;
+
+			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+							_IOC_SIZE(cmd));
 		}
+
+		if (unlikely(ioctl->cmd != cmd))
+			goto out_io_encoding;
+
 		flags = ioctl->flags;
 	} else if (!drm_ioctl_flags(nr, &flags))
 		return -EINVAL;
@@ -1106,6 +1139,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 
 	ttm_read_unlock(&vmaster->lock);
 	return ret;
+
+out_io_encoding:
+	DRM_ERROR("Invalid command format, ioctl %d\n",
+		  nr - DRM_COMMAND_BASE);
+	return -EINVAL;
 }
 
 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1156,7 +1195,6 @@ static void vmw_master_destroy(struct drm_device *dev,
 	kfree(vmaster);
 }
 
-
 static int vmw_master_set(struct drm_device *dev,
 			  struct drm_file *file_priv,
 			  bool from_open)
...
This diff is collapsed.
@@ -29,6 +29,11 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_placement.h>
 
+struct vmw_temp_set_context {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDXTempSetContext body;
+};
+
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
 	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -99,6 +104,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	uint32_t max;
 	uint32_t min;
 
+	fifo->dx = false;
 	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
 	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
 	if (unlikely(fifo->static_buffer == NULL))
@@ -396,15 +402,20 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 	return NULL;
 }
 
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+			  int ctx_id)
 {
 	void *ret;
 
 	if (dev_priv->cman)
 		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
-					 SVGA3D_INVALID_ID, false, NULL);
-	else
+					 ctx_id, false, NULL);
+	else if (ctx_id == SVGA3D_INVALID_ID)
 		ret = vmw_local_fifo_reserve(dev_priv, bytes);
+	else {
+		WARN_ON("Command buffer has not been allocated.\n");
+		ret = NULL;
+	}
 	if (IS_ERR_OR_NULL(ret)) {
 		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
 			  (unsigned) bytes);
@@ -466,6 +477,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
+	if (fifo_state->dx)
+		bytes += sizeof(struct vmw_temp_set_context);
+
+	fifo_state->dx = false;
 	BUG_ON((bytes & 3) != 0);
 	BUG_ON(bytes > fifo_state->reserved_size);
 
@@ -518,7 +533,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
  * @dev_priv: Pointer to device private structure.
  * @bytes: Number of bytes to commit.
  */
-static void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	if (dev_priv->cman)
 		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
@@ -706,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 
 	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
 }
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}
@@ -110,6 +110,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		param->value =
 			(dev_priv->active_display_unit == vmw_du_screen_target);
 		break;
+	case DRM_VMW_PARAM_DX:
+		param->value = dev_priv->has_dx;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
@@ -193,8 +196,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	uint32_t *bounce32 = (uint32_t *) bounce;
 
 	num = size / sizeof(uint32_t);
-	if (num > SVGA3D_DEVCAP_MAX)
-		num = SVGA3D_DEVCAP_MAX;
+	if (num > SVGA3D_DEVCAP_DX)
+		num = SVGA3D_DEVCAP_DX;
 
 	spin_lock(&dev_priv->cap_lock);
 	for (i = 0; i < num; ++i) {
...
@@ -528,7 +528,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 		return -EINVAL;
 	}
 
-	if (unlikely(format != surface->format)) {
+	/*
+	 * For DX, surface format validation is done when surface->scanout
+	 * is set.
+	 */
+	if (!dev_priv->has_dx && format != surface->format) {
 		DRM_ERROR("Invalid surface format for requested mode.\n");
 		return -EINVAL;
 	}
@@ -754,6 +758,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
 			      true, /* can be a scanout buffer */
 			      1, /* num of mip levels */
 			      0,
+			      0,
 			      content_base_size,
 			      srf_out);
 	if (ret) {
@@ -769,7 +774,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
 	vmw_dmabuf_unreference(&res->backup);
 	res->backup = vmw_dmabuf_reference(dmabuf_mob);
 	res->backup_offset = 0;
-	vmw_resource_unreserve(res, NULL, 0);
+	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 
 	return 0;
@@ -1869,7 +1874,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 void vmw_kms_helper_resource_revert(struct vmw_resource *res)
 {
 	vmw_kms_helper_buffer_revert(res->backup);
-	vmw_resource_unreserve(res, NULL, 0);
+	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
 
@@ -1916,7 +1921,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 out_revert:
 	vmw_kms_helper_buffer_revert(res->backup);
 out_unreserve:
-	vmw_resource_unreserve(res, NULL, 0);
+	vmw_resource_unreserve(res, false, NULL, 0);
 out_unlock:
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 	return ret;
@@ -1937,7 +1942,7 @@ void vmw_kms_helper_resource_finish(struct vmw_resource *res,
 	vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
 				     out_fence, NULL);
 
-	vmw_resource_unreserve(res, NULL, 0);
+	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
...
@@ -67,9 +67,23 @@ struct vmw_mob {
  * @size: Size of the table (page-aligned).
  * @page_table: Pointer to a struct vmw_mob holding the page table.
  */
-struct vmw_otable {
-	unsigned long size;
-	struct vmw_mob *page_table;
+static const struct vmw_otable pre_dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+};
+
+static const struct vmw_otable dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
 };
 
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
  */
 static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 				 SVGAOTableType type,
+				 struct ttm_buffer_object *otable_bo,
 				 unsigned long offset,
 				 struct vmw_otable *otable)
 {
@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 
 	BUG_ON(otable->page_table != NULL);
 
-	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+	vsgt = vmw_bo_sg_table(otable_bo);
 	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
 	WARN_ON(!vmw_piter_next(&iter));
 
@@ -218,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
 	otable->page_table = NULL;
 }
 
-/*
- * vmw_otables_setup - Set up guest backed memory object tables
- *
- * @dev_priv: Pointer to a device private structure
- *
- * Takes care of the device guest backed surface
- * initialization, by setting up the guest backed memory object tables.
- * Returns 0 on success and various error codes on failure. A succesful return
- * means the object tables can be taken down using the vmw_otables_takedown
- * function.
- */
-int vmw_otables_setup(struct vmw_private *dev_priv)
+static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
+				  struct vmw_otable_batch *batch)
 {
 	unsigned long offset;
 	unsigned long bo_size;
-	struct vmw_otable *otables;
+	struct vmw_otable *otables = batch->otables;
 	SVGAOTableType i;
 	int ret;
 
-	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
-			  GFP_KERNEL);
-	if (unlikely(otables == NULL)) {
-		DRM_ERROR("Failed to allocate space for otable "
-			  "metadata.\n");
-		return -ENOMEM;
-	}
-
-	otables[SVGA_OTABLE_MOB].size =
-		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SURFACE].size =
-		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
-	otables[SVGA_OTABLE_CONTEXT].size =
-		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SHADER].size =
-		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SCREENTARGET].size =
-		VMWGFX_NUM_GB_SCREEN_TARGET *
-		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
-
 	bo_size = 0;
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!otables[i].enabled)
+			continue;
+
 		otables[i].size =
 			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
 		bo_size += otables[i].size;
@@ -268,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 			    ttm_bo_type_device,
 			    &vmw_sys_ne_placement,
 			    0, false, NULL,
-			    &dev_priv->otable_bo);
+			    &batch->otable_bo);
 
 	if (unlikely(ret != 0))
 		goto out_no_bo;
 
-	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
-	ret = vmw_bo_map_dma(dev_priv->otable_bo);
+	ret = vmw_bo_map_dma(batch->otable_bo);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
 
-	ttm_bo_unreserve(dev_priv->otable_bo);
+	ttm_bo_unreserve(batch->otable_bo);
 
 	offset = 0;
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
-		ret = vmw_setup_otable_base(dev_priv, i, offset,
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!batch->otables[i].enabled)
+			continue;
+
+		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+					    offset,
 					    &otables[i]);
 		if (unlikely(ret != 0))
 			goto out_no_setup;
 		offset += otables[i].size;
 	}
 
-	dev_priv->otables = otables;
 	return 0;
 
 out_unreserve:
-	ttm_bo_unreserve(dev_priv->otable_bo);
+	ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
+	}
 
-	ttm_bo_unref(&dev_priv->otable_bo);
+	ttm_bo_unref(&batch->otable_bo);
out_no_bo:
-	kfree(otables);
 	return ret;
 }
 
 /*
- * vmw_otables_takedown - Take down guest backed memory object tables
+ * vmw_otables_setup - Set up guest backed memory object tables
  *
  * @dev_priv: Pointer to a device private structure
  *
- * Take down the Guest Memory Object tables.
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
  */
-void vmw_otables_takedown(struct vmw_private *dev_priv)
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
+	int ret;
+
+	if (dev_priv->has_dx) {
+		*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
+		if (*otables == NULL)
+			return -ENOMEM;
+
+		memcpy(*otables, dx_tables, sizeof(dx_tables));
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
+	} else {
+		*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
+		if (*otables == NULL)
+			return -ENOMEM;
+
+		memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
+	}
+
+	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
+	if (unlikely(ret != 0))
+		goto out_setup;
+
+	return 0;
+
+out_setup:
+	kfree(*otables);
+	return ret;
+}
+
+static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+				      struct vmw_otable_batch *batch)
 {
 	SVGAOTableType i;
-	struct ttm_buffer_object *bo = dev_priv->otable_bo;
+	struct ttm_buffer_object *bo = batch->otable_bo;
 	int ret;
 
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-		vmw_takedown_otable_base(dev_priv, i,
-					 &dev_priv->otables[i]);
+	for (i = 0; i < batch->num_otables; ++i)
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
 
 	ret = ttm_bo_reserve(bo, false, true, false, NULL);
 	BUG_ON(ret != 0);
@@ -332,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv)
 	vmw_fence_single_bo(bo, NULL);
 	ttm_bo_unreserve(bo);
 
-	ttm_bo_unref(&dev_priv->otable_bo);
-	kfree(dev_priv->otables);
-	dev_priv->otables = NULL;
+	ttm_bo_unref(&batch->otable_bo);
 }
 
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv: Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
+	kfree(dev_priv->otable_batch.otables);
+}
+
 /*
  * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
...
@@ -31,6 +31,7 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
@@ -144,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
-		res->hw_destroy(res);
 		mutex_lock(&dev_priv->binding_mutex);
-		vmw_context_binding_res_list_kill(&res->binding_head);
+		vmw_binding_res_list_kill(&res->binding_head);
 		mutex_unlock(&dev_priv->binding_mutex);
+		res->hw_destroy(res);
 	}
 
 	id = res->id;
@@ -1149,14 +1150,16 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
+* @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
-* switched.
-* @new_backup_offset: New backup offset if @new_backup is !NULL.
+* switched. May be NULL.
+* @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
 void vmw_resource_unreserve(struct vmw_resource *res,
+			    bool switch_backup,
 			    struct vmw_dma_buffer *new_backup,
 			    unsigned long new_backup_offset)
 {
@@ -1165,19 +1168,22 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (!list_empty(&res->lru_head))
 		return;
 
-	if (new_backup && new_backup != res->backup) {
+	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
 			lockdep_assert_held(&res->backup->base.resv->lock.base);
 			list_del_init(&res->mob_head);
 			vmw_dmabuf_unreference(&res->backup);
 		}
 
-		res->backup = vmw_dmabuf_reference(new_backup);
-		lockdep_assert_held(&new_backup->base.resv->lock.base);
-		list_add_tail(&res->mob_head, &new_backup->res_list);
+		if (new_backup) {
+			res->backup = vmw_dmabuf_reference(new_backup);
+			lockdep_assert_held(&new_backup->base.resv->lock.base);
+			list_add_tail(&res->mob_head, &new_backup->res_list);
+		} else {
+			res->backup = NULL;
+		}
 	}
-	if (new_backup)
+	if (switch_backup)
 		res->backup_offset = new_backup_offset;
 
 	if (!res->func->may_evict || res->id == -1 || res->pin_count)
@@ -1269,9 +1275,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 	if (res->func->needs_backup && res->backup == NULL &&
 	    !no_backup) {
 		ret = vmw_resource_buf_alloc(res, interruptible);
-		if (unlikely(ret != 0))
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to allocate a backup buffer "
+				  "of size %lu. bytes\n",
+				  (unsigned long) res->backup_size);
 			return ret;
+		}
 	}
 
 	return 0;
 }
@@ -1354,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 	struct ttm_validate_buffer val_buf;
 	unsigned err_count = 0;
 
-	if (likely(!res->func->may_evict))
+	if (!res->func->create)
 		return 0;
 
 	val_buf.bo = NULL;
@@ -1624,7 +1634,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 	res->pin_count++;
 
 out_no_validate:
-	vmw_resource_unreserve(res, NULL, 0UL);
+	vmw_resource_unreserve(res, false, NULL, 0UL);
 out_no_reserve:
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 	ttm_write_unlock(&dev_priv->reservation_sem);
@@ -1660,8 +1670,18 @@ void vmw_resource_unpin(struct vmw_resource *res)
 		ttm_bo_unreserve(&vbo->base);
 	}
 
-	vmw_resource_unreserve(res, NULL, 0UL);
+	vmw_resource_unreserve(res, false, NULL, 0UL);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 }
+
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+	return res->func->res_type;
+}
@@ -30,6 +30,12 @@
 
 #include "vmwgfx_drv.h"
 
+enum vmw_cmdbuf_res_state {
+	VMW_CMDBUF_RES_COMMITTED,
+	VMW_CMDBUF_RES_ADD,
+	VMW_CMDBUF_RES_DEL
+};
+
 /**
  * struct vmw_user_resource_conv - Identify a derived user-exported resource
  * type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@ struct vmw_user_resource_conv {
  * @bind: Bind a hardware resource to persistent buffer storage.
  * @unbind: Unbind a hardware resource from persistent
  * buffer storage.
+ * @commit_notify: If the resource is a command buffer managed resource,
+ * callback to notify that a define or remove command
+ * has been committed to the device.
  */
 struct vmw_res_func {
 	enum vmw_res_type res_type;
 	bool needs_backup;
@@ -71,6 +79,8 @@ struct vmw_res_func {
 	int (*unbind) (struct vmw_resource *res,
 		       bool readback,
 		       struct ttm_validate_buffer *val_buf);
+	void (*commit_notify)(struct vmw_resource *res,
+			      enum vmw_cmdbuf_res_state state);
 };
 
 int vmw_resource_alloc_id(struct vmw_resource *res);
...
This diff is collapsed.
/**************************************************************************
* Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef VMW_SO_H
#define VMW_SO_H
enum vmw_view_type {
vmw_view_sr,
vmw_view_rt,
vmw_view_ds,
vmw_view_max,
};
enum vmw_so_type {
vmw_so_el,
vmw_so_bs,
vmw_so_ds,
vmw_so_rs,
vmw_so_ss,
vmw_so_so,
vmw_so_max,
};
/**
* union vmw_view_destroy - view destruction command body
*
* @rtv: RenderTarget view destruction command body
* @srv: ShaderResource view destruction command body
* @dsv: DepthStencil view destruction command body
* @view_id: A single u32 view id.
*
* The assumption here is that all union members are really represented by a
* single u32 in the command stream. If that's not the case,
* the size of this union will not equal the size of an u32, and the
* assumption is invalid, and we detect that at compile time in the
* vmw_so_build_asserts() function.
*/
union vmw_view_destroy {
struct SVGA3dCmdDXDestroyRenderTargetView rtv;
struct SVGA3dCmdDXDestroyShaderResourceView srv;
struct SVGA3dCmdDXDestroyDepthStencilView dsv;
u32 view_id;
};
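/*
 * Illustrative sketch, not part of this patch: the size assumption above can
 * be verified at compile time much like vmw_so_build_asserts() is described
 * to do, for example:
 */
#if 0
static void example_view_destroy_asserts(void)
{
	BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
}
#endif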
/* Map enum vmw_view_type to view destroy command ids */
extern const u32 vmw_view_destroy_cmds[];
/* Map enum vmw_view_type to SVGACOTableType */
extern const SVGACOTableType vmw_view_cotables[];
/* Map enum vmw_so_type to SVGACOTableType */
extern const SVGACOTableType vmw_so_cotables[];
/*
* vmw_view_cmd_to_type - Return the view type for a create or destroy command
*
* @id: The SVGA3D command id.
*
* For a given view create or destroy command id, return the corresponding
* enum vmw_view_type. If the command is unknown, return vmw_view_max.
* The validity of the simplified calculation is verified in the
* vmw_so_build_asserts() function.
*/
static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
{
u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
if (tmp > (u32)vmw_view_max)
return vmw_view_max;
return (enum vmw_view_type) tmp;
}
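/*
 * Illustrative note, not part of this patch: the division by two above
 * assumes the device command ids interleave define/destroy pairs in enum
 * vmw_view_type order, roughly:
 *
 *   SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW   base + 0  (vmw_view_sr)
 *   SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW  base + 1
 *   SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW     base + 2  (vmw_view_rt)
 *   SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW    base + 3
 *   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW     base + 4  (vmw_view_ds)
 *   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW    base + 5
 *
 * so the define and destroy ids of a view type both map to the same enum
 * value.
 */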
/*
* vmw_so_cmd_to_type - Return the state object type for a
* create or destroy command
*
* @id: The SVGA3D command id.
*
* For a given state object create or destroy command id,
* return the corresponding enum vmw_so_type. If the command is unknown,
* return vmw_so_max. We should perhaps optimize this function using
* a similar strategy as vmw_view_cmd_to_type().
*/
static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
{
switch (id) {
case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
return vmw_so_el;
case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
return vmw_so_bs;
case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
return vmw_so_ds;
case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
return vmw_so_rs;
case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
return vmw_so_ss;
case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
return vmw_so_so;
default:
break;
}
return vmw_so_max;
}
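/*
 * Illustrative sketch, not part of this patch, of the optimization hinted at
 * above: if the define/destroy state-object ids are interleaved just like
 * the view command ids, the switch could be replaced by arithmetic such as
 *
 *	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT) / 2;
 *
 *	return (tmp < (u32)vmw_so_max) ?
 *		(enum vmw_so_type) tmp : vmw_so_max;
 *
 * which is only valid under that id-layout assumption; the switch version
 * does not depend on it.
 */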
/*
* View management - vmwgfx_so.c
*/
extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource *ctx,
struct vmw_resource *srf,
enum vmw_view_type view_type,
u32 user_key,
const void *cmd,
size_t cmd_size,
struct list_head *list);
extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, enum vmw_view_type view_type,
struct list_head *list,
struct vmw_resource **res_p);
extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
struct list_head *view_list);
extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
struct list_head *list,
bool readback);
extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_view_type view_type,
u32 user_key);
#endif
@@ -561,6 +561,7 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
 				      true, /* a scanout buffer */
 				      content_srf.mip_levels[0],
 				      content_srf.multisample_count,
+				      0,
 				      display_base_size,
 				      &display_srf);
 	if (unlikely(ret != 0)) {
...
This diff is collapsed.