Commit 65f0505b authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A few regression fixes already, one for my own stupidity, and mgag200
  typo fix, vmwgfx fixes and ttm regression fixes, and a radeon register
  checker update for older cards to handle geom shaders"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: allow geom rings to be setup on r600/r700 (v2)
  drm/mgag200,ast,cirrus: fix regression with drm_can_sleep conversion
  drm/ttm: Don't clear page metadata of imported sg pages
  drm/ttm: Fix TTM object open regression
  vmwgfx: Fix unitialized stack read in vmw_setup_otable_base
  drm/vmwgfx: Reemit context bindings when necessary v2
  drm/vmwgfx: Detect old user-space drivers and set up legacy emulation v2
  drm/vmwgfx: Emulate legacy shaders on guest-backed devices v2
  drm/vmwgfx: Fix legacy surface reference size copyback
  drm/vmwgfx: Fix SET_SHADER_CONST emulation on guest-backed devices
  drm/vmwgfx: Fix regression caused by "drm/ttm: make ttm reservation calls behave like reservation calls"
  drm/vmwgfx: Don't commit staged bindings if execbuf fails
  drm/mgag200: fix typo causing bw limits to be ignored on some chips
parents ef42c58a 7c4c62a0
drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
 		ret = ast_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
 		ret = cirrus_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
 		ret = mgag200_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
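The three hunks above share one fix: the drm_can_sleep() conversion had left the test inverted, so the framebuffer dirty-update path attempted a sleeping buffer reserve exactly when sleeping was forbidden, and skipped it when it was allowed. A minimal standalone sketch of the intended logic; can_sleep and try_reserve() are hypothetical stand-ins for the driver helpers, not driver code:

#include <stdbool.h>
#include <stdio.h>

static bool can_sleep = true;	/* stand-in for drm_can_sleep() */

static int try_reserve(void)	/* stand-in for *_bo_reserve() */
{
	return 0;		/* pretend the reserve succeeded */
}

static void dirty_update(void)
{
	int ret = -16;		/* -EBUSY until a reserve is attempted */

	if (can_sleep)		/* the fix: no '!' in front */
		ret = try_reserve();
	if (ret) {
		printf("storing up damage for later (ret=%d)\n", ret);
		return;
	}
	printf("flushing damage now\n");
}

int main(void)
{
	dirty_update();		/* process context: reserves and flushes */
	can_sleep = false;
	dirty_update();		/* atomic context: defers the damage */
	return 0;
}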
drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 		   (mga_vga_calculate_mode_bandwidth(mode, bpp)
 		    > (32700 * 1024))) {
 		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_EH &&
+	} else if (mdev->type == G200_EH &&
 		   (mga_vga_calculate_mode_bandwidth(mode, bpp)
 		    > (37500 * 1024))) {
 		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_ER &&
+	} else if (mdev->type == G200_ER &&
 		   (mga_vga_calculate_mode_bandwidth(mode,
 						     bpp) > (55000 * 1024))) {
 		return MODE_BANDWIDTH;
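This is the typo fix the merge message mentions: mode->type holds DRM mode flags while mdev->type holds the chip generation, so the comparison almost never matched and the G200_EH/G200_ER bandwidth caps were silently skipped. A rough standalone illustration; mode_bandwidth() is a made-up stand-in for mga_vga_calculate_mode_bandwidth() (its real units differ), only the chip names and caps come from the hunk:

#include <stdio.h>

enum mga_type { G200_EH, G200_ER };	/* chip generations, as in mdev->type */

/* hypothetical stand-in: pixel clock (kHz) -> rough bytes per second */
static long long mode_bandwidth(long long clock_khz, int bpp)
{
	return clock_khz * 1000 * bpp / 8;
}

static const char *mode_valid(enum mga_type chip, long long clock_khz, int bpp)
{
	long long bw = mode_bandwidth(clock_khz, bpp);

	/* per-chip caps from the hunk; reachable only via the chip type */
	if (chip == G200_EH && bw > 37500LL * 1024)
		return "MODE_BANDWIDTH";
	if (chip == G200_ER && bw > 55000LL * 1024)
		return "MODE_BANDWIDTH";
	return "MODE_OK";
}

int main(void)
{
	/* a 148.5 MHz (1080p) mode at 32bpp exceeds the EH cap here */
	printf("%s\n", mode_valid(G200_EH, 148500, 32));
	return 0;
}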
drivers/gpu/drm/radeon/r600_cs.c
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case R_008C64_SQ_VSTMP_RING_SIZE:
 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
 		/* get value to populate the IB don't remove */
-		tmp =radeon_get_ib_value(p, idx);
-		ib[idx] = 0;
+		/*tmp =radeon_get_ib_value(p, idx);
+		ib[idx] = 0;*/
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		break;
 	case SQ_CONFIG:
 		track->sq_config = radeon_get_ib_value(p, idx);
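The new register cases follow the command-stream checker's usual relocation pattern: the userspace-supplied dword is an offset, and the checker patches in the buffer's relocated GPU address in 256-byte units (the >> 8), as the hunk does. A standalone model of just that patching step, not the radeon parser itself:

#include <stdint.h>
#include <stdio.h>

struct reloc { uint64_t gpu_offset; };	/* resolved buffer placement */

/* patch one indirect-buffer dword with the relocated ring base,
 * expressed in 256-byte units as in the hunk */
static void patch_ring_base(uint32_t *ib, int idx, const struct reloc *r)
{
	ib[idx] += (uint32_t)((r->gpu_offset >> 8) & 0xffffffff);
}

int main(void)
{
	uint32_t ib[4] = { 0 };				/* offset 0 from userspace */
	struct reloc r = { .gpu_offset = 0x100000 };	/* 1 MiB placement */

	patch_ring_base(ib, 0, &r);
	printf("patched dword: 0x%08x\n", (unsigned)ib[0]);	/* 0x00001000 */
	return 0;
}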
drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
  * 2.34.0 - Add CIK tiling mode array query
  * 2.35.0 - Add CIK macrotile mode array query
  * 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	36
+#define KMS_DRIVER_MINOR	37
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
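Bumping KMS_DRIVER_MINOR is how userspace discovers the new capability: a Mesa-like client gates geometry-shader ring setup on minor >= 37. A hedged example of such a check using libdrm's drmGetVersion(); the device node path is illustrative:

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>	/* libdrm; build with -ldrm */

static bool has_gs_ring_setup(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	bool ok;

	if (!v)
		return false;
	/* the feature is advertised from radeon KMS 2.37.0 onward */
	ok = v->version_major > 2 ||
	     (v->version_major == 2 && v->version_minor >= 37);
	drmFreeVersion(v);
	return ok;
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative node */

	if (fd < 0)
		return 1;
	printf("GS ring setup: %s\n", has_gs_ring_setup(fd) ? "yes" : "no");
	close(fd);
	return 0;
}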
drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
 0x00028A40 VGT_GS_MODE
 0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
 0x000088C8 VGT_GS_PER_ES
 0x000088E8 VGT_GS_PER_VS
 0x000088D4 VGT_GS_VERTEX_REUSE
drivers/gpu/drm/ttm/ttm_object.c
@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-			if (!kref_get_unless_zero(&ref->kref)) {
+			if (kref_get_unless_zero(&ref->kref)) {
 				rcu_read_unlock();
 				break;
 			}
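The TTM object open regression fix restores the usual lookup idiom: kref_get_unless_zero() returns true when it managed to take a reference, so true is the "found it" case, and the inverted test treated every successful lookup as a failure. A simplified single-threaded model of the idiom (the kernel version uses atomics under RCU):

#include <stdbool.h>
#include <stdio.h>

struct obj { int refcount; };

/* take a reference unless the count has already dropped to zero */
static bool get_unless_zero(struct obj *o)
{
	if (o->refcount == 0)
		return false;	/* object is on its way out: don't use it */
	o->refcount++;
	return true;
}

int main(void)
{
	struct obj live = { .refcount = 1 };
	struct obj dying = { .refcount = 0 };

	/* correct sense: 'true' means we now hold a reference */
	printf("live:  %s\n", get_unless_zero(&live) ? "got ref" : "retry");
	printf("dying: %s\n", get_unless_zero(&dying) ? "got ref" : "retry");
	return 0;
}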
drivers/gpu/drm/ttm/ttm_tt.c
@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
 	pgoff_t i;
 	struct page **page = ttm->pages;
 
+	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+		return;
+
 	for (i = 0; i < ttm->num_pages; ++i) {
 		(*page)->mapping = NULL;
 		(*page++)->index = 0;
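Pages imported through an SG table are owned by the exporting driver, so TTM must not reset their mapping and index fields; the added guard skips the clearing loop for them. A tiny model of the guard; the flag value is illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_FLAG_SG (1u << 8)	/* illustrative bit, not TTM's value */

static void clear_mapping(uint32_t page_flags, int num_pages)
{
	if (page_flags & PAGE_FLAG_SG)	/* imported pages: not ours to touch */
		return;
	for (int i = 0; i < num_pages; ++i)
		printf("clearing page %d metadata\n", i);
}

int main(void)
{
	clear_mapping(0, 2);		/* native pages: cleared */
	clear_mapping(PAGE_FLAG_SG, 2);	/* imported pages: left alone */
	return 0;
}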
drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -2583,4 +2583,28 @@ typedef union {
    float  f;
 } SVGA3dDevCapResult;
 
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */
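These additions define the caps record that the compat path later in this commit fills in: header.length counts 32-bit words and covers the header plus the cap pairs, which is how vmw_fill_compat_cap() computes it in vmwgfx_ioctl.c below. A standalone sketch of that size calculation, with uint32 spelled uint32_t and an illustrative pair count:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t uint32;

typedef enum {
	SVGA3DCAPS_RECORD_UNKNOWN = 0,
	SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
} SVGA3dCapsRecordType;

typedef struct SVGA3dCapsRecordHeader {
	uint32 length;			/* record size in 32-bit words */
	SVGA3dCapsRecordType type;
} SVGA3dCapsRecordHeader;

typedef uint32 SVGA3dCapPair[2];	/* {capability index, value} */

#define NUM_CAPS 4			/* illustrative pair count */

struct caps_record {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[NUM_CAPS];
};

int main(void)
{
	struct caps_record rec;

	rec.header.type = SVGA3DCAPS_RECORD_DEVCAPS;
	rec.header.length = (offsetof(struct caps_record, pairs) +
			     NUM_CAPS * sizeof(SVGA3dCapPair)) / sizeof(uint32);
	printf("record length: %u words\n", (unsigned)rec.header.length);
	return 0;
}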
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
 
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
 static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 
 static uint64_t vmw_user_context_size;
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
 	if (res->func->destroy == vmw_gb_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
+		mutex_lock(&dev_priv->binding_mutex);
+		(void) vmw_context_binding_state_kill
+			(&container_of(res, struct vmw_user_context, res)->cbs);
 		(void) vmw_gb_context_destroy(res);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->binding_mutex);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 		return;
 	}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_kill(&uctx->cbs);
+	vmw_context_binding_state_scrub(&uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBContext body;
 	} *cmd;
-	struct vmw_user_context *uctx =
-		container_of(res, struct vmw_user_context, res);
-
-	BUG_ON(!list_empty(&uctx->cbs.list));
 
 	if (likely(res->id == -1))
 		return 0;
@@ -528,8 +530,9 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
  * vmw_context_scrub_shader - scrub a shader binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = SVGA3D_INVALID_ID;
+	cmd->body.shid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
  * from a context.
 *
 * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
 */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.sid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	cmd->body.target.face = 0;
 	cmd->body.target.mipmap = 0;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
 	cmd->body.c.cid = bi->ctx->id;
 	cmd->body.s1.stage = bi->i1.texture_stage;
 	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	cmd->body.s1.value =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
 		vmw_context_binding_drop(loc);
 
 	loc->bi = *bi;
+	loc->bi.scrubbed = false;
 	list_add_tail(&loc->ctx_list, &cbs->list);
 	INIT_LIST_HEAD(&loc->res_list);
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 	if (loc->bi.ctx != NULL)
 		vmw_context_binding_drop(loc);
 
-	loc->bi = *bi;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	if (bi->res != NULL)
+	if (bi->res != NULL) {
+		loc->bi = *bi;
+		list_add_tail(&loc->ctx_list, &cbs->list);
 		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	else
-		INIT_LIST_HEAD(&loc->res_list);
+	}
 }
 
 /**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 */
 static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
 {
-	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	if (!cb->bi.scrubbed) {
+		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+		cb->bi.scrubbed = true;
+	}
 	vmw_context_binding_drop(cb);
 }
@@ -767,6 +780,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 		vmw_context_binding_kill(entry);
 }
 
+/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
 /**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
@@ -784,6 +818,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
 		vmw_context_binding_kill(entry);
 }
 
+/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
 /**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
 	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
 		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+	struct vmw_ctx_binding *entry;
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->bi.scrubbed))
+			continue;
+
+		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+			    SVGA3D_INVALID_ID))
+			continue;
+
+		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->bi.scrubbed = false;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
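Taken together, these context changes replace "kill on unbind" with a scrub/rebind lifecycle: scrubbing emits unbind commands to the device but keeps the binding tracked with scrubbed = true, and vmw_context_rebind_all() re-emits the bindings the next time the context is used. A minimal standalone model of that lifecycle, not driver code:

#include <stdbool.h>
#include <stdio.h>

struct binding {
	const char *name;
	int res_id;
	bool scrubbed;	/* device no longer holds this binding */
};

static void scrub(struct binding *b)	/* emit unbind, keep tracking */
{
	if (!b->scrubbed) {
		printf("unbind %s (id -> INVALID)\n", b->name);
		b->scrubbed = true;
	}
}

static int rebind_all(struct binding *b, int n)	/* before next submission */
{
	for (int i = 0; i < n; ++i) {
		if (!b[i].scrubbed)
			continue;
		printf("rebind %s (id -> %d)\n", b[i].name, b[i].res_id);
		b[i].scrubbed = false;
	}
	return 0;
}

int main(void)
{
	struct binding b[] = {
		{ "shader",        7, false },
		{ "render target", 9, false },
	};

	scrub(&b[0]);		/* e.g. the shader's backing store was evicted */
	rebind_all(b, 2);	/* context used again: restore its bindings */
	return 0;
}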
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
+	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
+	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+	if (IS_ERR(vmw_fp->shman))
+		goto out_no_shman;
+
 	file_priv->driver_priv = vmw_fp;
 	dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
 	return 0;
 
+out_no_shman:
+	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
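The open-path change extends the driver's goto-unwind error handling: each new setup step gets a label that releases exactly what succeeded before it, in reverse order. A generic self-contained sketch of the pattern with illustrative names, not the driver's:

#include <stdio.h>
#include <stdlib.h>

/* each failure label undoes only the steps that completed before it */
static int open_file_context(int fail_step)
{
	char *tfile, *shman;
	int ret = -12;				/* -ENOMEM */

	tfile = fail_step == 1 ? NULL : malloc(32);	/* step 1 */
	if (!tfile)
		goto out_no_tfile;

	shman = fail_step == 2 ? NULL : malloc(32);	/* step 2 (the new one) */
	if (!shman)
		goto out_no_shman;

	printf("open ok\n");
	free(shman);				/* demo only: a real open keeps these */
	free(tfile);
	return 0;

out_no_shman:
	free(tfile);				/* unwind step 1 only */
out_no_tfile:
	return ret;
}

int main(void)
{
	printf("ret = %d\n", open_file_context(2));	/* step 2 fails */
	return 0;
}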
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -75,10 +75,14 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
+	bool gb_aware;
+	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
 	struct vmw_resource *ctx;
 	struct vmw_resource *res;
 	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
 	union {
 		SVGA3dShaderType shader_type;
 		SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct ttm_object_file *tfile;
+	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
+	struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
 vmw_context_binding_state_transfer(struct vmw_resource *res,
 				   struct vmw_ctx_binding_state *cbs);
 extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
 /*
 * Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+				    SVGA3dShaderType shader_type,
+				    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+				    u32 user_key,
+				    SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct ttm_object_file *tfile,
+				 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
 
 /**
 * Inline helper functions
This diff is collapsed.
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
 
+struct svga_3d_compat_cap {
+	SVGA3dCapsRecordHeader header;
+	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_vmw_getparam_arg *param =
 	    (struct drm_vmw_getparam_arg *)data;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	switch (param->param) {
 	case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+			param->value = SVGA3D_HWVERSION_WS8_B1;
+			break;
+		}
+
 		param->value =
 			ioread32(fifo_mem +
 				 ((fifo->capabilities &
@@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
-		param->value = dev_priv->memory_size;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    !vmw_fp->gb_aware)
+			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+		else
+			param->value = dev_priv->memory_size;
 		break;
 	case DRM_VMW_PARAM_3D_CAPS_SIZE:
-		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
-			param->value = SVGA3D_DEVCAP_MAX;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    vmw_fp->gb_aware)
+			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			param->value = sizeof(struct svga_3d_compat_cap) +
+				sizeof(uint32_t);
 		else
 			param->value = (SVGA_FIFO_3D_CAPS_LAST -
-					SVGA_FIFO_3D_CAPS + 1);
-		param->value *= sizeof(uint32_t);
+					SVGA_FIFO_3D_CAPS + 1) *
+				sizeof(uint32_t);
 		break;
 	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+		vmw_fp->gb_aware = true;
 		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
 		break;
 	default:
@@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 
 	return 0;
 }
 
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+			       size_t size)
+{
+	struct svga_3d_compat_cap *compat_cap =
+		(struct svga_3d_compat_cap *) bounce;
+	unsigned int i;
+	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+	unsigned int max_size;
+
+	if (size < pair_offset)
+		return -EINVAL;
+
+	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+	if (max_size > SVGA3D_DEVCAP_MAX)
+		max_size = SVGA3D_DEVCAP_MAX;
+
+	compat_cap->header.length =
+		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+	mutex_lock(&dev_priv->hw_mutex);
+	for (i = 0; i < max_size; ++i) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+		compat_cap->pairs[i][0] = i;
+		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return 0;
+}
+
 int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
@@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	void *bounce;
 	int ret;
 	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	if (unlikely(arg->pad64 != 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
 
-	if (gb_objects)
-		size = SVGA3D_DEVCAP_MAX;
+	if (gb_objects && vmw_fp->gb_aware)
+		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+	else if (gb_objects)
+		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
 	else
-		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
-	size *= sizeof(uint32_t);
+		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+			sizeof(uint32_t);
 
 	if (arg->max_size < size)
 		size = arg->max_size;
 
-	bounce = vmalloc(size);
+	bounce = vzalloc(size);
 	if (unlikely(bounce == NULL)) {
 		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
 		return -ENOMEM;
 	}
 
-	if (gb_objects) {
-		int i;
+	if (gb_objects && vmw_fp->gb_aware) {
+		int i, num;
 		uint32_t *bounce32 = (uint32_t *) bounce;
 
+		num = size / sizeof(uint32_t);
+		if (num > SVGA3D_DEVCAP_MAX)
+			num = SVGA3D_DEVCAP_MAX;
+
 		mutex_lock(&dev_priv->hw_mutex);
-		for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 		}
 		mutex_unlock(&dev_priv->hw_mutex);
+	} else if (gb_objects) {
+		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+		if (unlikely(ret != 0))
+			goto out_err;
 	} else {
 		fifo_mem = dev_priv->mmio_virt;
 		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
 	}
@@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	ret = copy_to_user(buffer, bounce, size);
 	if (ret)
 		ret = -EFAULT;
+out_err:
 	vfree(bounce);
 
 	if (unlikely(ret != 0))
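The ioctl changes implement a small handshake for detecting old userspace: querying DRM_VMW_PARAM_MAX_MOB_MEMORY marks the client as guest-backed aware, and later parameter and caps queries answer differently for legacy clients so old drivers keep working on guest-backed hardware. A standalone model of that handshake; the sizes are illustrative, not the device's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum param { MAX_SURF_MEMORY, MAX_MOB_MEMORY };

struct client { bool gb_aware; };

static uint64_t getparam(struct client *c, enum param p)
{
	const uint64_t memory_size = 512ull << 20;	/* illustrative */
	const uint64_t mob_memory  = 2048ull << 20;

	switch (p) {
	case MAX_MOB_MEMORY:
		c->gb_aware = true;	/* the query itself is the signal */
		return mob_memory;
	case MAX_SURF_MEMORY:
		/* legacy clients on gb hardware get an emulated cap */
		return c->gb_aware ? memory_size : mob_memory / 2;
	}
	return 0;
}

int main(void)
{
	struct client legacy = { false }, aware = { false };

	printf("legacy surf cap: %llu MiB\n",
	       (unsigned long long)(getparam(&legacy, MAX_SURF_MEMORY) >> 20));
	getparam(&aware, MAX_MOB_MEMORY);		/* the handshake */
	printf("aware  surf cap: %llu MiB\n",
	       (unsigned long long)(getparam(&aware, MAX_SURF_MEMORY) >> 20));
	return 0;
}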
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+		ret = -ENOMEM;
 		goto out_no_fifo;
 	}
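This one-liner is the uninitialized-stack-read fix from the shortlog: on the FIFO-reserve failure path, ret was never assigned before being returned through the cleanup label. A self-contained sketch of the bug class and the fix:

#include <stdio.h>

static int setup(int fifo_ok)
{
	int ret = 0;	/* in the buggy original, earlier calls set this */

	if (!fifo_ok) {
		fprintf(stderr, "Failed reserving FIFO space.\n");
		ret = -12;	/* -ENOMEM: the added assignment */
		goto out_no_fifo;
	}
	return 0;

out_no_fifo:
	/* without the assignment above, 'ret' could hold a stale value */
	return ret;
}

int main(void)
{
	printf("setup: %d\n", setup(0));
	return 0;
}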
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 	return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
 
 /**
 * vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
 		vmw_dmabuf_unreference(&res->backup);
 	}
 
-	if (likely(res->hw_destroy != NULL))
+	if (likely(res->hw_destroy != NULL)) {
 		res->hw_destroy(res);
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_context_binding_res_list_kill(&res->binding_head);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
 
 	id = res->id;
 	if (res->res_free != NULL)
This diff is collapsed.
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	    rep->size_addr;
 
 	if (user_sizes)
-		ret = copy_to_user(user_sizes, srf->sizes,
-				   srf->num_sizes * sizeof(*srf->sizes));
+		ret = copy_to_user(user_sizes, &srf->base_size,
+				   sizeof(srf->base_size));
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_kill(&res->binding_head);
+	vmw_context_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {