Commit 668b2066 authored by Zack Rusin

drm/vmwgfx: Stop using raw ttm_buffer_objects

Various bits of the driver used raw ttm_buffer_object instead of the
driver-specific vmw_bo object. All of those places duplicated the
mapped bo caching policy of vmw_bo.

Instead of duplicating all of that code and special-casing various
functions to work with both vmw_bo and raw ttm_buffer_objects, unify
the buffer object handling code.

As part of that work, fix the naming of bos: e.g. instead of the
generic 'backup' use 'guest_memory', because that's what it really is.

All of this makes the driver easier to maintain and the code easier to
read. It also saves 100+ lines of code.
Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20230131033542.953249-9-zack@kde.org
parent 39985eea
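To illustrate the new interface, here is a minimal sketch of the allocation
pattern this patch introduces, paraphrased from the vmw_cmdbuf_set_pool_size()
hunk in the diff below. The surrounding variables (dev_priv, size, map) are
assumed from that context; this is illustrative, not a verbatim excerpt:

    struct vmw_bo *vbo;
    /* Allocation parameters are now gathered in one struct instead of
     * being passed piecemeal to vmw_bo_create()/vmw_bo_create_kernel(). */
    struct vmw_bo_params bo_params = {
        .domain      = VMW_BO_DOMAIN_MOB,
        .busy_domain = VMW_BO_DOMAIN_MOB,
        .bo_type     = ttm_bo_type_kernel,
        .size        = size,
        .pin         = true
    };
    int ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
    if (ret)
        return ret;

    /* The embedded TTM object is reached through the renamed 'tbo' member,
     * and mappings reuse the cached vmw_bo kmap rather than a private
     * ttm_bo_kmap_obj. */
    map = vmw_bo_map_and_cache(vbo);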
...@@ -49,54 +49,54 @@ enum vmw_bo_domain { ...@@ -49,54 +49,54 @@ enum vmw_bo_domain {
VMW_BO_DOMAIN_MOB = BIT(4), VMW_BO_DOMAIN_MOB = BIT(4),
}; };
struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
size_t size;
bool pin;
};
/** /**
* struct vmw_bo - TTM buffer object with vmwgfx additions * struct vmw_bo - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object * @tbo: The TTM buffer object
* @placement: The preferred placement for this buffer object
* @places: The chosen places for the preferred placement.
* @busy_places: Chosen busy places for the preferred placement
* @map: Kmap object for semi-persistent mappings
* @res_tree: RB tree of resources using this buffer object as a backing MOB * @res_tree: RB tree of resources using this buffer object as a backing MOB
* @res_prios: Eviction priority counts for attached resources
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation. * increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @map: Kmap object for semi-persistent mappings
* @res_prios: Eviction priority counts for attached resources
* @dirty: structure for user-space dirty-tracking * @dirty: structure for user-space dirty-tracking
*/ */
struct vmw_bo { struct vmw_bo {
struct ttm_buffer_object base; struct ttm_buffer_object tbo;
struct ttm_placement placement; struct ttm_placement placement;
struct ttm_place places[5]; struct ttm_place places[5];
struct ttm_place busy_places[5]; struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
struct rb_root res_tree; struct rb_root res_tree;
u32 res_prios[TTM_MAX_BO_PRIORITY];
atomic_t cpu_writers; atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */ /* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx; struct vmw_resource *dx_query_ctx;
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
u32 res_prios[TTM_MAX_BO_PRIORITY];
struct vmw_bo_dirty *dirty; struct vmw_bo_dirty *dirty;
}; };
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain); void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo); void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);
int vmw_bo_create_kernel(struct vmw_private *dev_priv,
unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo);
int vmw_bo_create(struct vmw_private *dev_priv, int vmw_bo_create(struct vmw_private *dev_priv,
size_t size, struct vmw_bo_params *params,
u32 domain,
u32 busy_domain,
bool interruptible, bool pin,
struct vmw_bo **p_bo); struct vmw_bo **p_bo);
int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_bo *vmw_bo,
size_t size,
u32 domain,
u32 busy_domain,
bool interruptible, bool pin);
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
...@@ -118,9 +118,6 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, ...@@ -118,9 +118,6 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr); SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int vmw_user_bo_lookup(struct drm_file *filp,
uint32_t handle,
struct vmw_bo **out);
void vmw_bo_fence_single(struct ttm_buffer_object *bo, void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence); struct vmw_fence_obj *fence);
...@@ -131,6 +128,9 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, ...@@ -131,6 +128,9 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem); struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo); void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
int vmw_user_bo_lookup(struct drm_file *filp,
u32 handle,
struct vmw_bo **out);
/** /**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
* according to attached resources * according to attached resources
...@@ -142,12 +142,12 @@ static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo) ...@@ -142,12 +142,12 @@ static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
while (i--) { while (i--) {
if (vbo->res_prios[i]) { if (vbo->res_prios[i]) {
vbo->base.priority = i; vbo->tbo.priority = i;
return; return;
} }
} }
vbo->base.priority = 3; vbo->tbo.priority = 3;
} }
/** /**
...@@ -166,7 +166,7 @@ static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio) ...@@ -166,7 +166,7 @@ static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
} }
/** /**
* vmw_bo_prio_del - Notify a buffer object of a resource with a certain * vmw_bo_used_prio_del - Notify a buffer object of a resource with a certain
* priority being removed * priority being removed
* @vbo: The struct vmw_bo * @vbo: The struct vmw_bo
* @prio: The resource priority * @prio: The resource priority
...@@ -186,18 +186,18 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf) ...@@ -186,18 +186,18 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)
*buf = NULL; *buf = NULL;
if (tmp_buf) if (tmp_buf)
ttm_bo_put(&tmp_buf->base); ttm_bo_put(&tmp_buf->tbo);
} }
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf) static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{ {
ttm_bo_get(&buf->base); ttm_bo_get(&buf->tbo);
return buf; return buf;
} }
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj) static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{ {
return container_of((gobj), struct vmw_bo, base.base); return container_of((gobj), struct vmw_bo, tbo.base);
} }
#endif // VMWGFX_BO_H #endif // VMWGFX_BO_H
...@@ -567,7 +567,7 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv, ...@@ -567,7 +567,7 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
* without writing to the query result structure. * without writing to the query result structure.
*/ */
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base; struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body; SVGA3dCmdWaitForQuery body;
...@@ -613,7 +613,7 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv, ...@@ -613,7 +613,7 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
* without writing to the query result structure. * without writing to the query result structure.
*/ */
struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base; struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body; SVGA3dCmdWaitForGBQuery body;
......
...@@ -80,7 +80,6 @@ struct vmw_cmdbuf_context { ...@@ -80,7 +80,6 @@ struct vmw_cmdbuf_context {
* frees are protected by @lock. * frees are protected by @lock.
* @cmd_space: Buffer object for the command buffer space, unless we were * @cmd_space: Buffer object for the command buffer space, unless we were
* able to make a contigous coherent DMA memory allocation, @handle. Immutable. * able to make a contigous coherent DMA memory allocation, @handle. Immutable.
* @map_obj: Mapping state for @cmd_space. Immutable.
* @map: Pointer to command buffer space. May be a mapped buffer object or * @map: Pointer to command buffer space. May be a mapped buffer object or
* a contigous coherent DMA memory allocation. Immutable. * a contigous coherent DMA memory allocation. Immutable.
* @cur: Command buffer for small kernel command submissions. Protected by * @cur: Command buffer for small kernel command submissions. Protected by
...@@ -117,8 +116,7 @@ struct vmw_cmdbuf_man { ...@@ -117,8 +116,7 @@ struct vmw_cmdbuf_man {
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX]; struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
struct list_head error; struct list_head error;
struct drm_mm mm; struct drm_mm mm;
struct ttm_buffer_object *cmd_space; struct vmw_bo *cmd_space;
struct ttm_bo_kmap_obj map_obj;
u8 *map; u8 *map;
struct vmw_cmdbuf_header *cur; struct vmw_cmdbuf_header *cur;
size_t cur_pos; size_t cur_pos;
...@@ -889,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, ...@@ -889,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset; header->cmd = man->map + offset;
if (man->using_mob) { if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB; cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start; cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
cb_hdr->ptr.mob.mobOffset = offset; cb_hdr->ptr.mob.mobOffset = offset;
} else { } else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset; cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
...@@ -1222,7 +1220,6 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, ...@@ -1222,7 +1220,6 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{ {
struct vmw_private *dev_priv = man->dev_priv; struct vmw_private *dev_priv = man->dev_priv;
bool dummy;
int ret; int ret;
if (man->has_pool) if (man->has_pool)
...@@ -1235,6 +1232,13 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) ...@@ -1235,6 +1232,13 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
if (man->map) { if (man->map) {
man->using_mob = false; man->using_mob = false;
} else { } else {
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.bo_type = ttm_bo_type_kernel,
.size = size,
.pin = true
};
/* /*
* DMA memory failed. If we can have command buffers in a * DMA memory failed. If we can have command buffers in a
* MOB, try to use that instead. Note that this will * MOB, try to use that instead. Note that this will
...@@ -1245,19 +1249,12 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) ...@@ -1245,19 +1249,12 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
!dev_priv->has_mob) !dev_priv->has_mob)
return -ENOMEM; return -ENOMEM;
ret = vmw_bo_create_kernel(dev_priv, size, ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
&vmw_mob_placement,
&man->cmd_space);
if (ret) if (ret)
return ret; return ret;
man->using_mob = true; man->map = vmw_bo_map_and_cache(man->cmd_space);
ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT, man->using_mob = man->map;
&man->map_obj);
if (ret)
goto out_no_map;
man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
} }
man->size = size; man->size = size;
...@@ -1277,14 +1274,6 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) ...@@ -1277,14 +1274,6 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
(man->using_mob) ? "MOB" : "DMA"); (man->using_mob) ? "MOB" : "DMA");
return 0; return 0;
out_no_map:
if (man->using_mob) {
ttm_bo_put(man->cmd_space);
man->cmd_space = NULL;
}
return ret;
} }
/** /**
...@@ -1383,14 +1372,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) ...@@ -1383,14 +1372,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
man->has_pool = false; man->has_pool = false;
man->default_size = VMW_CMDBUF_INLINE_SIZE; man->default_size = VMW_CMDBUF_INLINE_SIZE;
(void) vmw_cmdbuf_idle(man, false, 10*HZ); (void) vmw_cmdbuf_idle(man, false, 10*HZ);
if (man->using_mob) { if (man->using_mob)
(void) ttm_bo_kunmap(&man->map_obj); vmw_bo_unreference(&man->cmd_space);
ttm_bo_put(man->cmd_space); else
man->cmd_space = NULL;
} else {
dma_free_coherent(man->dev_priv->drm.dev, dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle); man->size, man->map, man->handle);
}
} }
/** /**
......
...@@ -73,7 +73,7 @@ const struct vmw_user_resource_conv *user_context_converter = ...@@ -73,7 +73,7 @@ const struct vmw_user_resource_conv *user_context_converter =
static const struct vmw_res_func vmw_legacy_context_func = { static const struct vmw_res_func vmw_legacy_context_func = {
.res_type = vmw_res_context, .res_type = vmw_res_context,
.needs_backup = false, .needs_guest_memory = false,
.may_evict = false, .may_evict = false,
.type_name = "legacy contexts", .type_name = "legacy contexts",
.domain = VMW_BO_DOMAIN_SYS, .domain = VMW_BO_DOMAIN_SYS,
...@@ -86,7 +86,7 @@ static const struct vmw_res_func vmw_legacy_context_func = { ...@@ -86,7 +86,7 @@ static const struct vmw_res_func vmw_legacy_context_func = {
static const struct vmw_res_func vmw_gb_context_func = { static const struct vmw_res_func vmw_gb_context_func = {
.res_type = vmw_res_context, .res_type = vmw_res_context,
.needs_backup = true, .needs_guest_memory = true,
.may_evict = true, .may_evict = true,
.prio = 3, .prio = 3,
.dirty_prio = 3, .dirty_prio = 3,
...@@ -101,7 +101,7 @@ static const struct vmw_res_func vmw_gb_context_func = { ...@@ -101,7 +101,7 @@ static const struct vmw_res_func vmw_gb_context_func = {
static const struct vmw_res_func vmw_dx_context_func = { static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context, .res_type = vmw_res_dx_context,
.needs_backup = true, .needs_guest_memory = true,
.may_evict = true, .may_evict = true,
.prio = 3, .prio = 3,
.dirty_prio = 3, .dirty_prio = 3,
...@@ -186,7 +186,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, ...@@ -186,7 +186,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
struct vmw_user_context *uctx = struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res); container_of(res, struct vmw_user_context, res);
res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) : res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
sizeof(SVGAGBContextData)); sizeof(SVGAGBContextData));
ret = vmw_resource_init(dev_priv, res, true, ret = vmw_resource_init(dev_priv, res, true,
res_free, res_free,
...@@ -358,8 +358,8 @@ static int vmw_gb_context_bind(struct vmw_resource *res, ...@@ -358,8 +358,8 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body); cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id; cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start; cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty; cmd->body.validContents = res->guest_memory_dirty;
res->backup_dirty = false; res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0; return 0;
...@@ -525,8 +525,8 @@ static int vmw_dx_context_bind(struct vmw_resource *res, ...@@ -525,8 +525,8 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body); cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id; cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start; cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty; cmd->body.validContents = res->guest_memory_dirty;
res->backup_dirty = false; res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cmd_commit(dev_priv, sizeof(*cmd));
......
...@@ -131,7 +131,7 @@ static int vmw_cotable_destroy(struct vmw_resource *res); ...@@ -131,7 +131,7 @@ static int vmw_cotable_destroy(struct vmw_resource *res);
static const struct vmw_res_func vmw_cotable_func = { static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable, .res_type = vmw_res_cotable,
.needs_backup = true, .needs_guest_memory = true,
.may_evict = true, .may_evict = true,
.prio = 3, .prio = 3,
.dirty_prio = 3, .dirty_prio = 3,
...@@ -182,7 +182,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res) ...@@ -182,7 +182,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
{ {
struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv; struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = &res->backup->base; struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
SVGA3dCmdDXSetCOTable body; SVGA3dCmdDXSetCOTable body;
...@@ -230,7 +230,7 @@ static int vmw_cotable_bind(struct vmw_resource *res, ...@@ -230,7 +230,7 @@ static int vmw_cotable_bind(struct vmw_resource *res,
* take the opportunity to correct the value here so that it's not * take the opportunity to correct the value here so that it's not
* misused in the future. * misused in the future.
*/ */
val_buf->bo = &res->backup->base; val_buf->bo = &res->guest_memory_bo->tbo;
return vmw_cotable_unscrub(res); return vmw_cotable_unscrub(res);
} }
...@@ -291,7 +291,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback) ...@@ -291,7 +291,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd0->body.cid = vcotbl->ctx->id; cmd0->body.cid = vcotbl->ctx->id;
cmd0->body.type = vcotbl->type; cmd0->body.type = vcotbl->type;
cmd1 = (void *) &cmd0[1]; cmd1 = (void *) &cmd0[1];
vcotbl->size_read_back = res->backup_size; vcotbl->size_read_back = res->guest_memory_size;
} }
cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE; cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd1->header.size = sizeof(cmd1->body); cmd1->header.size = sizeof(cmd1->body);
...@@ -373,12 +373,12 @@ static int vmw_cotable_readback(struct vmw_resource *res) ...@@ -373,12 +373,12 @@ static int vmw_cotable_readback(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body); cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id; cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type; cmd->body.type = vcotbl->type;
vcotbl->size_read_back = res->backup_size; vcotbl->size_read_back = res->guest_memory_size;
vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cmd_commit(dev_priv, sizeof(*cmd));
} }
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
vmw_bo_fence_single(&res->backup->base, fence); vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence);
vmw_fence_obj_unreference(&fence); vmw_fence_obj_unreference(&fence);
return 0; return 0;
...@@ -401,14 +401,21 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) ...@@ -401,14 +401,21 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
struct ttm_operation_ctx ctx = { false, false }; struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv; struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_bo *buf, *old_buf = res->backup; struct vmw_bo *buf, *old_buf = res->guest_memory_bo;
struct ttm_buffer_object *bo, *old_bo = &res->backup->base; struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo;
size_t old_size = res->backup_size; size_t old_size = res->guest_memory_size;
size_t old_size_read_back = vcotbl->size_read_back; size_t old_size_read_back = vcotbl->size_read_back;
size_t cur_size_read_back; size_t cur_size_read_back;
struct ttm_bo_kmap_obj old_map, new_map; struct ttm_bo_kmap_obj old_map, new_map;
int ret; int ret;
size_t i; size_t i;
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_MOB,
.busy_domain = VMW_BO_DOMAIN_MOB,
.bo_type = ttm_bo_type_device,
.size = new_size,
.pin = true
};
MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE); MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE); MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);
...@@ -425,15 +432,13 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) ...@@ -425,15 +432,13 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure * for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure. * we can use tryreserve without failure.
*/ */
ret = vmw_bo_create(dev_priv, new_size, ret = vmw_bo_create(dev_priv, &bo_params, &buf);
VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB,
true, true, &buf);
if (ret) { if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n"); DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done; goto out_done;
} }
bo = &buf->base; bo = &buf->tbo;
WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL)); WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
ret = ttm_bo_wait(old_bo, false, false); ret = ttm_bo_wait(old_bo, false, false);
...@@ -477,8 +482,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) ...@@ -477,8 +482,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
} }
vmw_resource_mob_detach(res); vmw_resource_mob_detach(res);
res->backup = buf; res->guest_memory_bo = buf;
res->backup_size = new_size; res->guest_memory_size = new_size;
vcotbl->size_read_back = cur_size_read_back; vcotbl->size_read_back = cur_size_read_back;
/* /*
...@@ -488,8 +493,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) ...@@ -488,8 +493,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
ret = vmw_cotable_unscrub(res); ret = vmw_cotable_unscrub(res);
if (ret) { if (ret) {
DRM_ERROR("Failed switching COTable backup buffer.\n"); DRM_ERROR("Failed switching COTable backup buffer.\n");
res->backup = old_buf; res->guest_memory_bo = old_buf;
res->backup_size = old_size; res->guest_memory_size = old_size;
vcotbl->size_read_back = old_size_read_back; vcotbl->size_read_back = old_size_read_back;
vmw_resource_mob_attach(res); vmw_resource_mob_attach(res);
goto out_wait; goto out_wait;
...@@ -504,7 +509,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) ...@@ -504,7 +509,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
if (unlikely(ret)) if (unlikely(ret))
goto out_wait; goto out_wait;
/* Release the pin acquired in vmw_bo_init */ /* Release the pin acquired in vmw_bo_create */
ttm_bo_unpin(bo); ttm_bo_unpin(bo);
MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE); MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
...@@ -539,7 +544,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) ...@@ -539,7 +544,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
static int vmw_cotable_create(struct vmw_resource *res) static int vmw_cotable_create(struct vmw_resource *res)
{ {
struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_cotable *vcotbl = vmw_cotable(res);
size_t new_size = res->backup_size; size_t new_size = res->guest_memory_size;
size_t needed_size; size_t needed_size;
int ret; int ret;
...@@ -548,7 +553,7 @@ static int vmw_cotable_create(struct vmw_resource *res) ...@@ -548,7 +553,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
while (needed_size > new_size) while (needed_size > new_size)
new_size *= 2; new_size *= 2;
if (likely(new_size <= res->backup_size)) { if (likely(new_size <= res->guest_memory_size)) {
if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) { if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
ret = vmw_cotable_unscrub(res); ret = vmw_cotable_unscrub(res);
if (ret) if (ret)
...@@ -612,12 +617,12 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, ...@@ -612,12 +617,12 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vcotbl->resource_list); INIT_LIST_HEAD(&vcotbl->resource_list);
vcotbl->res.id = type; vcotbl->res.id = type;
vcotbl->res.backup_size = PAGE_SIZE; vcotbl->res.guest_memory_size = PAGE_SIZE;
num_entries = PAGE_SIZE / co_info[type].size; num_entries = PAGE_SIZE / co_info[type].size;
if (num_entries < co_info[type].min_initial_entries) { if (num_entries < co_info[type].min_initial_entries) {
vcotbl->res.backup_size = co_info[type].min_initial_entries * vcotbl->res.guest_memory_size = co_info[type].min_initial_entries *
co_info[type].size; co_info[type].size;
vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size); vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size);
} }
vcotbl->scrubbed = true; vcotbl->scrubbed = true;
......
...@@ -391,23 +391,28 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) ...@@ -391,23 +391,28 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
struct ttm_bo_kmap_obj map; struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result; volatile SVGA3dQueryResult *result;
bool dummy; bool dummy;
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
.pin = true
};
/* /*
* Create the vbo as pinned, so that a tryreserve will * Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only * immediately succeed. This is because we're the only
* user of the bo currently. * user of the bo currently.
*/ */
ret = vmw_bo_create(dev_priv, PAGE_SIZE, ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
VMW_BO_DOMAIN_SYS, VMW_BO_DOMAIN_SYS,
false, true, &vbo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = ttm_bo_reserve(&vbo->base, false, true, NULL); ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true); vmw_bo_pin_reserved(vbo, true);
ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) { if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy); result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result); result->totalSize = sizeof(*result);
...@@ -416,7 +421,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) ...@@ -416,7 +421,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
ttm_bo_kunmap(&map); ttm_bo_kunmap(&map);
} }
vmw_bo_pin_reserved(vbo, false); vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->base); ttm_bo_unreserve(&vbo->tbo);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n"); DRM_ERROR("Dummy query buffer map failed.\n");
......
...@@ -142,21 +142,23 @@ struct vmw_res_func; ...@@ -142,21 +142,23 @@ struct vmw_res_func;
* @kref: For refcounting. * @kref: For refcounting.
* @dev_priv: Pointer to the device private for this resource. Immutable. * @dev_priv: Pointer to the device private for this resource. Immutable.
* @id: Device id. Protected by @dev_priv::resource_lock. * @id: Device id. Protected by @dev_priv::resource_lock.
* @backup_size: Backup buffer size. Immutable. * @guest_memory_size: Guest memory buffer size. Immutable.
* @res_dirty: Resource contains data not yet in the backup buffer. Protected * @res_dirty: Resource contains data not yet in the guest memory buffer.
* by resource reserved.
* @backup_dirty: Backup buffer contains data not yet in the HW resource.
* Protected by resource reserved. * Protected by resource reserved.
* @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
* resource. Protected by resource reserved.
* @coherent: Emulate coherency by tracking vm accesses. * @coherent: Emulate coherency by tracking vm accesses.
* @backup: The backup buffer if any. Protected by resource reserved. * @guest_memory_bo: The guest memory buffer if any. Protected by resource
* @backup_offset: Offset into the backup buffer if any. Protected by resource * reserved.
* reserved. Note that only a few resource types can have a @backup_offset * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
* different from zero. * by resource reserved. Note that only a few resource types can have a
* @guest_memory_offset different from zero.
* @pin_count: The pin count for this resource. A pinned resource has a * @pin_count: The pin count for this resource. A pinned resource has a
* pin-count greater than zero. It is not on the resource LRU lists and its * pin-count greater than zero. It is not on the resource LRU lists and its
* backup buffer is pinned. Hence it can't be evicted. * guest memory buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable. * @func: Method vtable for this resource. Immutable.
* @mob_node; Node for the MOB backup rbtree. Protected by @backup reserved. * @mob_node; Node for the MOB guest memory rbtree. Protected by
* @guest_memory_bo reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
* @binding_head: List head for the context binding list. Protected by * @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex * the @dev_priv::binding_mutex
...@@ -165,18 +167,19 @@ struct vmw_res_func; ...@@ -165,18 +167,19 @@ struct vmw_res_func;
* resource destruction. * resource destruction.
*/ */
struct vmw_bo; struct vmw_bo;
struct vmw_bo;
struct vmw_resource_dirty; struct vmw_resource_dirty;
struct vmw_resource { struct vmw_resource {
struct kref kref; struct kref kref;
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
int id; int id;
u32 used_prio; u32 used_prio;
unsigned long backup_size; unsigned long guest_memory_size;
u32 res_dirty : 1; u32 res_dirty : 1;
u32 backup_dirty : 1; u32 guest_memory_dirty : 1;
u32 coherent : 1; u32 coherent : 1;
struct vmw_bo *backup; struct vmw_bo *guest_memory_bo;
unsigned long backup_offset; unsigned long guest_memory_offset;
unsigned long pin_count; unsigned long pin_count;
const struct vmw_res_func *func; const struct vmw_res_func *func;
struct rb_node mob_node; struct rb_node mob_node;
...@@ -467,7 +470,7 @@ struct vmw_otable_batch { ...@@ -467,7 +470,7 @@ struct vmw_otable_batch {
unsigned num_otables; unsigned num_otables;
struct vmw_otable *otables; struct vmw_otable *otables;
struct vmw_resource *context; struct vmw_resource *context;
struct ttm_buffer_object *otable_bo; struct vmw_bo *otable_bo;
}; };
enum { enum {
...@@ -662,6 +665,11 @@ static inline struct vmw_private *vmw_priv(struct drm_device *dev) ...@@ -662,6 +665,11 @@ static inline struct vmw_private *vmw_priv(struct drm_device *dev)
return (struct vmw_private *)dev->dev_private; return (struct vmw_private *)dev->dev_private;
} }
static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
{
return container_of(bdev, struct vmw_private, bdev);
}
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv) static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{ {
return (struct vmw_fpriv *)file_priv->driver_priv; return (struct vmw_fpriv *)file_priv->driver_priv;
...@@ -814,9 +822,9 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, ...@@ -814,9 +822,9 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
extern void vmw_resource_unreserve(struct vmw_resource *res, extern void vmw_resource_unreserve(struct vmw_resource *res,
bool dirty_set, bool dirty_set,
bool dirty, bool dirty,
bool switch_backup, bool switch_guest_memory,
struct vmw_bo *new_backup, struct vmw_bo *new_guest_memory,
unsigned long new_backup_offset); unsigned long new_guest_memory_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo, extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem, struct ttm_resource *old_mem,
struct ttm_resource *new_mem); struct ttm_resource *new_mem);
...@@ -929,16 +937,15 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv) ...@@ -929,16 +937,15 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
extern const size_t vmw_tt_size; extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement; extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement; extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement; extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_device_funcs vmw_bo_driver; extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table * extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo); vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv, int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
unsigned long bo_size, size_t bo_size,
struct ttm_buffer_object **bo_p); u32 domain,
struct vmw_bo **bo_p);
extern void vmw_piter_start(struct vmw_piter *viter, extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt, const struct vmw_sg_table *vsgt,
......
...@@ -722,7 +722,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) ...@@ -722,7 +722,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY; cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body); cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id; cmd->body.cid = ctx_res->id;
cmd->body.mobid = dx_query_mob->base.resource->start; cmd->body.mobid = dx_query_mob->tbo.resource->start;
vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob); vmw_context_bind_dx_query(ctx_res, dx_query_mob);
...@@ -1033,7 +1033,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, ...@@ -1033,7 +1033,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) { if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) { if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n"); VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -1164,7 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1164,7 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
} }
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->base); ttm_bo_put(&vmw_bo->tbo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1220,7 +1220,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ...@@ -1220,7 +1220,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->base); ttm_bo_put(&vmw_bo->tbo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1533,7 +1533,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, ...@@ -1533,7 +1533,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
return ret; return ret;
/* Make sure DMA doesn't cross BO boundaries. */ /* Make sure DMA doesn't cross BO boundaries. */
bo_size = vmw_bo->base.base.size; bo_size = vmw_bo->tbo.base.size;
if (unlikely(cmd->body.guest.ptr.offset > bo_size)) { if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
VMW_DEBUG_USER("Invalid DMA offset.\n"); VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL; return -EINVAL;
...@@ -1556,7 +1556,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, ...@@ -1556,7 +1556,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header); vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
return 0; return 0;
} }
...@@ -3759,7 +3759,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) ...@@ -3759,7 +3759,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
struct ttm_buffer_object *bo; struct ttm_buffer_object *bo;
list_for_each_entry(reloc, &sw_context->bo_relocations, head) { list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
bo = &reloc->vbo->base; bo = &reloc->vbo->tbo;
switch (bo->resource->mem_type) { switch (bo->resource->mem_type) {
case TTM_PL_VRAM: case TTM_PL_VRAM:
reloc->location->offset += bo->resource->start << PAGE_SHIFT; reloc->location->offset += bo->resource->start << PAGE_SHIFT;
......
...@@ -33,9 +33,8 @@ ...@@ -33,9 +33,8 @@
static void vmw_gem_object_free(struct drm_gem_object *gobj) static void vmw_gem_object_free(struct drm_gem_object *gobj)
{ {
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
if (bo) { if (bo)
ttm_bo_put(bo); ttm_bo_put(bo);
}
} }
static int vmw_gem_object_open(struct drm_gem_object *obj, static int vmw_gem_object_open(struct drm_gem_object *obj,
...@@ -119,19 +118,23 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, ...@@ -119,19 +118,23 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct vmw_bo **p_vbo) struct vmw_bo **p_vbo)
{ {
int ret; int ret;
struct vmw_bo_params params = {
.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
.pin = false
};
ret = vmw_bo_create(dev_priv, size, ret = vmw_bo_create(dev_priv, &params, p_vbo);
(dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_SYS,
true, false, p_vbo);
(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs; (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
if (ret != 0) if (ret != 0)
goto out_no_bo; goto out_no_bo;
ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle); ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
/* drop reference from allocate - handle holds it now */ /* drop reference from allocate - handle holds it now */
drm_gem_object_put(&(*p_vbo)->base.base); drm_gem_object_put(&(*p_vbo)->tbo.base);
out_no_bo: out_no_bo:
return ret; return ret;
} }
...@@ -155,7 +158,7 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, ...@@ -155,7 +158,7 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
goto out_no_bo; goto out_no_bo;
rep->handle = handle; rep->handle = handle;
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node); rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node);
rep->cur_gmr_id = handle; rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0; rep->cur_gmr_offset = 0;
out_no_bo: out_no_bo:
...@@ -169,7 +172,7 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m) ...@@ -169,7 +172,7 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
const char *placement; const char *placement;
const char *type; const char *type;
switch (bo->base.resource->mem_type) { switch (bo->tbo.resource->mem_type) {
case TTM_PL_SYSTEM: case TTM_PL_SYSTEM:
placement = " CPU"; placement = " CPU";
break; break;
...@@ -190,7 +193,7 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m) ...@@ -190,7 +193,7 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
break; break;
} }
switch (bo->base.type) { switch (bo->tbo.type) {
case ttm_bo_type_device: case ttm_bo_type_device:
type = "device"; type = "device";
break; break;
...@@ -206,12 +209,12 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m) ...@@ -206,12 +209,12 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
} }
seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s", seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
id, bo->base.base.size, placement, type); id, bo->tbo.base.size, placement, type);
seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d", seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
bo->base.priority, bo->tbo.priority,
bo->base.pin_count, bo->tbo.pin_count,
kref_read(&bo->base.base.refcount), kref_read(&bo->tbo.base.refcount),
kref_read(&bo->base.kref)); kref_read(&bo->tbo.kref));
seq_puts(m, "\n"); seq_puts(m, "\n");
} }
......
...@@ -153,9 +153,8 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv, ...@@ -153,9 +153,8 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
SVGAGBCursorHeader *header; SVGAGBCursorHeader *header;
SVGAGBAlphaCursorHeader *alpha_header; SVGAGBAlphaCursorHeader *alpha_header;
const u32 image_size = width * height * sizeof(*image); const u32 image_size = width * height * sizeof(*image);
bool is_iomem;
header = ttm_kmap_obj_virtual(&vps->cursor.map, &is_iomem); header = vmw_bo_map_and_cache(vps->cursor.bo);
alpha_header = &header->header.alphaHeader; alpha_header = &header->header.alphaHeader;
memset(header, 0, sizeof(*header)); memset(header, 0, sizeof(*header));
...@@ -170,7 +169,7 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv, ...@@ -170,7 +169,7 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
memcpy(header + 1, image, image_size); memcpy(header + 1, image, image_size);
vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
vps->cursor.bo->resource->start); vps->cursor.bo->tbo.resource->start);
} }
...@@ -188,7 +187,7 @@ static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps) ...@@ -188,7 +187,7 @@ static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
bool is_iomem; bool is_iomem;
if (vps->surf) { if (vps->surf) {
if (vps->surf_mapped) if (vps->surf_mapped)
return vmw_bo_map_and_cache(vps->surf->res.backup); return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
return vps->surf->snooper.image; return vps->surf->snooper.image;
} else if (vps->bo) } else if (vps->bo)
return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem); return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
...@@ -223,15 +222,13 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, ...@@ -223,15 +222,13 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
return changed; return changed;
} }
static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo) static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{ {
if (!(*bo)) if (!(*vbo))
return; return;
ttm_bo_unpin(*bo); ttm_bo_unpin(&(*vbo)->tbo);
ttm_bo_put(*bo); vmw_bo_unreference(vbo);
kfree(*bo);
*bo = NULL;
} }
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
...@@ -255,8 +252,8 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, ...@@ -255,8 +252,8 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
/* Cache is full: See if this mob is bigger than an existing mob. */ /* Cache is full: See if this mob is bigger than an existing mob. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (vcp->cursor_mobs[i]->base.size < if (vcp->cursor_mobs[i]->tbo.base.size <
vps->cursor.bo->base.size) { vps->cursor.bo->tbo.base.size) {
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
vcp->cursor_mobs[i] = vps->cursor.bo; vcp->cursor_mobs[i] = vps->cursor.bo;
vps->cursor.bo = NULL; vps->cursor.bo = NULL;
...@@ -289,7 +286,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, ...@@ -289,7 +286,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
return -EINVAL; return -EINVAL;
if (vps->cursor.bo) { if (vps->cursor.bo) {
if (vps->cursor.bo->base.size >= size) if (vps->cursor.bo->tbo.base.size >= size)
return 0; return 0;
vmw_du_put_cursor_mob(vcp, vps); vmw_du_put_cursor_mob(vcp, vps);
} }
...@@ -297,26 +294,27 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, ...@@ -297,26 +294,27 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
/* Look for an unused mob in the cache. */ /* Look for an unused mob in the cache. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (vcp->cursor_mobs[i] && if (vcp->cursor_mobs[i] &&
vcp->cursor_mobs[i]->base.size >= size) { vcp->cursor_mobs[i]->tbo.base.size >= size) {
vps->cursor.bo = vcp->cursor_mobs[i]; vps->cursor.bo = vcp->cursor_mobs[i];
vcp->cursor_mobs[i] = NULL; vcp->cursor_mobs[i] = NULL;
return 0; return 0;
} }
} }
/* Create a new mob if we can't find an existing one. */ /* Create a new mob if we can't find an existing one. */
ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement, ret = vmw_bo_create_and_populate(dev_priv, size,
&vps->cursor.bo); VMW_BO_DOMAIN_MOB,
&vps->cursor.bo);
if (ret != 0) if (ret != 0)
return ret; return ret;
/* Fence the mob creation so we are guarateed to have the mob */ /* Fence the mob creation so we are guarateed to have the mob */
ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL); ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
if (ret != 0) if (ret != 0)
goto teardown; goto teardown;
vmw_bo_fence_single(vps->cursor.bo, NULL); vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
ttm_bo_unreserve(vps->cursor.bo); ttm_bo_unreserve(&vps->cursor.bo->tbo);
return 0; return 0;
teardown: teardown:
...@@ -574,39 +572,30 @@ vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps) ...@@ -574,39 +572,30 @@ vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{ {
int ret; int ret;
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
struct ttm_buffer_object *bo = vps->cursor.bo; struct ttm_buffer_object *bo;
if (!bo) if (!vps->cursor.bo)
return -EINVAL; return -EINVAL;
bo = &vps->cursor.bo->tbo;
if (bo->base.size < size) if (bo->base.size < size)
return -EINVAL; return -EINVAL;
if (vps->cursor.mapped) if (vps->cursor.bo->map.virtual)
return 0; return 0;
ret = ttm_bo_reserve(bo, false, false, NULL); ret = ttm_bo_reserve(bo, false, false, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return -ENOMEM; return -ENOMEM;
ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map); vmw_bo_map_and_cache(vps->cursor.bo);
/*
* We just want to try to get mob bind to finish
* so that the first write to SVGA_REG_CURSOR_MOBID
* is done with a buffer that the device has already
* seen
*/
(void) ttm_bo_wait(bo, false, false);
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return -ENOMEM; return -ENOMEM;
vps->cursor.mapped = true;
return 0; return 0;
} }
...@@ -623,19 +612,15 @@ static int ...@@ -623,19 +612,15 @@ static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps) vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{ {
int ret = 0; int ret = 0;
struct ttm_buffer_object *bo = vps->cursor.bo; struct vmw_bo *vbo = vps->cursor.bo;
if (!vps->cursor.mapped) if (!vbo || !vbo->map.virtual)
return 0; return 0;
if (!bo) ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
return 0;
ret = ttm_bo_reserve(bo, true, false, NULL);
if (likely(ret == 0)) { if (likely(ret == 0)) {
ttm_bo_kunmap(&vps->cursor.map); vmw_bo_unmap(vbo);
ttm_bo_unreserve(bo); ttm_bo_unreserve(&vbo->tbo);
vps->cursor.mapped = false;
} }
return ret; return ret;
...@@ -661,16 +646,16 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, ...@@ -661,16 +646,16 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
bool is_iomem; bool is_iomem;
if (vps->surf_mapped) { if (vps->surf_mapped) {
vmw_bo_unmap(vps->surf->res.backup); vmw_bo_unmap(vps->surf->res.guest_memory_bo);
vps->surf_mapped = false; vps->surf_mapped = false;
} }
if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) { if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL); const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (likely(ret == 0)) { if (likely(ret == 0)) {
ttm_bo_kunmap(&vps->bo->map); ttm_bo_kunmap(&vps->bo->map);
ttm_bo_unreserve(&vps->bo->base); ttm_bo_unreserve(&vps->bo->tbo);
} }
} }
...@@ -736,26 +721,26 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, ...@@ -736,26 +721,26 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
* reserve the ttm_buffer_object first which * reserve the ttm_buffer_object first which
* vmw_bo_map_and_cache() omits. * vmw_bo_map_and_cache() omits.
*/ */
ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL); ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return -ENOMEM; return -ENOMEM;
ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map); ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
ttm_bo_unreserve(&vps->bo->base); ttm_bo_unreserve(&vps->bo->tbo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return -ENOMEM; return -ENOMEM;
} else if (vps->surf && !vps->bo && vps->surf->res.backup) { } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
WARN_ON(vps->surf->snooper.image); WARN_ON(vps->surf->snooper.image);
ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false, ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
NULL); NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return -ENOMEM; return -ENOMEM;
vmw_bo_map_and_cache(vps->surf->res.backup); vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
ttm_bo_unreserve(&vps->surf->res.backup->base); ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
vps->surf_mapped = true; vps->surf_mapped = true;
} }
...@@ -926,7 +911,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, ...@@ -926,7 +911,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
WARN_ON(!surface); WARN_ON(!surface);
if (!surface || if (!surface ||
(!surface->snooper.image && !surface->res.backup)) { (!surface->snooper.image && !surface->res.guest_memory_bo)) {
DRM_ERROR("surface not suitable for cursor\n"); DRM_ERROR("surface not suitable for cursor\n");
return -EINVAL; return -EINVAL;
} }
...@@ -1397,7 +1382,7 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb, ...@@ -1397,7 +1382,7 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_bo *vfbd = struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(fb); vmw_framebuffer_to_vfbd(fb);
return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle); return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
} }
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
...@@ -1546,9 +1531,9 @@ static int vmw_create_bo_proxy(struct drm_device *dev, ...@@ -1546,9 +1531,9 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */ /* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex); mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true); (void) vmw_resource_reserve(res, false, true);
vmw_bo_unreference(&res->backup); vmw_bo_unreference(&res->guest_memory_bo);
res->backup = vmw_bo_reference(bo_mob); res->guest_memory_bo = vmw_bo_reference(bo_mob);
res->backup_offset = 0; res->guest_memory_offset = 0;
vmw_resource_unreserve(res, false, false, false, NULL, 0); vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex); mutex_unlock(&res->dev_priv->cmdbuf_mutex);
...@@ -1570,7 +1555,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, ...@@ -1570,7 +1555,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
int ret; int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0]; requested_size = mode_cmd->height * mode_cmd->pitches[0];
if (unlikely(requested_size > bo->base.base.size)) { if (unlikely(requested_size > bo->tbo.base.size)) {
DRM_ERROR("Screen buffer object size is too small " DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n"); "for requested mode.\n");
return -EINVAL; return -EINVAL;
...@@ -1591,7 +1576,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, ...@@ -1591,7 +1576,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
goto out_err1; goto out_err1;
} }
vfbd->base.base.obj[0] = &bo->base.base; vfbd->base.base.obj[0] = &bo->tbo.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true; vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo); vfbd->buffer = vmw_bo_reference(bo);
......
...@@ -270,9 +270,7 @@ struct vmw_crtc_state { ...@@ -270,9 +270,7 @@ struct vmw_crtc_state {
}; };
struct vmw_cursor_plane_state { struct vmw_cursor_plane_state {
struct ttm_buffer_object *bo; struct vmw_bo *bo;
struct ttm_bo_kmap_obj map;
bool mapped;
s32 hotspot_x; s32 hotspot_x;
s32 hotspot_y; s32 hotspot_y;
}; };
...@@ -343,7 +341,7 @@ struct vmw_connector_state { ...@@ -343,7 +341,7 @@ struct vmw_connector_state {
struct vmw_cursor_plane { struct vmw_cursor_plane {
struct drm_plane base; struct drm_plane base;
struct ttm_buffer_object *cursor_mobs[3]; struct vmw_bo *cursor_mobs[3];
}; };
/** /**
......
...@@ -147,7 +147,7 @@ static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb) ...@@ -147,7 +147,7 @@ static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
int ret; int ret;
buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
if (!buf) if (!buf)
return 0; return 0;
...@@ -169,7 +169,7 @@ static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb) ...@@ -169,7 +169,7 @@ static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
struct vmw_bo *buf; struct vmw_bo *buf;
buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
if (WARN_ON(!buf)) if (WARN_ON(!buf))
return 0; return 0;
......
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
* @pt_root_page DMA address of the level 0 page of the page table. * @pt_root_page DMA address of the level 0 page of the page table.
*/ */
struct vmw_mob { struct vmw_mob {
struct ttm_buffer_object *pt_bo; struct vmw_bo *pt_bo;
unsigned long num_pages; unsigned long num_pages;
unsigned pt_level; unsigned pt_level;
dma_addr_t pt_root_page; dma_addr_t pt_root_page;
...@@ -204,7 +204,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, ...@@ -204,7 +204,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
if (otable->page_table == NULL) if (otable->page_table == NULL)
return; return;
bo = otable->page_table->pt_bo; bo = &otable->page_table->pt_bo->tbo;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) if (unlikely(cmd == NULL))
return; return;
...@@ -252,7 +252,9 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv, ...@@ -252,7 +252,9 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
bo_size += otables[i].size; bo_size += otables[i].size;
} }
ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo); ret = vmw_bo_create_and_populate(dev_priv, bo_size,
VMW_BO_DOMAIN_WAITABLE_SYS,
&batch->otable_bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -261,7 +263,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv, ...@@ -261,7 +263,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
if (!batch->otables[i].enabled) if (!batch->otables[i].enabled)
continue; continue;
ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo, ret = vmw_setup_otable_base(dev_priv, i,
&batch->otable_bo->tbo,
offset, offset,
&otables[i]); &otables[i]);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
...@@ -278,8 +281,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv, ...@@ -278,8 +281,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
&batch->otables[i]); &batch->otables[i]);
} }
vmw_bo_unpin_unlocked(batch->otable_bo); vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
ttm_bo_put(batch->otable_bo); ttm_bo_put(&batch->otable_bo->tbo);
batch->otable_bo = NULL; batch->otable_bo = NULL;
return ret; return ret;
} }
...@@ -330,7 +333,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ...@@ -330,7 +333,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
struct vmw_otable_batch *batch) struct vmw_otable_batch *batch)
{ {
SVGAOTableType i; SVGAOTableType i;
struct ttm_buffer_object *bo = batch->otable_bo; struct ttm_buffer_object *bo = &batch->otable_bo->tbo;
int ret; int ret;
for (i = 0; i < batch->num_otables; ++i) for (i = 0; i < batch->num_otables; ++i)
...@@ -345,8 +348,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ...@@ -345,8 +348,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ttm_bo_unpin(bo); ttm_bo_unpin(bo);
ttm_bo_unreserve(bo); ttm_bo_unreserve(bo);
ttm_bo_put(batch->otable_bo); vmw_bo_unreference(&batch->otable_bo);
batch->otable_bo = NULL;
} }
/* /*
...@@ -414,7 +416,9 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, ...@@ -414,7 +416,9 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
{ {
BUG_ON(mob->pt_bo != NULL); BUG_ON(mob->pt_bo != NULL);
return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, &mob->pt_bo); return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE,
VMW_BO_DOMAIN_WAITABLE_SYS,
&mob->pt_bo);
} }
/** /**
...@@ -495,7 +499,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, ...@@ -495,7 +499,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
unsigned long num_data_pages) unsigned long num_data_pages)
{ {
unsigned long num_pt_pages = 0; unsigned long num_pt_pages = 0;
struct ttm_buffer_object *bo = mob->pt_bo; struct ttm_buffer_object *bo = &mob->pt_bo->tbo;
struct vmw_piter save_pt_iter = {0}; struct vmw_piter save_pt_iter = {0};
struct vmw_piter pt_iter; struct vmw_piter pt_iter;
const struct vmw_sg_table *vsgt; const struct vmw_sg_table *vsgt;
...@@ -532,9 +536,8 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, ...@@ -532,9 +536,8 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
void vmw_mob_destroy(struct vmw_mob *mob) void vmw_mob_destroy(struct vmw_mob *mob)
{ {
if (mob->pt_bo) { if (mob->pt_bo) {
vmw_bo_unpin_unlocked(mob->pt_bo); vmw_bo_unpin_unlocked(&mob->pt_bo->tbo);
ttm_bo_put(mob->pt_bo); vmw_bo_unreference(&mob->pt_bo);
mob->pt_bo = NULL;
} }
kfree(mob); kfree(mob);
} }
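The explicit mob->pt_bo = NULL lines can be dropped in the hunks above and below because vmw_bo_unreference() takes the address of the caller's pointer and clears it before dropping the reference through the embedded TTM object. A minimal sketch of that helper, assuming the usual ttm_bo_put-based implementation:

static inline void vmw_bo_unreference(struct vmw_bo **buf)
{
	struct vmw_bo *tmp_buf = *buf;

	/* Clear the caller's pointer, then drop the TTM reference. */
	*buf = NULL;
	if (tmp_buf)
		ttm_bo_put(&tmp_buf->tbo);
}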
...@@ -553,7 +556,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, ...@@ -553,7 +556,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
SVGA3dCmdDestroyGBMob body; SVGA3dCmdDestroyGBMob body;
} *cmd; } *cmd;
int ret; int ret;
struct ttm_buffer_object *bo = mob->pt_bo; struct ttm_buffer_object *bo = &mob->pt_bo->tbo;
if (bo) { if (bo) {
ret = ttm_bo_reserve(bo, false, true, NULL); ret = ttm_bo_reserve(bo, false, true, NULL);
...@@ -645,9 +648,8 @@ int vmw_mob_bind(struct vmw_private *dev_priv, ...@@ -645,9 +648,8 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space: out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv); vmw_fifo_resource_dec(dev_priv);
if (pt_set_up) { if (pt_set_up) {
vmw_bo_unpin_unlocked(mob->pt_bo); vmw_bo_unpin_unlocked(&mob->pt_bo->tbo);
ttm_bo_put(mob->pt_bo); vmw_bo_unreference(&mob->pt_bo);
mob->pt_bo = NULL;
} }
return -ENOMEM; return -ENOMEM;
......
...@@ -140,7 +140,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, ...@@ -140,7 +140,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
for (i = 0; i < num_items; i++) for (i = 0; i < num_items; i++)
items[i].registerId = i; items[i].registerId = i;
vmw_bo_get_guest_ptr(&buf->base, &ptr); vmw_bo_get_guest_ptr(&buf->tbo, &ptr);
ptr.offset += arg->offset; ptr.offset += arg->offset;
items[SVGA_VIDEO_ENABLED].value = true; items[SVGA_VIDEO_ENABLED].value = true;
......
...@@ -82,8 +82,8 @@ struct vmw_bo_dirty { ...@@ -82,8 +82,8 @@ struct vmw_bo_dirty {
static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo) static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
{ {
struct vmw_bo_dirty *dirty = vbo->dirty; struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
struct address_space *mapping = vbo->base.bdev->dev_mapping; struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t num_marked; pgoff_t num_marked;
num_marked = clean_record_shared_mapping_range num_marked = clean_record_shared_mapping_range
...@@ -120,23 +120,22 @@ static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo) ...@@ -120,23 +120,22 @@ static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo) static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)
{ {
struct vmw_bo_dirty *dirty = vbo->dirty; struct vmw_bo_dirty *dirty = vbo->dirty;
unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
struct address_space *mapping = vbo->base.bdev->dev_mapping; struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t num_marked; pgoff_t num_marked;
if (dirty->end <= dirty->start) if (dirty->end <= dirty->start)
return; return;
num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping, num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping,
dirty->start + offset, dirty->start + offset,
dirty->end - dirty->start); dirty->end - dirty->start);
if (100UL * num_marked / dirty->bitmap_size > if (100UL * num_marked / dirty->bitmap_size >
VMW_DIRTY_PERCENTAGE) { VMW_DIRTY_PERCENTAGE)
dirty->change_count++; dirty->change_count++;
} else { else
dirty->change_count = 0; dirty->change_count = 0;
}
if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
pgoff_t start = 0; pgoff_t start = 0;
...@@ -186,8 +185,8 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo, ...@@ -186,8 +185,8 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end) pgoff_t start, pgoff_t end)
{ {
struct vmw_bo_dirty *dirty = vbo->dirty; struct vmw_bo_dirty *dirty = vbo->dirty;
unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
struct address_space *mapping = vbo->base.bdev->dev_mapping; struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end) if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
return; return;
...@@ -210,8 +209,8 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo, ...@@ -210,8 +209,8 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo,
void vmw_bo_dirty_unmap(struct vmw_bo *vbo, void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end) pgoff_t start, pgoff_t end)
{ {
unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
struct address_space *mapping = vbo->base.bdev->dev_mapping; struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
vmw_bo_dirty_pre_unmap(vbo, start, end); vmw_bo_dirty_pre_unmap(vbo, start, end);
unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT, unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
...@@ -231,7 +230,7 @@ void vmw_bo_dirty_unmap(struct vmw_bo *vbo, ...@@ -231,7 +230,7 @@ void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
int vmw_bo_dirty_add(struct vmw_bo *vbo) int vmw_bo_dirty_add(struct vmw_bo *vbo)
{ {
struct vmw_bo_dirty *dirty = vbo->dirty; struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t num_pages = PFN_UP(vbo->base.resource->size); pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);
size_t size; size_t size;
int ret; int ret;
...@@ -254,8 +253,8 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo) ...@@ -254,8 +253,8 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
if (num_pages < PAGE_SIZE / sizeof(pte_t)) { if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
dirty->method = VMW_BO_DIRTY_PAGETABLE; dirty->method = VMW_BO_DIRTY_PAGETABLE;
} else { } else {
struct address_space *mapping = vbo->base.bdev->dev_mapping; struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
dirty->method = VMW_BO_DIRTY_MKWRITE; dirty->method = VMW_BO_DIRTY_MKWRITE;
...@@ -307,11 +306,11 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo) ...@@ -307,11 +306,11 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
*/ */
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res) void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{ {
struct vmw_bo *vbo = res->backup; struct vmw_bo *vbo = res->guest_memory_bo;
struct vmw_bo_dirty *dirty = vbo->dirty; struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t start, cur, end; pgoff_t start, cur, end;
unsigned long res_start = res->backup_offset; unsigned long res_start = res->guest_memory_offset;
unsigned long res_end = res->backup_offset + res->backup_size; unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
WARN_ON_ONCE(res_start & ~PAGE_MASK); WARN_ON_ONCE(res_start & ~PAGE_MASK);
res_start >>= PAGE_SHIFT; res_start >>= PAGE_SHIFT;
...@@ -352,9 +351,9 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res) ...@@ -352,9 +351,9 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
*/ */
void vmw_bo_dirty_clear_res(struct vmw_resource *res) void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{ {
unsigned long res_start = res->backup_offset; unsigned long res_start = res->guest_memory_offset;
unsigned long res_end = res->backup_offset + res->backup_size; unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
struct vmw_bo *vbo = res->backup; struct vmw_bo *vbo = res->guest_memory_bo;
struct vmw_bo_dirty *dirty = vbo->dirty; struct vmw_bo_dirty *dirty = vbo->dirty;
res_start >>= PAGE_SHIFT; res_start >>= PAGE_SHIFT;
...@@ -381,8 +380,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) ...@@ -381,8 +380,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
vm_fault_t ret; vm_fault_t ret;
unsigned long page_offset; unsigned long page_offset;
unsigned int save_flags; unsigned int save_flags;
struct vmw_bo *vbo = struct vmw_bo *vbo = to_vmw_bo(&bo->base);
container_of(bo, typeof(*vbo), base);
/* /*
* mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly. * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
...@@ -420,8 +418,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) ...@@ -420,8 +418,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma; struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *) struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data; vma->vm_private_data;
struct vmw_bo *vbo = struct vmw_bo *vbo = to_vmw_bo(&bo->base);
container_of(bo, struct vmw_bo, base);
pgoff_t num_prefault; pgoff_t num_prefault;
pgprot_t prot; pgprot_t prot;
vm_fault_t ret; vm_fault_t ret;
......
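Several open-coded container_of() conversions in the fault handlers above collapse into to_vmw_bo(). Judging from the &bo->base arguments, it converts the drm_gem_object embedded in the TTM buffer object back to the driver object; a sketch assuming exactly that layout:

static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	/* tbo.base is the GEM object embedded in the vmwgfx buffer object */
	return container_of(gobj, struct vmw_bo, tbo.base);
}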
...@@ -58,10 +58,11 @@ struct vmw_user_resource_conv { ...@@ -58,10 +58,11 @@ struct vmw_user_resource_conv {
* struct vmw_res_func - members and functions common for a resource type * struct vmw_res_func - members and functions common for a resource type
* *
* @res_type: Enum that identifies the lru list to use for eviction. * @res_type: Enum that identifies the lru list to use for eviction.
* @needs_backup: Whether the resource is guest-backed and needs * @needs_guest_memory:Whether the resource is guest-backed and needs
* persistent buffer storage. * persistent buffer storage.
* @type_name: String that identifies the resource type. * @type_name: String that identifies the resource type.
* @backup_placement: TTM placement for backup buffers. * @domain: TTM placement for guest memory buffers.
* @busy_domain: TTM busy placement for guest memory buffers.
* @may_evict Whether the resource may be evicted. * @may_evict Whether the resource may be evicted.
* @create: Create a hardware resource. * @create: Create a hardware resource.
* @destroy: Destroy a hardware resource. * @destroy: Destroy a hardware resource.
...@@ -81,7 +82,7 @@ struct vmw_user_resource_conv { ...@@ -81,7 +82,7 @@ struct vmw_user_resource_conv {
*/ */
struct vmw_res_func { struct vmw_res_func {
enum vmw_res_type res_type; enum vmw_res_type res_type;
bool needs_backup; bool needs_guest_memory;
const char *type_name; const char *type_name;
u32 domain; u32 domain;
u32 busy_domain; u32 busy_domain;
......
...@@ -149,7 +149,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, ...@@ -149,7 +149,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
sou->base.set_gui_y = cmd->obj.root.y; sou->base.set_gui_y = cmd->obj.root.y;
/* Ok to assume that buffer is pinned in vram */ /* Ok to assume that buffer is pinned in vram */
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); vmw_bo_get_guest_ptr(&sou->buffer->tbo, &cmd->obj.backingStore.ptr);
cmd->obj.backingStore.pitch = mode->hdisplay * 4; cmd->obj.backingStore.pitch = mode->hdisplay * 4;
vmw_cmd_commit(dev_priv, fifo_size); vmw_cmd_commit(dev_priv, fifo_size);
...@@ -410,9 +410,13 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -410,9 +410,13 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc; struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
size_t size;
int ret; int ret;
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_VRAM,
.busy_domain = VMW_BO_DOMAIN_VRAM,
.bo_type = ttm_bo_type_device,
.pin = true
};
if (!new_fb) { if (!new_fb) {
vmw_bo_unreference(&vps->bo); vmw_bo_unreference(&vps->bo);
...@@ -421,11 +425,11 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -421,11 +425,11 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return 0; return 0;
} }
size = new_state->crtc_w * new_state->crtc_h * 4; bo_params.size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev); dev_priv = vmw_priv(crtc->dev);
if (vps->bo) { if (vps->bo) {
if (vps->bo_size == size) { if (vps->bo_size == bo_params.size) {
/* /*
* Note that this might temporarily up the pin-count * Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called. * to 2, until cleanup_fb() is called.
...@@ -444,17 +448,12 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, ...@@ -444,17 +448,12 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc. * resume the overlays, this is preferred to failing to alloc.
*/ */
vmw_overlay_pause_all(dev_priv); vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_create(dev_priv, size, ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo);
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM,
false, true, &vps->bo);
vmw_overlay_resume_all(dev_priv); vmw_overlay_resume_all(dev_priv);
if (ret) { if (ret)
vps->bo = NULL; /* vmw_bo_init frees on error */
return ret; return ret;
}
vps->bo_size = size; vps->bo_size = bo_params.size;
/* /*
* TTM already thinks the buffer is pinned, but make sure the * TTM already thinks the buffer is pinned, but make sure the
...@@ -491,7 +490,7 @@ static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update, ...@@ -491,7 +490,7 @@ static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
gmr->body.format.colorDepth = depth; gmr->body.format.colorDepth = depth;
gmr->body.format.reserved = 0; gmr->body.format.reserved = 0;
gmr->body.bytesPerLine = update->vfb->base.pitches[0]; gmr->body.bytesPerLine = update->vfb->base.pitches[0];
vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr); vmw_bo_get_guest_ptr(&vfbbo->buffer->tbo, &gmr->body.ptr);
return sizeof(*gmr); return sizeof(*gmr);
} }
...@@ -973,7 +972,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv, ...@@ -973,7 +972,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
cmd->body.format.reserved = 0; cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = framebuffer->base.pitches[0]; cmd->body.bytesPerLine = framebuffer->base.pitches[0];
/* Buffer is reserved in vram or GMR */ /* Buffer is reserved in vram or GMR */
vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr); vmw_bo_get_guest_ptr(&buf->tbo, &cmd->body.ptr);
vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0; return 0;
......
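The prepare_fb hunk above switches to the vmw_bo_params interface: placement, pin state, type and size are gathered into one struct and passed to vmw_bo_create() instead of a long argument list. Stripped of the diff markers, the calling convention is roughly the following (values illustrative, error handling abbreviated):

	struct vmw_bo *vbo;
	struct vmw_bo_params bo_params = {
		.domain      = VMW_BO_DOMAIN_VRAM,
		.busy_domain = VMW_BO_DOMAIN_VRAM,
		.bo_type     = ttm_bo_type_device,
		.size        = new_state->crtc_w * new_state->crtc_h * 4,
		.pin         = true,
	};
	int ret;

	ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
	/*
	 * On failure the out pointer is presumably left untouched, which is
	 * why the old "vps->bo = NULL" fixup can go away.
	 */
	if (ret)
		return ret;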
...@@ -89,7 +89,7 @@ const struct vmw_user_resource_conv *user_shader_converter = ...@@ -89,7 +89,7 @@ const struct vmw_user_resource_conv *user_shader_converter =
static const struct vmw_res_func vmw_gb_shader_func = { static const struct vmw_res_func vmw_gb_shader_func = {
.res_type = vmw_res_shader, .res_type = vmw_res_shader,
.needs_backup = true, .needs_guest_memory = true,
.may_evict = true, .may_evict = true,
.prio = 3, .prio = 3,
.dirty_prio = 3, .dirty_prio = 3,
...@@ -104,7 +104,7 @@ static const struct vmw_res_func vmw_gb_shader_func = { ...@@ -104,7 +104,7 @@ static const struct vmw_res_func vmw_gb_shader_func = {
static const struct vmw_res_func vmw_dx_shader_func = { static const struct vmw_res_func vmw_dx_shader_func = {
.res_type = vmw_res_shader, .res_type = vmw_res_shader,
.needs_backup = true, .needs_guest_memory = true,
.may_evict = true, .may_evict = true,
.prio = 3, .prio = 3,
.dirty_prio = 3, .dirty_prio = 3,
...@@ -178,10 +178,10 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, ...@@ -178,10 +178,10 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
return ret; return ret;
} }
res->backup_size = size; res->guest_memory_size = size;
if (byte_code) { if (byte_code) {
res->backup = vmw_bo_reference(byte_code); res->guest_memory_bo = vmw_bo_reference(byte_code);
res->backup_offset = offset; res->guest_memory_offset = offset;
} }
shader->size = size; shader->size = size;
shader->type = type; shader->type = type;
...@@ -262,8 +262,8 @@ static int vmw_gb_shader_bind(struct vmw_resource *res, ...@@ -262,8 +262,8 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body); cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id; cmd->body.shid = res->id;
cmd->body.mobid = bo->resource->start; cmd->body.mobid = bo->resource->start;
cmd->body.offsetInBytes = res->backup_offset; cmd->body.offsetInBytes = res->guest_memory_offset;
res->backup_dirty = false; res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0; return 0;
...@@ -280,7 +280,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, ...@@ -280,7 +280,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
} *cmd; } *cmd;
struct vmw_fence_obj *fence; struct vmw_fence_obj *fence;
BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB); BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) if (unlikely(cmd == NULL))
...@@ -400,8 +400,8 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res) ...@@ -400,8 +400,8 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body); cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id; cmd->body.cid = shader->ctx->id;
cmd->body.shid = shader->id; cmd->body.shid = shader->id;
cmd->body.mobid = res->backup->base.resource->start; cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
cmd->body.offsetInBytes = res->backup_offset; cmd->body.offsetInBytes = res->guest_memory_offset;
vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(shader->cotable, &shader->cotable_head); vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
...@@ -511,7 +511,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res, ...@@ -511,7 +511,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
struct vmw_fence_obj *fence; struct vmw_fence_obj *fence;
int ret; int ret;
BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB); BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex); mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_scrub(res); ret = vmw_dx_shader_scrub(res);
...@@ -785,7 +785,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, ...@@ -785,7 +785,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
return ret; return ret;
} }
if ((u64)buffer->base.base.size < (u64)size + (u64)offset) { if ((u64)buffer->tbo.base.size < (u64)size + (u64)offset) {
VMW_DEBUG_USER("Illegal buffer- or shader size.\n"); VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL; ret = -EINVAL;
goto out_bad_arg; goto out_bad_arg;
...@@ -891,25 +891,29 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, ...@@ -891,25 +891,29 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
bool is_iomem; bool is_iomem;
int ret; int ret;
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
.pin = true
};
if (!vmw_shader_id_ok(user_key, shader_type)) if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL; return -EINVAL;
ret = vmw_bo_create(dev_priv, size, ret = vmw_bo_create(dev_priv, &bo_params, &buf);
VMW_BO_DOMAIN_SYS,
VMW_BO_DOMAIN_SYS,
true, true, &buf);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out; goto out;
ret = ttm_bo_reserve(&buf->base, false, true, NULL); ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto no_reserve; goto no_reserve;
/* Map and copy shader bytecode. */ /* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->base, 0, PFN_UP(size), &map); ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
ttm_bo_unreserve(&buf->base); ttm_bo_unreserve(&buf->tbo);
goto no_reserve; goto no_reserve;
} }
...@@ -917,12 +921,9 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, ...@@ -917,12 +921,9 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
WARN_ON(is_iomem); WARN_ON(is_iomem);
ttm_bo_kunmap(&map); ttm_bo_kunmap(&map);
vmw_bo_placement_set(buf, ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
VMW_BO_DOMAIN_SYS,
VMW_BO_DOMAIN_SYS);
ret = ttm_bo_validate(&buf->base, &buf->placement, &ctx);
WARN_ON(ret != 0); WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base); ttm_bo_unreserve(&buf->tbo);
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type); res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
......
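With the buffer's placement now chosen at creation time (VMW_BO_DOMAIN_SYS in bo_params), the separate vmw_bo_placement_set() call before validation is presumably redundant, and the compat-shader upload reduces to a plain reserve/kmap/copy/validate sequence on the embedded TTM object. Condensed, with error paths omitted and the elided copy kept as a placeholder:

	ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
	ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
	/* copy the shader bytecode into ttm_kmap_obj_virtual(&map, &is_iomem) */
	ttm_bo_kunmap(&map);
	ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
	ttm_bo_unreserve(&buf->tbo);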
...@@ -82,7 +82,7 @@ static void vmw_view_commit_notify(struct vmw_resource *res, ...@@ -82,7 +82,7 @@ static void vmw_view_commit_notify(struct vmw_resource *res,
static const struct vmw_res_func vmw_view_func = { static const struct vmw_res_func vmw_view_func = {
.res_type = vmw_res_view, .res_type = vmw_res_view,
.needs_backup = false, .needs_guest_memory = false,
.may_evict = false, .may_evict = false,
.type_name = "DX view", .type_name = "DX view",
.domain = VMW_BO_DOMAIN_SYS, .domain = VMW_BO_DOMAIN_SYS,
......
...@@ -505,11 +505,11 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty) ...@@ -505,11 +505,11 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
/* Assume we are blitting from Guest (bo) to Host (display_srf) */ /* Assume we are blitting from Guest (bo) to Host (display_srf) */
src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
src_bo = &stdu->display_srf->res.backup->base; src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
src_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp; src_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch; dst_pitch = ddirty->pitch;
dst_bo = &ddirty->buf->base; dst_bo = &ddirty->buf->tbo;
dst_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp; dst_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp;
(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
...@@ -1080,11 +1080,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd, ...@@ -1080,11 +1080,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
diff.cpp = stdu->cpp; diff.cpp = stdu->cpp;
dst_bo = &stdu->display_srf->res.backup->base; dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp; dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
src_bo = &vfbbo->buffer->base; src_bo = &vfbbo->buffer->tbo;
src_pitch = update->vfb->base.pitches[0]; src_pitch = update->vfb->base.pitches[0];
src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left * src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
stdu->cpp; stdu->cpp;
......
...@@ -63,7 +63,7 @@ static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res, ...@@ -63,7 +63,7 @@ static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
static const struct vmw_res_func vmw_dx_streamoutput_func = { static const struct vmw_res_func vmw_dx_streamoutput_func = {
.res_type = vmw_res_streamoutput, .res_type = vmw_res_streamoutput,
.needs_backup = true, .needs_guest_memory = true,
.may_evict = false, .may_evict = false,
.type_name = "DX streamoutput", .type_name = "DX streamoutput",
.domain = VMW_BO_DOMAIN_MOB, .domain = VMW_BO_DOMAIN_MOB,
...@@ -106,8 +106,8 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res) ...@@ -106,8 +106,8 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT; cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body); cmd->header.size = sizeof(cmd->body);
cmd->body.soid = so->id; cmd->body.soid = so->id;
cmd->body.mobid = res->backup->base.resource->start; cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
cmd->body.offsetInBytes = res->backup_offset; cmd->body.offsetInBytes = res->guest_memory_offset;
cmd->body.sizeInBytes = so->size; cmd->body.sizeInBytes = so->size;
vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cmd_commit(dev_priv, sizeof(*cmd));
...@@ -197,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback, ...@@ -197,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
struct vmw_fence_obj *fence; struct vmw_fence_obj *fence;
int ret; int ret;
if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB)) if (WARN_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB))
return -EINVAL; return -EINVAL;
mutex_lock(&dev_priv->binding_mutex); mutex_lock(&dev_priv->binding_mutex);
......
...@@ -50,13 +50,6 @@ static const struct ttm_place gmr_placement_flags = { ...@@ -50,13 +50,6 @@ static const struct ttm_place gmr_placement_flags = {
.flags = 0 .flags = 0
}; };
static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_MOB,
.flags = 0
};
struct ttm_placement vmw_vram_placement = { struct ttm_placement vmw_vram_placement = {
.num_placement = 1, .num_placement = 1,
.placement = &vram_placement_flags, .placement = &vram_placement_flags,
...@@ -78,13 +71,6 @@ static const struct ttm_place vram_gmr_placement_flags[] = { ...@@ -78,13 +71,6 @@ static const struct ttm_place vram_gmr_placement_flags[] = {
} }
}; };
static const struct ttm_place vmw_sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_SYSTEM,
.flags = 0
};
struct ttm_placement vmw_vram_gmr_placement = { struct ttm_placement vmw_vram_gmr_placement = {
.num_placement = 2, .num_placement = 2,
.placement = vram_gmr_placement_flags, .placement = vram_gmr_placement_flags,
...@@ -92,13 +78,6 @@ struct ttm_placement vmw_vram_gmr_placement = { ...@@ -92,13 +78,6 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags .busy_placement = &gmr_placement_flags
}; };
struct ttm_placement vmw_vram_sys_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
struct ttm_placement vmw_sys_placement = { struct ttm_placement vmw_sys_placement = {
.num_placement = 1, .num_placement = 1,
.placement = &sys_placement_flags, .placement = &sys_placement_flags,
...@@ -106,20 +85,6 @@ struct ttm_placement vmw_sys_placement = { ...@@ -106,20 +85,6 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags .busy_placement = &sys_placement_flags
}; };
struct ttm_placement vmw_pt_sys_placement = {
.num_placement = 1,
.placement = &vmw_sys_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vmw_sys_placement_flags
};
struct ttm_placement vmw_mob_placement = {
.num_placement = 1,
.num_busy_placement = 1,
.placement = &mob_placement_flags,
.busy_placement = &mob_placement_flags
};
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/** /**
...@@ -462,7 +427,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, ...@@ -462,7 +427,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
if (!vmw_be) if (!vmw_be)
return NULL; return NULL;
vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev); vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL; vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
...@@ -488,7 +453,7 @@ static void vmw_evict_flags(struct ttm_buffer_object *bo, ...@@ -488,7 +453,7 @@ static void vmw_evict_flags(struct ttm_buffer_object *bo,
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{ {
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
switch (mem->mem_type) { switch (mem->mem_type) {
case TTM_PL_SYSTEM: case TTM_PL_SYSTEM:
...@@ -599,34 +564,39 @@ struct ttm_device_funcs vmw_bo_driver = { ...@@ -599,34 +564,39 @@ struct ttm_device_funcs vmw_bo_driver = {
}; };
int vmw_bo_create_and_populate(struct vmw_private *dev_priv, int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
unsigned long bo_size, size_t bo_size, u32 domain,
struct ttm_buffer_object **bo_p) struct vmw_bo **bo_p)
{ {
struct ttm_operation_ctx ctx = { struct ttm_operation_ctx ctx = {
.interruptible = false, .interruptible = false,
.no_wait_gpu = false .no_wait_gpu = false
}; };
struct ttm_buffer_object *bo; struct vmw_bo *vbo;
int ret; int ret;
struct vmw_bo_params bo_params = {
.domain = domain,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
.pin = true
};
ret = vmw_bo_create_kernel(dev_priv, bo_size, ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
&vmw_pt_sys_placement,
&bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = ttm_bo_reserve(bo, false, true, NULL); ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0); BUG_ON(ret != 0);
ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx); ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) { if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt = struct vmw_ttm_tt *vmw_tt =
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
ret = vmw_ttm_map_dma(vmw_tt); ret = vmw_ttm_map_dma(vmw_tt);
} }
ttm_bo_unreserve(bo); ttm_bo_unreserve(&vbo->tbo);
if (likely(ret == 0)) if (likely(ret == 0))
*bo_p = bo; *bo_p = vbo;
return ret; return ret;
} }
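vmw_bo_create_and_populate() now takes the TTM domain explicitly and hands back a vmw_bo, pinning it through bo_params rather than through the dedicated vmw_pt_sys_placement removed above; VMW_BO_DOMAIN_WAITABLE_SYS is presumably the vmw_bo_domain equivalent of that old VMW_PL_SYSTEM placement. A caller-side sketch, mirroring the mob page-table hunk earlier in this diff:

	struct vmw_bo *pt_bo;
	int ret;

	ret = vmw_bo_create_and_populate(dev_priv, num_pages * PAGE_SIZE,
					 VMW_BO_DOMAIN_WAITABLE_SYS, &pt_bo);
	if (ret)
		return ret;
	/*
	 * ... later torn down with vmw_bo_unpin_unlocked(&pt_bo->tbo) and
	 * vmw_bo_unreference(&pt_bo), as in vmw_mob_destroy() above.
	 */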
...@@ -81,7 +81,7 @@ static void vmw_stream_set_arg_handle(void *data, u32 handle) ...@@ -81,7 +81,7 @@ static void vmw_stream_set_arg_handle(void *data, u32 handle)
static const struct vmw_simple_resource_func va_stream_func = { static const struct vmw_simple_resource_func va_stream_func = {
.res_func = { .res_func = {
.res_type = vmw_res_stream, .res_type = vmw_res_stream,
.needs_backup = false, .needs_guest_memory = false,
.may_evict = false, .may_evict = false,
.type_name = "overlay stream", .type_name = "overlay stream",
.domain = VMW_BO_DOMAIN_SYS, .domain = VMW_BO_DOMAIN_SYS,
......
...@@ -55,13 +55,13 @@ struct vmw_validation_bo_node { ...@@ -55,13 +55,13 @@ struct vmw_validation_bo_node {
* @head: List head for the resource validation list. * @head: List head for the resource validation list.
* @hash: A hash entry used for the duplicate detection hash table. * @hash: A hash entry used for the duplicate detection hash table.
* @res: Reference counted resource pointer. * @res: Reference counted resource pointer.
* @new_backup: Non ref-counted pointer to new backup buffer to be assigned * @new_guest_memory_bo: Non ref-counted pointer to new guest memory buffer
* to a resource. * to be assigned to a resource.
* @new_backup_offset: Offset into the new backup mob for resources that can * @new_guest_memory_offset: Offset into the new backup mob for resources
* share MOBs. * that can share MOBs.
* @no_buffer_needed: Kernel does not need to allocate a MOB during validation, * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
* the command stream provides a mob bind operation. * the command stream provides a mob bind operation.
* @switching_backup: The validation process is switching backup MOB. * @switching_guest_memory_bo: The validation process is switching backup MOB.
* @first_usage: True iff the resource has been seen only once in the current * @first_usage: True iff the resource has been seen only once in the current
* validation batch. * validation batch.
* @reserved: Whether the resource is currently reserved by this process. * @reserved: Whether the resource is currently reserved by this process.
...@@ -76,10 +76,10 @@ struct vmw_validation_res_node { ...@@ -76,10 +76,10 @@ struct vmw_validation_res_node {
struct list_head head; struct list_head head;
struct vmwgfx_hash_item hash; struct vmwgfx_hash_item hash;
struct vmw_resource *res; struct vmw_resource *res;
struct vmw_bo *new_backup; struct vmw_bo *new_guest_memory_bo;
unsigned long new_backup_offset; unsigned long new_guest_memory_offset;
u32 no_buffer_needed : 1; u32 no_buffer_needed : 1;
u32 switching_backup : 1; u32 switching_guest_memory_bo : 1;
u32 first_usage : 1; u32 first_usage : 1;
u32 reserved : 1; u32 reserved : 1;
u32 dirty : 1; u32 dirty : 1;
...@@ -193,7 +193,7 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, ...@@ -193,7 +193,7 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
struct vmw_validation_bo_node *entry; struct vmw_validation_bo_node *entry;
list_for_each_entry(entry, &ctx->bo_list, base.head) { list_for_each_entry(entry, &ctx->bo_list, base.head) {
if (entry->base.bo == &vbo->base) { if (entry->base.bo == &vbo->tbo) {
bo_node = entry; bo_node = entry;
break; break;
} }
...@@ -279,7 +279,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx, ...@@ -279,7 +279,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key); bo_node->hash.key);
} }
val_buf = &bo_node->base; val_buf = &bo_node->base;
val_buf->bo = ttm_bo_get_unless_zero(&vbo->base); val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
if (!val_buf->bo) if (!val_buf->bo)
return -ESRCH; return -ESRCH;
val_buf->num_shared = 0; val_buf->num_shared = 0;
...@@ -393,23 +393,23 @@ void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx, ...@@ -393,23 +393,23 @@ void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
* the resource. * the resource.
* @vbo: The new backup buffer object MOB. This buffer object needs to have * @vbo: The new backup buffer object MOB. This buffer object needs to have
* already been registered with the validation context. * already been registered with the validation context.
* @backup_offset: Offset into the new backup MOB. * @guest_memory_offset: Offset into the new backup MOB.
*/ */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx, void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
void *val_private, void *val_private,
struct vmw_bo *vbo, struct vmw_bo *vbo,
unsigned long backup_offset) unsigned long guest_memory_offset)
{ {
struct vmw_validation_res_node *val; struct vmw_validation_res_node *val;
val = container_of(val_private, typeof(*val), private); val = container_of(val_private, typeof(*val), private);
val->switching_backup = 1; val->switching_guest_memory_bo = 1;
if (val->first_usage) if (val->first_usage)
val->no_buffer_needed = 1; val->no_buffer_needed = 1;
val->new_backup = vbo; val->new_guest_memory_bo = vbo;
val->new_backup_offset = backup_offset; val->new_guest_memory_offset = guest_memory_offset;
} }
/** /**
...@@ -437,8 +437,8 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx, ...@@ -437,8 +437,8 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
goto out_unreserve; goto out_unreserve;
val->reserved = 1; val->reserved = 1;
if (res->backup) { if (res->guest_memory_bo) {
struct vmw_bo *vbo = res->backup; struct vmw_bo *vbo = res->guest_memory_bo;
vmw_bo_placement_set(vbo, vmw_bo_placement_set(vbo,
res->func->domain, res->func->domain,
...@@ -448,11 +448,11 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx, ...@@ -448,11 +448,11 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
goto out_unreserve; goto out_unreserve;
} }
if (val->switching_backup && val->new_backup && if (val->switching_guest_memory_bo && val->new_guest_memory_bo &&
res->coherent) { res->coherent) {
struct vmw_validation_bo_node *bo_node = struct vmw_validation_bo_node *bo_node =
vmw_validation_find_bo_dup(ctx, vmw_validation_find_bo_dup(ctx,
val->new_backup); val->new_guest_memory_bo);
if (WARN_ON(!bo_node)) { if (WARN_ON(!bo_node)) {
ret = -EINVAL; ret = -EINVAL;
...@@ -495,9 +495,9 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, ...@@ -495,9 +495,9 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
vmw_resource_unreserve(val->res, vmw_resource_unreserve(val->res,
val->dirty_set, val->dirty_set,
val->dirty, val->dirty,
val->switching_backup, val->switching_guest_memory_bo,
val->new_backup, val->new_guest_memory_bo,
val->new_backup_offset); val->new_guest_memory_offset);
} }
} }
...@@ -512,8 +512,7 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, ...@@ -512,8 +512,7 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
bool interruptible) bool interruptible)
{ {
struct vmw_bo *vbo = struct vmw_bo *vbo = to_vmw_bo(&bo->base);
container_of(bo, struct vmw_bo, base);
struct ttm_operation_ctx ctx = { struct ttm_operation_ctx ctx = {
.interruptible = interruptible, .interruptible = interruptible,
.no_wait_gpu = false .no_wait_gpu = false
...@@ -523,7 +522,7 @@ static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, ...@@ -523,7 +522,7 @@ static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
if (atomic_read(&vbo->cpu_writers)) if (atomic_read(&vbo->cpu_writers))
return -EBUSY; return -EBUSY;
if (vbo->base.pin_count > 0) if (vbo->tbo.pin_count > 0)
return 0; return 0;
ret = ttm_bo_validate(bo, &vbo->placement, &ctx); ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
...@@ -554,8 +553,7 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) ...@@ -554,8 +553,7 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
int ret; int ret;
list_for_each_entry(entry, &ctx->bo_list, base.head) { list_for_each_entry(entry, &ctx->bo_list, base.head) {
struct vmw_bo *vbo = struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
container_of(entry->base.bo, typeof(*vbo), base);
ret = vmw_validation_bo_validate_single(entry->base.bo, intr); ret = vmw_validation_bo_validate_single(entry->base.bo, intr);
...@@ -605,7 +603,7 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) ...@@ -605,7 +603,7 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
list_for_each_entry(val, &ctx->resource_list, head) { list_for_each_entry(val, &ctx->resource_list, head) {
struct vmw_resource *res = val->res; struct vmw_resource *res = val->res;
struct vmw_bo *backup = res->backup; struct vmw_bo *backup = res->guest_memory_bo;
ret = vmw_resource_validate(res, intr, val->dirty_set && ret = vmw_resource_validate(res, intr, val->dirty_set &&
val->dirty); val->dirty);
...@@ -616,8 +614,8 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) ...@@ -616,8 +614,8 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
} }
/* Check if the resource switched backup buffer */ /* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) { if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) {
struct vmw_bo *vbo = res->backup; struct vmw_bo *vbo = res->guest_memory_bo;
vmw_bo_placement_set(vbo, res->func->domain, vmw_bo_placement_set(vbo, res->func->domain,
res->func->busy_domain); res->func->busy_domain);
...@@ -855,9 +853,7 @@ void vmw_validation_bo_backoff(struct vmw_validation_context *ctx) ...@@ -855,9 +853,7 @@ void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
list_for_each_entry(entry, &ctx->bo_list, base.head) { list_for_each_entry(entry, &ctx->bo_list, base.head) {
if (entry->coherent_count) { if (entry->coherent_count) {
unsigned int coherent_count = entry->coherent_count; unsigned int coherent_count = entry->coherent_count;
struct vmw_bo *vbo = struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
container_of(entry->base.bo, typeof(*vbo),
base);
while (coherent_count--) while (coherent_count--)
vmw_bo_dirty_release(vbo); vmw_bo_dirty_release(vbo);
......