Commit 3eab3d9e authored by Thomas Hellstrom

drm/vmwgfx: Add command buffer support v3

Add command buffer support.
Currently we don't implement preemption or fancy error handling.
Tested with a couple of mesa-demos, compiz/unity and viewperf maya-03.

v2:
- Synchronize with pending work at command buffer manager takedown.
- Add an interface to flush the current command buffer for latency-critical
  command batches and apply it to framebuffer dirtying.

v3:
- Minor fixes of definitions and typos to address reviews.
- Removed branch predictor hints that this patch had added or moved.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent cb09bbcc
@@ -7,6 +7,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o \
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
@@ -72,6 +72,12 @@ static struct ttm_place mob_placement_flags = {
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
static struct ttm_place mob_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
@@ -200,6 +206,13 @@ struct ttm_placement vmw_mob_placement = {
.busy_placement = &mob_placement_flags
};
struct ttm_placement vmw_mob_ne_placement = {
.num_placement = 1,
.num_busy_placement = 1,
.placement = &mob_ne_placement_flags,
.busy_placement = &mob_ne_placement_flags
};
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
......
This diff is collapsed. (Presumably the new vmwgfx_cmdbuf.c implementation, too large for inline display.)
@@ -278,6 +278,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" Command Buffers 2.\n");
if (capabilities & SVGA_CAP_GBOBJECTS)
DRM_INFO(" Guest Backed Resources.\n");
if (capabilities & SVGA_CAP_CMD_BUFFERS_3)
DRM_INFO(" Command Buffers 3.\n");
}
/**
@@ -362,6 +364,17 @@ static int vmw_request_device_late(struct vmw_private *dev_priv)
}
}
if (dev_priv->cman) {
ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
256*4096, 2*4096);
if (ret) {
struct vmw_cmdbuf_man *man = dev_priv->cman;
dev_priv->cman = NULL;
vmw_cmdbuf_man_destroy(man);
}
}
return 0;
}
@@ -375,6 +388,9 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
if (IS_ERR(dev_priv->cman))
dev_priv->cman = NULL;
ret = vmw_request_device_late(dev_priv);
if (ret)
@@ -387,10 +403,14 @@ static int vmw_request_device(struct vmw_private *dev_priv)
return 0;
out_no_query_bo:
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
vmw_otables_takedown(dev_priv);
}
if (dev_priv->cman)
vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
@@ -415,6 +435,9 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
BUG_ON(dev_priv->pinned_bo != NULL);
ttm_bo_unref(&dev_priv->dummy_query_bo);
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
vmw_otables_takedown(dev_priv);
@@ -432,6 +455,9 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
vmw_fence_fifo_down(dev_priv->fman);
if (dev_priv->cman)
vmw_cmdbuf_man_destroy(dev_priv->cman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
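
Taken together, the hunks above give the manager a symmetric two-stage lifecycle: it is created in vmw_request_device() and its pool is set up in vmw_request_device_late(); at takedown the pool is removed in vmw_release_device_early() and the manager itself destroyed in vmw_release_device_late(). A condensed, illustrative summary (not literal code from the patch; error handling elided):

	/* Bring-up */
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096, 2*4096);

	/* Takedown, in reverse order */
	vmw_cmdbuf_remove_pool(dev_priv->cman);
	vmw_cmdbuf_man_destroy(dev_priv->cman);

The pool arguments correspond to a 1 MiB pool with, presumably, an 8 KiB default command buffer size.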
......
@@ -453,6 +453,8 @@ struct vmw_private {
spinlock_t waiter_lock;
int fence_queue_waiters; /* Protected by waiter_lock */
int goal_queue_waiters; /* Protected by waiter_lock */
int cmdbuf_waiters; /* Protected by irq_lock */
int error_waiters; /* Protected by irq_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -535,6 +537,8 @@ struct vmw_private {
*/
struct ttm_buffer_object *otable_bo;
struct vmw_otable *otables;
struct vmw_cmdbuf_man *cman;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -729,6 +733,8 @@ extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
bool interruptible);
/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -753,6 +759,7 @@ extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -855,6 +862,10 @@ extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count);
/**
* Rudimentary fence-like objects currently used only for throttling -
@@ -1077,6 +1088,35 @@ extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
struct list_head *list);
/*
* Command buffer management - vmwgfx_cmdbuf.c
*/
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;
extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
int ctx_id, bool interruptible,
struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
struct vmw_cmdbuf_header *header,
bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
size_t size, bool interruptible,
struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible);
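
The declarations above are the entire interface the rest of the driver sees. For orientation, a minimal usage sketch (illustrative only, not part of the patch; it mirrors the execbuf path added below): allocate a headed buffer, fill it, then reserve and commit it.

	struct vmw_cmdbuf_header *header;
	void *cmd;

	cmd = vmw_cmdbuf_alloc(dev_priv->cman, size, true, &header);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* ... write @size bytes of SVGA commands into @cmd ... */

	/* Re-reserve the allocation for submission, then commit it. */
	cmd = vmw_cmdbuf_reserve(dev_priv->cman, size, SVGA3D_INVALID_ID,
				 false, header);
	vmw_cmdbuf_commit(dev_priv->cman, size, header, false);

Passing flush == false leaves the batch buffered; a caller on a latency-critical path would pass true or call vmw_cmdbuf_cur_flush() afterwards.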
/**
* Inline helper functions
*/
......
@@ -2417,7 +2417,126 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
}
/**
* vmw_execbuf_submit_fifo - Patch a command batch and submit it using
* the fifo.
*
* @dev_priv: Pointer to a device private structure.
* @kernel_commands: Pointer to the unpatched command batch.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
* Side effects: If this function returns 0, then the command batch
* pointed to by @kernel_commands will have been modified.
*/
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
void *kernel_commands,
u32 command_size,
struct vmw_sw_context *sw_context)
{
void *cmd = vmw_fifo_reserve(dev_priv, command_size);
if (!cmd) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
return -ENOMEM;
}
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
return 0;
}
/**
* vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
* the command buffer manager.
*
* @dev_priv: Pointer to a device private structure.
* @header: Opaque handle to the command buffer allocation.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
* Side effects: If this function returns 0, then the command buffer
* represented by @header will have been modified.
*/
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
struct vmw_cmdbuf_header *header,
u32 command_size,
struct vmw_sw_context *sw_context)
{
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
SVGA3D_INVALID_ID, false, header);
vmw_apply_relocations(sw_context);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
return 0;
}
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to the command batch, or NULL if
 * the batch is to be copied from @user_commands.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function returns the value
 * @kernel_commands had on entry. That value may be NULL, in which case
 * *@header is set to NULL as well.
 * If an error is encountered, the function returns an error pointer.
 * In particular, if it is interrupted by a signal while sleeping, it returns
 * -ERESTARTSYS cast to an error pointer.
 */
void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
void __user *user_commands,
void *kernel_commands,
u32 command_size,
struct vmw_cmdbuf_header **header)
{
size_t cmdbuf_size;
int ret;
*header = NULL;
if (!dev_priv->cman || kernel_commands)
return kernel_commands;
if (command_size > SVGA_CB_MAX_SIZE) {
DRM_ERROR("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
true, header);
if (IS_ERR(kernel_commands))
return kernel_commands;
ret = copy_from_user(kernel_commands, user_commands,
command_size);
if (ret) {
DRM_ERROR("Failed copying commands.\n");
vmw_cmdbuf_header_free(*header);
*header = NULL;
return ERR_PTR(-EFAULT);
}
return kernel_commands;
}
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
@@ -2432,18 +2551,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
struct vmw_cmdbuf_header *header;
struct ww_acquire_ctx ticket;
uint32_t handle;
void *cmd;
int ret;
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (ret)
return ret;
}
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
kernel_commands, command_size,
&header);
if (IS_ERR(kernel_commands))
return PTR_ERR(kernel_commands);
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
if (ret) {
ret = -ERESTARTSYS;
goto out_free_header;
}
sw_context->kernel = false;
if (kernel_commands == NULL) {
sw_context->kernel = false;
ret = vmw_resize_cmd_bounce(sw_context, command_size);
if (unlikely(ret != 0))
goto out_unlock;
@@ -2458,7 +2592,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
}
kernel_commands = sw_context->cmd_bounce;
} else
} else if (!header)
sw_context->kernel = true;
sw_context->fp = vmw_fpriv(file_priv);
@@ -2478,7 +2612,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
@@ -2502,14 +2635,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err;
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (unlikely(ret != 0))
goto out_err;
}
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
ret = -ERESTARTSYS;
@@ -2522,20 +2647,16 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock_binding;
}
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
goto out_unlock_binding;
if (!header) {
ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
command_size, sw_context);
} else {
ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
sw_context);
header = NULL;
}
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
if (ret)
goto out_unlock_binding;
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2610,6 +2731,9 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_resource_list_unreference(&resource_list);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
out_free_header:
if (header)
vmw_cmdbuf_header_free(header);
return ret;
}
......
@@ -257,6 +257,7 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
cmd->body.width = cpu_to_le32(w);
cmd->body.height = cpu_to_le32(h);
vmw_fifo_commit(vmw_priv, sizeof(*cmd));
vmw_fifo_flush(vmw_priv, false);
}
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
......
@@ -310,7 +310,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
* Returns:
* Pointer to the fifo, or NULL on error (possible hardware hang).
*/
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -389,9 +390,29 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
out_err:
fifo_state->reserved_size = 0;
mutex_unlock(&fifo_state->fifo_mutex);
return NULL;
}
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
void *ret;
if (dev_priv->cman)
ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
SVGA3D_INVALID_ID, false, NULL);
else
ret = vmw_local_fifo_reserve(dev_priv, bytes);
if (IS_ERR_OR_NULL(ret)) {
DRM_ERROR("Fifo reserve failure of %u bytes.\n",
(unsigned) bytes);
dump_stack();
return NULL;
}
return ret;
}
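
With this wrapper in place, existing callers keep the familiar reserve/fill/commit pattern and are routed transparently to either the command buffer manager or the legacy fifo. A minimal, illustrative caller (not part of the patch), assuming the SVGA_CMD_UPDATE definitions from the svga headers and caller-supplied width/height:

	struct {
		uint32_t cmd;
		SVGAFifoCmdUpdate body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd = SVGA_CMD_UPDATE;
	cmd->body.x = 0;
	cmd->body.y = 0;
	cmd->body.width = width;
	cmd->body.height = height;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));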
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
uint32_t next_cmd,
@@ -434,7 +455,7 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
}
}
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -480,6 +501,46 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
mutex_unlock(&fifo_state->fifo_mutex);
}
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
else
vmw_local_fifo_commit(dev_priv, bytes);
}
/**
* vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
*
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
*/
static void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
if (dev_priv->cman)
vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
else
vmw_local_fifo_commit(dev_priv, bytes);
}
/**
* vmw_fifo_flush - Flush any buffered commands and make sure command processing
* starts.
*
* @dev_priv: Pointer to device private structure.
* @interruptible: Whether to sleep interruptibly if the function needs to wait.
*/
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
might_sleep();
if (dev_priv->cman)
return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
else
return 0;
}
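
Latency-critical paths that need the device to start processing promptly pair a normal commit with an explicit flush; the framebuffer and kms hunks below adopt exactly this pattern (illustrative snippet, not part of the patch):

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	/* Kick any buffered commands to the device; don't wait interruptibly. */
	vmw_fifo_flush(dev_priv, false);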
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
@@ -517,7 +578,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
((unsigned long)fm + sizeof(__le32));
iowrite32(*seqno, &cmd_fence->fence);
vmw_fifo_commit(dev_priv, bytes);
vmw_fifo_commit_flush(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state);
......
@@ -56,6 +56,9 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
SVGA_IRQFLAG_ERROR))
vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
return IRQ_HANDLED;
}
@@ -131,8 +134,16 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
* Block command submission while waiting for idle.
*/
if (fifo_idle)
if (fifo_idle) {
down_read(&fifo_state->rwsem);
if (dev_priv->cman) {
ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
10*HZ);
if (ret)
goto out_err;
}
}
signal_seq = atomic_read(&dev_priv->marker_seq);
ret = 0;
@@ -171,6 +182,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
}
wake_up_all(&dev_priv->fence_queue);
out_err:
if (fifo_idle)
up_read(&fifo_state->rwsem);
@@ -315,3 +327,30 @@ void vmw_irq_uninstall(struct drm_device *dev)
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
unsigned long irq_flags;
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
if ((*waiter_count)++ == 0) {
outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
unsigned long irq_flags;
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
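
These helpers generalize the existing seqno/goal waiter accounting: the first waiter acks any stale pending IRQ status and unmasks the flag, and the last waiter masks it again. A minimal, illustrative use with the new cmdbuf_waiters counter (the real call sites are presumably in the collapsed vmwgfx_cmdbuf.c diff):

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
			       &dev_priv->cmdbuf_waiters);
	/* ... sleep until the device signals command buffer progress ... */
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				  &dev_priv->cmdbuf_waiters);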
@@ -631,6 +631,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
flags, color,
clips, num_clips, inc, NULL);
vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
@@ -987,6 +988,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
clips, num_clips, increment, NULL);
}
vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
@@ -1347,6 +1349,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
break;
}
vmw_fifo_flush(dev_priv, false);
kfree(cmd);
out_free_tmp:
kfree(tmp);
......