Commit 359dc60d authored by Zack Rusin's avatar Zack Rusin

drm/vmwgfx: Remove the throttling code

Throttling was used before fencing to implement early vsync
support in the xorg state tracker a long time ago. The xorg
state tracker was removed years ago, and no one else has ever
used throttling. It's time to remove this code; it hasn't been
used or tested in years.
Signed-off-by: default avatarZack Rusin <zackr@vmware.com>
Reviewed-by: default avatarMartin Krastev <krastevm@vmware.com>
Reviewed-by: default avatarRoland Scheidegger <sroland@vmware.com>
Link: https://patchwork.freedesktop.org/patch/414042/?series=85516&rev=2
parent 8772c0bb
...@@ -2,8 +2,8 @@ ...@@ -2,8 +2,8 @@
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \ vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
......
...@@ -276,13 +276,6 @@ struct vmw_surface { ...@@ -276,13 +276,6 @@ struct vmw_surface {
struct list_head view_list; struct list_head view_list;
}; };
struct vmw_marker_queue {
struct list_head head;
u64 lag;
u64 lag_time;
spinlock_t lock;
};
struct vmw_fifo_state { struct vmw_fifo_state {
unsigned long reserved_size; unsigned long reserved_size;
u32 *dynamic_buffer; u32 *dynamic_buffer;
...@@ -292,7 +285,6 @@ struct vmw_fifo_state { ...@@ -292,7 +285,6 @@ struct vmw_fifo_state {
uint32_t capabilities; uint32_t capabilities;
struct mutex fifo_mutex; struct mutex fifo_mutex;
struct rw_semaphore rwsem; struct rw_semaphore rwsem;
struct vmw_marker_queue marker_queue;
bool dx; bool dx;
}; };
...@@ -1120,19 +1112,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag, ...@@ -1120,19 +1112,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv, extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count); u32 flag, int *waiter_count);
/**
* Rudimentary fence-like objects currently used only for throttling -
* vmwgfx_marker.c
*/
extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
struct vmw_marker_queue *queue, uint32_t us);
/** /**
* Kernel framebuffer - vmwgfx_fb.c * Kernel framebuffer - vmwgfx_fb.c
......
...@@ -4046,11 +4046,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -4046,11 +4046,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} }
if (throttle_us) { if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, VMW_DEBUG_USER("Throttling is no longer supported.\n");
throttle_us);
if (ret)
goto out_free_fence_fd;
} }
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
......
...@@ -157,7 +157,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) ...@@ -157,7 +157,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno); vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);
vmw_marker_queue_init(&fifo->marker_queue);
return 0; return 0;
} }
...@@ -185,8 +184,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) ...@@ -185,8 +184,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES, vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state); dev_priv->traces_state);
vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) { if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer); vfree(fifo->static_buffer);
fifo->static_buffer = NULL; fifo->static_buffer = NULL;
...@@ -563,7 +560,6 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) ...@@ -563,7 +560,6 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *) fm; cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno; cmd_fence->fence = *seqno;
vmw_fifo_commit_flush(dev_priv, bytes); vmw_fifo_commit_flush(dev_priv, bytes);
(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
vmw_update_seqno(dev_priv, fifo_state); vmw_update_seqno(dev_priv, fifo_state);
out_err: out_err:
......
...@@ -121,7 +121,6 @@ void vmw_update_seqno(struct vmw_private *dev_priv, ...@@ -121,7 +121,6 @@ void vmw_update_seqno(struct vmw_private *dev_priv,
if (dev_priv->last_read_seqno != seqno) { if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno; dev_priv->last_read_seqno = seqno;
vmw_marker_pull(&fifo_state->marker_queue, seqno);
vmw_fences_update(dev_priv->fman); vmw_fences_update(dev_priv->fman);
} }
} }
......
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2010 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
/*
 * A single fifo marker: records the seqno of a submitted command
 * batch and the raw time it was submitted, for lag accounting.
 */
struct vmw_marker {
	struct list_head head;	/* entry in vmw_marker_queue::head */
	uint32_t seqno;		/* fence seqno this marker tracks */
	u64 submitted;		/* submission time from ktime_get_raw_ns() */
};
/*
 * Initialize a marker queue: empty marker list, zero accumulated lag,
 * and the lag clock started at the current raw time.
 */
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
	spin_lock_init(&queue->lock);
	INIT_LIST_HEAD(&queue->head);
	queue->lag_time = ktime_get_raw_ns();
	queue->lag = 0;
}
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
struct vmw_marker *marker, *next;
spin_lock(&queue->lock);
list_for_each_entry_safe(marker, next, &queue->head, head) {
kfree(marker);
}
spin_unlock(&queue->lock);
}
/*
 * Append a marker recording @seqno and the current raw time to the
 * tail of @queue.
 *
 * Returns 0 on success, -ENOMEM if the marker allocation fails.
 */
int vmw_marker_push(struct vmw_marker_queue *queue,
		    uint32_t seqno)
{
	struct vmw_marker *m;

	m = kmalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;

	m->submitted = ktime_get_raw_ns();
	m->seqno = seqno;

	spin_lock(&queue->lock);
	list_add_tail(&m->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}
/*
 * Retire all markers whose seqno is at or before @signaled_seqno and
 * refresh the queue's lag estimate.
 *
 * @queue: marker queue to retire markers from.
 * @signaled_seqno: most recent seqno known to have been signaled.
 *
 * Returns 0 if the lag estimate was updated, -EBUSY if no marker was
 * retired and the queue was non-empty.
 */
int vmw_marker_pull(struct vmw_marker_queue *queue,
		    uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	bool updated = false;
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();

	/* Nothing outstanding: the device has caught up, so lag is zero. */
	if (list_empty(&queue->head)) {
		queue->lag = 0;
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		/*
		 * Seqnos wrap around (unsigned arithmetic): a difference
		 * larger than 2^30 means this marker is logically newer
		 * than @signaled_seqno, i.e. not yet signaled — skip it.
		 */
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

		/* Lag is the age of the retired marker's submission. */
		queue->lag = now - marker->submitted;
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}
/*
 * Advance the queue's lag accumulator to the current time and return
 * the resulting lag in nanoseconds.
 *
 * Fix: the original read and returned queue->lag *after* dropping
 * queue->lock, so the return value could race with a concurrent
 * vmw_marker_pull() rewriting queue->lag.  Snapshot the value while
 * still holding the lock instead.
 */
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
	u64 now, lag;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();
	queue->lag += now - queue->lag_time;
	queue->lag_time = now;
	lag = queue->lag;
	spin_unlock(&queue->lock);
	return lag;
}
/*
 * Check whether the current fifo lag is within @us microseconds.
 */
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
		       uint32_t us)
{
	u64 limit_ns = (u64)us * NSEC_PER_USEC;

	return vmw_fifo_lag(queue) <= limit_ns;
}
/*
 * Block until the fifo lag reported by vmw_fifo_lag() drops to at
 * most @us microseconds.
 *
 * @dev_priv: device private structure.
 * @queue: marker queue tracking outstanding submissions.
 * @us: maximum acceptable lag, in microseconds.
 *
 * Each iteration waits (interruptibly, up to 3*HZ per wait) for either
 * the oldest outstanding marker's seqno or, if the queue is empty, the
 * most recently issued seqno, then retires it.
 *
 * Returns 0 on success, or the error from vmw_wait_seqno() on failure.
 */
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		/*
		 * Pick the seqno to wait for: the oldest queued marker,
		 * or the latest issued seqno when no markers remain.
		 */
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						  struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		/* Retire the marker we just waited on; failure is benign. */
		(void) vmw_marker_pull(queue, seqno);
	}
	return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment