Commit c09a3502 authored by Dave Airlie

Merge branch 'drm-vmware-fixes' into drm-testing

* drm-vmware-fixes:
  drm/vmwgfx: Remove some leftover debug messages.
  drm/vmwgfx: Print warnings in kernel log about bo pinning that fails.
  drm/vmwgfx: Unpause overlay on update.
  drm/vmwgfx: Some modesetting cleanups and fixes.
  drm/vmwgfx: Don't use SVGA_REG_ENABLE in modesetting code.
  drm/vmwgfx: Remove duplicate member from struct vmw_legacy_display_unit.
  drm/vmwgfx: Reserve first part of VRAM for framebuffer.
  drm/vmwgfx: Support older hardware.
  drm/vmwgfx: Get connector status from detection function.
  drm/vmwgfx: Add kernel throttling support. Bump minor.
  drm/vmwgfx: Make sure to unpin old and pin new framebuffer.
  drm/vmwgfx: Fix single framebuffer detection.
  drm/vmwgfx: Assume larger framebuffer max size.
parents 4abe4389 1ca14e75
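The one piece of this merge that is easiest to miss while reading the diff is the lag-based throttling added by "drm/vmwgfx: Add kernel throttling support": vmw_fifo_send_fence() timestamps every submitted fence sequence, vmw_update_sequence() retires entries when the hardware signals, and vmw_execbuf_ioctl() calls vmw_wait_lag() whenever userspace passes a non-zero throttle_us. The standalone sketch below is only an illustration of that accounting written as plain userspace C; every name in it (lag_tracker, throttle_push, and so on) is hypothetical and it is not part of the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_PENDING 64                  /* illustration only: fixed-size ring */

struct lag_tracker {
    uint32_t seq[MAX_PENDING];          /* pending fence sequences */
    struct timespec stamp[MAX_PENDING]; /* submission timestamps */
    unsigned head, tail;                /* ring indices */
    int64_t lag_ns;                     /* lag measured at lag_time */
    struct timespec lag_time;
};

static struct timespec now_mono(void)
{
    struct timespec t;
    clock_gettime(CLOCK_MONOTONIC, &t); /* userspace stand-in for the kernel's getrawmonotonic() */
    return t;
}

static int64_t ts_diff_ns(struct timespec a, struct timespec b)
{
    return (int64_t)(a.tv_sec - b.tv_sec) * 1000000000LL + (a.tv_nsec - b.tv_nsec);
}

/* Submission side: remember when each fence sequence was emitted
 * (the role vmw_fence_push() plays in vmw_fifo_send_fence()). */
static void throttle_push(struct lag_tracker *t, uint32_t sequence)
{
    t->seq[t->tail % MAX_PENDING] = sequence;
    t->stamp[t->tail % MAX_PENDING] = now_mono();
    t->tail++;
}

/* Completion side: retire everything up to the signaled sequence and
 * record how long it waited, roughly what vmw_fence_pull() does. */
static void throttle_pull(struct lag_tracker *t, uint32_t signaled)
{
    struct timespec now = now_mono();

    while (t->head != t->tail &&
           (uint32_t)(signaled - t->seq[t->head % MAX_PENDING]) < (1u << 30)) {
        t->lag_ns = ts_diff_ns(now, t->stamp[t->head % MAX_PENDING]);
        t->lag_time = now;
        t->head++;
    }
    if (t->head == t->tail) {           /* nothing outstanding: no lag */
        t->lag_ns = 0;
        t->lag_time = now;
    }
}

/* Submission gate: lag keeps growing with wall time until the next fence
 * signals, so add the elapsed time before comparing against the budget,
 * much like vmw_fifo_lag()/vmw_lag_lt() behind vmw_wait_lag(). */
static bool throttle_over_budget(struct lag_tracker *t, uint32_t budget_us)
{
    int64_t lag_ns = t->lag_ns + ts_diff_ns(now_mono(), t->lag_time);
    return lag_ns > (int64_t)budget_us * 1000;
}

int main(void)
{
    struct lag_tracker t = { .lag_time = now_mono() };

    throttle_push(&t, 1);
    throttle_push(&t, 2);
    /* ... the virtual hardware signals sequence 1 some time later ... */
    throttle_pull(&t, 1);
    printf("over a 500us budget: %d\n", throttle_over_budget(&t, 500));
    return 0;
}

In the real driver the budget comes from the execbuf argument's throttle_us field, as seen in the vmwgfx_execbuf.c hunk further down; the in-kernel implementation is the new vmwgfx_fence.c file included in full below.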
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
     vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
     vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-    vmwgfx_overlay.o
+    vmwgfx_overlay.o vmwgfx_fence.o
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
@@ -318,6 +318,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         goto out_err3;
     }
+    /* Need mmio memory to check for fifo pitchlock cap. */
+    if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+        !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+        !vmw_fifo_have_pitchlock(dev_priv)) {
+        ret = -ENOSYS;
+        DRM_ERROR("Hardware has no pitchlock\n");
+        goto out_err4;
+    }
     dev_priv->tdev = ttm_object_device_init
         (dev_priv->mem_global_ref.object, 12);
@@ -399,8 +408,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 {
     struct vmw_private *dev_priv = vmw_priv(dev);
-    DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
     unregister_pm_notifier(&dev_priv->pm_nb);
     vmw_fb_close(dev_priv);
@@ -546,7 +553,6 @@ static int vmw_master_create(struct drm_device *dev,
 {
     struct vmw_master *vmaster;
-    DRM_INFO("Master create.\n");
     vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
     if (unlikely(vmaster == NULL))
         return -ENOMEM;
@@ -563,7 +569,6 @@ static void vmw_master_destroy(struct drm_device *dev,
 {
     struct vmw_master *vmaster = vmw_master(master);
-    DRM_INFO("Master destroy.\n");
     master->driver_priv = NULL;
     kfree(vmaster);
 }
@@ -579,8 +584,6 @@ static int vmw_master_set(struct drm_device *dev,
     struct vmw_master *vmaster = vmw_master(file_priv->master);
     int ret = 0;
-    DRM_INFO("Master set.\n");
     if (active) {
         BUG_ON(active != &dev_priv->fbdev_master);
         ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +625,6 @@ static void vmw_master_drop(struct drm_device *dev,
     struct vmw_master *vmaster = vmw_master(file_priv->master);
     int ret;
-    DRM_INFO("Master drop.\n");
     /**
      * Make sure the master doesn't disappear while we have
      * it locked.
...
@@ -41,7 +41,7 @@
 #define VMWGFX_DRIVER_DATE "20100209"
 #define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 1
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -102,6 +102,13 @@ struct vmw_surface {
     struct vmw_cursor_snooper snooper;
 };
+struct vmw_fence_queue {
+    struct list_head head;
+    struct timespec lag;
+    struct timespec lag_time;
+    spinlock_t lock;
+};
 struct vmw_fifo_state {
     unsigned long reserved_size;
     __le32 *dynamic_buffer;
@@ -115,6 +122,7 @@ struct vmw_fifo_state {
     uint32_t capabilities;
     struct mutex fifo_mutex;
     struct rw_semaphore rwsem;
+    struct vmw_fence_queue fence_queue;
 };
 struct vmw_relocation {
@@ -179,6 +187,7 @@ struct vmw_private {
     uint32_t vga_red_mask;
     uint32_t vga_blue_mask;
     uint32_t vga_green_mask;
+    uint32_t vga_pitchlock;
     /*
      * Framebuffer info.
@@ -393,6 +402,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +451,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
                              uint32_t sequence,
                              bool interruptible,
                              unsigned long timeout);
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
+                                struct vmw_fifo_state *fifo_state);
+/**
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
+ */
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+                          uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+                          uint32_t signaled_sequence);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+                        struct vmw_fence_queue *queue, uint32_t us);
 /**
  * Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +493,9 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                           struct ttm_object_file *tfile,
                           struct ttm_buffer_object *bo,
                           SVGA3dCmdHeader *header);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+                        unsigned width, unsigned height, unsigned pitch,
+                        unsigned bbp, unsigned depth);
 /**
  * Overlay control - vmwgfx_overlay.c
...
@@ -669,6 +669,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
         goto out_err;
     vmw_apply_relocations(sw_context);
+    if (arg->throttle_us) {
+        ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+                           arg->throttle_us);
+        if (unlikely(ret != 0))
+            goto out_err;
+    }
     vmw_fifo_commit(dev_priv, arg->command_size);
     ret = vmw_fifo_send_fence(dev_priv, &sequence);
...
@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
         return -EINVAL;
     }
-    /* without multimon its hard to resize */
-    if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
-        (var->xres != par->max_width ||
-         var->yres != par->max_height)) {
-        DRM_ERROR("Tried to resize, but we don't have multimon\n");
+    if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+        (var->xoffset != 0 || var->yoffset != 0)) {
+        DRM_ERROR("Can not handle panning without display topology\n");
         return -EINVAL;
     }
-    if (var->xres > par->max_width ||
-        var->yres > par->max_height) {
+    if ((var->xoffset + var->xres) > par->max_width ||
+        (var->yoffset + var->yres) > par->max_height) {
         DRM_ERROR("Requested geom can not fit in framebuffer\n");
         return -EINVAL;
     }
@@ -154,8 +152,7 @@ static int vmw_fb_set_par(struct fb_info *info)
     struct vmw_fb_par *par = info->par;
     struct vmw_private *vmw_priv = par->vmw_priv;
-    if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-        vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+    if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
@@ -164,18 +161,11 @@ static int vmw_fb_set_par(struct fb_info *info)
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-        vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
-        vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
-        vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
-        vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
-        vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
-        vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-        vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-        vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+        vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+                           info->fix.line_length,
+                           par->bpp, par->depth);
         /* TODO check if pitch and offset changes */
-        vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
@@ -183,13 +173,19 @@ static int vmw_fb_set_par(struct fb_info *info)
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
         vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+        vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
     } else {
-        vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
-        vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
-        /* TODO check if pitch and offset changes */
+        vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+                           info->fix.line_length,
+                           par->bpp, par->depth);
     }
+    /* This is really helpful since if this fails the user
+     * can probably not see anything on the screen.
+     */
+    WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
     return 0;
 }
@@ -416,48 +412,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
     unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
     int ret;
+    /* XXX These shouldn't be hardcoded. */
     initial_width = 800;
     initial_height = 600;
     fb_bbp = 32;
     fb_depth = 24;
-    if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-        fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
-        fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
-    } else {
-        fb_width = min(vmw_priv->fb_max_width, initial_width);
-        fb_height = min(vmw_priv->fb_max_height, initial_height);
-    }
+    /* XXX As shouldn't these be as well. */
+    fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+    fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
     initial_width = min(fb_width, initial_width);
     initial_height = min(fb_height, initial_height);
-    vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
-    vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
-    vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
-    vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
-    vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-    vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-    vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-    fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+    fb_pitch = fb_width * fb_bbp / 8;
+    fb_size = fb_pitch * fb_height;
     fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
-    fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
-    DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
-    DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
-    DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
-    DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
-    DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
-    DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
-    DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
-    DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
-    DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
-    DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
-    DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
-    DRM_DEBUG("fb_pitch %u\n", fb_pitch);
-    DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
     info = framebuffer_alloc(sizeof(*par), device);
     if (!info)
@@ -659,6 +630,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
         goto err_unlock;
     ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+    /* Could probably bug on */
+    WARN_ON(bo->offset != 0);
     ttm_bo_unreserve(bo);
 err_unlock:
     ttm_write_unlock(&vmw_priv->active_master->lock);
...
/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

struct vmw_fence {
    struct list_head head;
    uint32_t sequence;
    struct timespec submitted;
};

void vmw_fence_queue_init(struct vmw_fence_queue *queue)
{
    INIT_LIST_HEAD(&queue->head);
    queue->lag = ns_to_timespec(0);
    getrawmonotonic(&queue->lag_time);
    spin_lock_init(&queue->lock);
}

void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
{
    struct vmw_fence *fence, *next;

    spin_lock(&queue->lock);
    list_for_each_entry_safe(fence, next, &queue->head, head) {
        kfree(fence);
    }
    spin_unlock(&queue->lock);
}

int vmw_fence_push(struct vmw_fence_queue *queue,
                   uint32_t sequence)
{
    struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);

    if (unlikely(!fence))
        return -ENOMEM;

    fence->sequence = sequence;
    getrawmonotonic(&fence->submitted);

    spin_lock(&queue->lock);
    list_add_tail(&fence->head, &queue->head);
    spin_unlock(&queue->lock);

    return 0;
}

int vmw_fence_pull(struct vmw_fence_queue *queue,
                   uint32_t signaled_sequence)
{
    struct vmw_fence *fence, *next;
    struct timespec now;
    bool updated = false;

    spin_lock(&queue->lock);
    getrawmonotonic(&now);

    if (list_empty(&queue->head)) {
        queue->lag = ns_to_timespec(0);
        queue->lag_time = now;
        updated = true;
        goto out_unlock;
    }

    list_for_each_entry_safe(fence, next, &queue->head, head) {
        if (signaled_sequence - fence->sequence > (1 << 30))
            continue;

        queue->lag = timespec_sub(now, fence->submitted);
        queue->lag_time = now;
        updated = true;
        list_del(&fence->head);
        kfree(fence);
    }

out_unlock:
    spin_unlock(&queue->lock);

    return (updated) ? 0 : -EBUSY;
}

static struct timespec vmw_timespec_add(struct timespec t1,
                                        struct timespec t2)
{
    t1.tv_sec += t2.tv_sec;
    t1.tv_nsec += t2.tv_nsec;
    if (t1.tv_nsec >= 1000000000L) {
        t1.tv_sec += 1;
        t1.tv_nsec -= 1000000000L;
    }

    return t1;
}

static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
{
    struct timespec now;

    spin_lock(&queue->lock);
    getrawmonotonic(&now);
    queue->lag = vmw_timespec_add(queue->lag,
                                  timespec_sub(now, queue->lag_time));
    queue->lag_time = now;
    spin_unlock(&queue->lock);
    return queue->lag;
}

static bool vmw_lag_lt(struct vmw_fence_queue *queue,
                       uint32_t us)
{
    struct timespec lag, cond;

    cond = ns_to_timespec((s64) us * 1000);
    lag = vmw_fifo_lag(queue);
    return (timespec_compare(&lag, &cond) < 1);
}

int vmw_wait_lag(struct vmw_private *dev_priv,
                 struct vmw_fence_queue *queue, uint32_t us)
{
    struct vmw_fence *fence;
    uint32_t sequence;
    int ret;

    while (!vmw_lag_lt(queue, us)) {
        spin_lock(&queue->lock);
        if (list_empty(&queue->head))
            sequence = atomic_read(&dev_priv->fence_seq);
        else {
            fence = list_first_entry(&queue->head,
                                     struct vmw_fence, head);
            sequence = fence->sequence;
        }
        spin_unlock(&queue->lock);

        ret = vmw_wait_fence(dev_priv, false, sequence, true,
                             3*HZ);

        if (unlikely(ret != 0))
            return ret;

        (void) vmw_fence_pull(queue, sequence);
    }
    return 0;
}
@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
     __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
     uint32_t fifo_min, hwversion;
+    if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+        return false;
     fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
     if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
         return false;
@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
     return true;
 }
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+    uint32_t caps;
+    if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+        return false;
+    caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+    if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+        return true;
+    return false;
+}
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
     __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
     atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
     iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
+    vmw_fence_queue_init(&fifo->fence_queue);
     return vmw_fifo_send_fence(dev_priv, &dummy);
 out_err:
     vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
               dev_priv->enable_state);
     mutex_unlock(&dev_priv->hw_mutex);
+    vmw_fence_queue_takedown(&fifo->fence_queue);
     if (likely(fifo->last_buffer != NULL)) {
         vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
     fifo_state->last_buffer_add = true;
     vmw_fifo_commit(dev_priv, bytes);
     fifo_state->last_buffer_add = false;
+    (void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+    vmw_update_sequence(dev_priv, fifo_state);
 out_err:
     return ret;
...
@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
     return (busy == 0);
 }
+void vmw_update_sequence(struct vmw_private *dev_priv,
+                         struct vmw_fifo_state *fifo_state)
+{
+    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+    uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+    if (dev_priv->last_read_sequence != sequence) {
+        dev_priv->last_read_sequence = sequence;
+        vmw_fence_pull(&fifo_state->fence_queue, sequence);
+    }
+}
 bool vmw_fence_signaled(struct vmw_private *dev_priv,
                         uint32_t sequence)
 {
-    __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
     struct vmw_fifo_state *fifo_state;
     bool ret;
     if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
         return true;
-    dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+    fifo_state = &dev_priv->fifo;
+    vmw_update_sequence(dev_priv, fifo_state);
     if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
         return true;
-    fifo_state = &dev_priv->fifo;
     if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
         vmw_fifo_idle(dev_priv, sequence))
         return true;
...
@@ -30,6 +30,8 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
 struct vmw_framebuffer_surface {
     struct vmw_framebuffer base;
     struct vmw_surface *surface;
+    struct vmw_dma_buffer *buffer;
     struct delayed_work d_work;
     struct mutex work_lock;
     bool present_fs;
@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
     vfbs->base.base.depth = 24;
     vfbs->base.base.width = width;
     vfbs->base.base.height = height;
-    vfbs->base.pin = NULL;
-    vfbs->base.unpin = NULL;
+    vfbs->base.pin = &vmw_surface_dmabuf_pin;
+    vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
     vfbs->surface = surface;
     mutex_init(&vfbs->work_lock);
     INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
     .create_handle = vmw_framebuffer_create_handle,
 };
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+    struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+    struct vmw_framebuffer_surface *vfbs =
+        vmw_framebuffer_to_vfbs(&vfb->base);
+    unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+    int ret;
+    vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+    if (unlikely(vfbs->buffer == NULL))
+        return -ENOMEM;
+    vmw_overlay_pause_all(dev_priv);
+    ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+                          &vmw_vram_ne_placement,
+                          false, &vmw_dmabuf_bo_free);
+    vmw_overlay_resume_all(dev_priv);
+    return ret;
+}
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+    struct ttm_buffer_object *bo;
+    struct vmw_framebuffer_surface *vfbs =
+        vmw_framebuffer_to_vfbs(&vfb->base);
+    bo = &vfbs->buffer->base;
+    ttm_bo_unref(&bo);
+    vfbs->buffer = NULL;
+    return 0;
+}
 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 {
     struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
         vmw_framebuffer_to_vfbd(&vfb->base);
     int ret;
     vmw_overlay_pause_all(dev_priv);
     ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
-    if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-        vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-        vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
-        vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
-        vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
-        vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
-        vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
-        vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-        vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-        vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-    } else
-        WARN_ON(true);
     vmw_overlay_resume_all(dev_priv);
+    WARN_ON(ret != 0);
     return 0;
 }
@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
     /* XXX get the first 3 from the surface info */
     vfbd->base.base.bits_per_pixel = 32;
-    vfbd->base.base.pitch = width * 32 / 4;
+    vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
     vfbd->base.base.depth = 24;
     vfbd->base.base.width = width;
     vfbd->base.base.height = height;
@@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
     dev->mode_config.funcs = &vmw_kms_funcs;
     dev->mode_config.min_width = 1;
     dev->mode_config.min_height = 1;
-    dev->mode_config.max_width = dev_priv->fb_max_width;
-    dev->mode_config.max_height = dev_priv->fb_max_height;
+    /* assumed largest fb size */
+    dev->mode_config.max_width = 8192;
+    dev->mode_config.max_height = 8192;
     ret = vmw_kms_init_legacy_display_system(dev_priv);
@@ -826,24 +846,25 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
     return ret;
 }
-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+                        unsigned width, unsigned height, unsigned pitch,
+                        unsigned bbp, unsigned depth)
 {
-    /*
-     * setup a single multimon monitor with the size
-     * of 0x0, this stops the UI from resizing when we
-     * change the framebuffer size
-     */
-    if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-        vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-        vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
-        vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-        vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-        vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-        vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-        vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-        vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-    }
+    if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+        vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+    else if (vmw_fifo_have_pitchlock(vmw_priv))
+        iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+    vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+    vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+    vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
+    vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
+    vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+    vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+    vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+}
+int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+{
     vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
     vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
     vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
@@ -852,6 +873,12 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
     vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
     vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
     vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+    if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+        vmw_priv->vga_pitchlock =
+            vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+    else if (vmw_fifo_have_pitchlock(vmw_priv))
+        vmw_priv->vga_pitchlock =
+            ioread32(vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
     return 0;
 }
@@ -866,9 +893,12 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
     vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
     vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
     vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
-    /* TODO check for multimon */
-    vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+    if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+        vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+                  vmw_priv->vga_pitchlock);
+    else if (vmw_fifo_have_pitchlock(vmw_priv))
+        iowrite32(vmw_priv->vga_pitchlock,
+                  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
     return 0;
 }
@@ -38,6 +38,7 @@ struct vmw_legacy_display {
     struct list_head active;
     unsigned num_active;
+    unsigned last_num_active;
     struct vmw_framebuffer *fb;
 };
@@ -49,8 +50,6 @@ struct vmw_legacy_display_unit {
     struct vmw_display_unit base;
     struct list_head active;
-    unsigned unit;
 };
 static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +87,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 {
     struct vmw_legacy_display *lds = dev_priv->ldu_priv;
     struct vmw_legacy_display_unit *entry;
-    struct drm_crtc *crtc;
+    struct drm_framebuffer *fb = NULL;
+    struct drm_crtc *crtc = NULL;
     int i = 0;
-    /* to stop the screen from changing size on resize */
-    vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
-    for (i = 0; i < lds->num_active; i++) {
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-        vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-    }
-    /* Now set the mode */
-    vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+    /* If there is no display topology the host just assumes
+     * that the guest will set the same layout as the host.
+     */
+    if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+        int w = 0, h = 0;
+        list_for_each_entry(entry, &lds->active, active) {
+            crtc = &entry->base.crtc;
+            w = max(w, crtc->x + crtc->mode.hdisplay);
+            h = max(h, crtc->y + crtc->mode.vdisplay);
+            i++;
+        }
+        if (crtc == NULL)
+            return 0;
+        fb = entry->base.crtc.fb;
+        vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+                           fb->bits_per_pixel, fb->depth);
+        return 0;
+    }
+    if (!list_empty(&lds->active)) {
+        entry = list_entry(lds->active.next, typeof(*entry), active);
+        fb = entry->base.crtc.fb;
+        vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+                           fb->bits_per_pixel, fb->depth);
+    }
+    /* Make sure we always show something. */
+    vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+              lds->num_active ? lds->num_active : 1);
     i = 0;
     list_for_each_entry(entry, &lds->active, active) {
         crtc = &entry->base.crtc;
@@ -120,6 +140,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
         i++;
     }
+    BUG_ON(i != lds->num_active);
+    lds->last_num_active = lds->num_active;
     return 0;
 }
@@ -130,6 +154,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
     if (list_empty(&ldu->active))
         return 0;
+    /* Must init otherwise list_empty(&ldu->active) will not work. */
     list_del_init(&ldu->active);
     if (--(ld->num_active) == 0) {
         BUG_ON(!ld->fb);
@@ -149,24 +174,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
     struct vmw_legacy_display_unit *entry;
     struct list_head *at;
+    BUG_ON(!ld->num_active && ld->fb);
+    if (vfb != ld->fb) {
+        if (ld->fb && ld->fb->unpin)
+            ld->fb->unpin(ld->fb);
+        if (vfb->pin)
+            vfb->pin(vfb);
+        ld->fb = vfb;
+    }
     if (!list_empty(&ldu->active))
         return 0;
     at = &ld->active;
     list_for_each_entry(entry, &ld->active, active) {
-        if (entry->unit > ldu->unit)
+        if (entry->base.unit > ldu->base.unit)
             break;
         at = &entry->active;
     }
     list_add(&ldu->active, at);
-    if (ld->num_active++ == 0) {
-        BUG_ON(ld->fb);
-        if (vfb->pin)
-            vfb->pin(vfb);
-        ld->fb = vfb;
-    }
+    ld->num_active++;
     return 0;
 }
@@ -208,6 +238,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
     /* ldu only supports one fb active at the time */
     if (dev_priv->ldu_priv->fb && vfb &&
+        !(dev_priv->ldu_priv->num_active == 1 &&
+          !list_empty(&ldu->active)) &&
         dev_priv->ldu_priv->fb != vfb) {
         DRM_ERROR("Multiple framebuffers not supported\n");
         return -EINVAL;
@@ -443,18 +475,16 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
     if (!ldu)
         return -ENOMEM;
-    ldu->unit = unit;
+    ldu->base.unit = unit;
     crtc = &ldu->base.crtc;
     encoder = &ldu->base.encoder;
     connector = &ldu->base.connector;
+    INIT_LIST_HEAD(&ldu->active);
     drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
                        DRM_MODE_CONNECTOR_LVDS);
-    /* Initial status */
-    if (unit == 0)
-        connector->status = connector_status_connected;
-    else
-        connector->status = connector_status_disconnected;
+    connector->status = vmw_ldu_connector_detect(connector);
     drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
                      DRM_MODE_ENCODER_LVDS);
@@ -462,8 +492,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
     encoder->possible_crtcs = (1 << unit);
     encoder->possible_clones = 0;
-    INIT_LIST_HEAD(&ldu->active);
     drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
     drm_connector_attach_property(connector,
@@ -487,11 +515,14 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
     INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
     dev_priv->ldu_priv->num_active = 0;
+    dev_priv->ldu_priv->last_num_active = 0;
     dev_priv->ldu_priv->fb = NULL;
     drm_mode_create_dirty_info_property(dev_priv->dev);
     vmw_ldu_init(dev_priv, 0);
+    /* for old hardware without multimon only enable one display */
+    if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
     vmw_ldu_init(dev_priv, 1);
     vmw_ldu_init(dev_priv, 2);
     vmw_ldu_init(dev_priv, 3);
@@ -499,6 +530,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
     vmw_ldu_init(dev_priv, 5);
     vmw_ldu_init(dev_priv, 6);
     vmw_ldu_init(dev_priv, 7);
+    }
     return 0;
 }
...
@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
     if (stream->buf != buf)
         stream->buf = vmw_dmabuf_reference(buf);
     stream->saved = *arg;
+    /* stream is no longer stopped/paused */
+    stream->paused = false;
     return 0;
 }
...