Commit a278724a authored by Thomas Hellstrom

drm/vmwgfx: Implement fbdev on kms v2

With screen targets, the old legacy display system's fbdev no longer works
satisfactorily; at best the resolution is severely restricted. Therefore,
implement fbdev on top of the KMS system. With this change, fbdev will use
whatever KMS backend is chosen.

There are helpers available for this, so in the future we'd probably want
to implement the helper callbacks instead of calling into our KMS
implementation directly.
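
As a rough illustration of that future direction, the sketch below shows what
wiring the fbdev emulation up to the generic drm_fb_helper callbacks might look
like. This is not part of the patch: the vmw_fbdev_* names, the crtc/connector
counts and the fb_probe stub are assumptions for illustration only.

#include <drm/drm_fb_helper.h>

/*
 * Illustrative stub: a real fb_probe would allocate a buffer object, wrap it
 * in a KMS framebuffer and fill in helper->fb and helper->fbdev.
 */
static int vmw_fbdev_probe(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	return -ENOSYS;	/* placeholder */
}

static const struct drm_fb_helper_funcs vmw_fbdev_funcs = {
	.fb_probe = vmw_fbdev_probe,
};

static int vmw_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, &vmw_fbdev_funcs);

	/* One crtc and one connector, matching the fbdev unit picked below. */
	ret = drm_fb_helper_init(dev, helper, 1, 1);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret) {
		drm_fb_helper_fini(helper);
		return ret;
	}

	/* 32 bpp matches what vmw_fb_init() picks today. */
	drm_fb_helper_initial_config(helper, 32);
	return 0;
}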

v2: Make sure we take the mode_config mutex around modesetting.
Also clear the initial framebuffer using vzalloc instead of vmalloc.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent fd006a43
@@ -1120,23 +1120,6 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
static void vmw_lastclose(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_mode_set set;
int ret;
set.x = 0;
set.y = 0;
set.fb = NULL;
set.mode = NULL;
set.connectors = NULL;
set.num_connectors = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
set.crtc = crtc;
ret = drm_mode_set_config_internal(&set);
WARN_ON(ret != 0);
}
}
static void vmw_master_init(struct vmw_master *vmaster)
@@ -1321,6 +1304,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case PM_HIBERNATION_PREPARE:
if (dev_priv->enable_fb)
vmw_fb_off(dev_priv);
ttm_suspend_lock(&dev_priv->reservation_sem);
/*
@@ -1337,7 +1322,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
case PM_POST_RESTORE:
vmw_fence_fifo_up(dev_priv->fman);
ttm_suspend_unlock(&dev_priv->reservation_sem);
if (dev_priv->enable_fb)
vmw_fb_on(dev_priv);
break;
case PM_RESTORE_PREPARE:
break;
...
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"
#include <drm/ttm/ttm_placement.h>
@@ -40,21 +41,22 @@ struct vmw_fb_par {
void *vmalloc;
struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
struct ttm_bo_kmap_obj map;
void *bo_ptr;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
u32 fb_x;
u32 fb_y;
bool bo_iowrite;
u32 pseudo_palette[17];
unsigned depth;
unsigned bpp;
unsigned max_width;
unsigned max_height;
void *bo_ptr;
unsigned bo_size;
bool bo_iowrite;
struct {
spinlock_t lock;
bool active;
@@ -63,6 +65,11 @@ struct vmw_fb_par {
unsigned x2;
unsigned y2;
} dirty;
struct drm_crtc *crtc;
struct drm_connector *con;
bool local_mode;
};
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -77,7 +84,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
}
switch (par->depth) {
switch (par->set_fb->depth) {
case 24:
case 32:
pal[regno] = ((red & 0xff00) << 8) |
@@ -85,7 +92,8 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
((blue & 0xff00) >> 8);
break;
default:
DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
par->set_fb->bits_per_pixel);
return 1;
}
@@ -134,12 +142,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
(var->xoffset != 0 || var->yoffset != 0)) {
DRM_ERROR("Can not handle panning without display topology\n");
return -EINVAL;
}
if ((var->xoffset + var->xres) > par->max_width ||
(var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
@@ -156,46 +158,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return 0;
}
static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
int ret;
info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
info->fix.line_length,
par->bpp, par->depth);
if (ret)
return ret;
if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}
/* This is really helpful since if this fails the user
* can probably not see anything on the screen.
*/
WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
return 0;
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return 0;
}
static int vmw_fb_blank(int blank, struct fb_info *info)
{
return 0;
@@ -209,55 +171,77 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
int stride = (info->fix.line_length / 4);
int *src = (int *)info->screen_base;
__le32 __iomem *vram_mem = par->bo_ptr;
unsigned long flags;
unsigned x, y, w, h;
int i, k;
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
unsigned long irq_flags;
s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
u32 cpp, max_x, max_y;
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
if (vmw_priv->suspended)
return;
spin_lock_irqsave(&par->dirty.lock, flags);
mutex_lock(&par->bo_mutex);
cur_fb = par->set_fb;
if (!cur_fb)
goto out_unlock;
spin_lock_irqsave(&par->dirty.lock, irq_flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, flags);
return;
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
goto out_unlock;
}
x = par->dirty.x1;
y = par->dirty.y1;
w = min(par->dirty.x2, info->var.xres) - x;
h = min(par->dirty.y2, info->var.yres) - y;
/*
* Handle panning when copying from vmalloc to framebuffer.
* Clip dirty area to framebuffer.
*/
cpp = (cur_fb->bits_per_pixel + 7) / 8;
max_x = par->fb_x + cur_fb->width;
max_y = par->fb_y + cur_fb->height;
dst_x1 = par->dirty.x1 - par->fb_x;
dst_y1 = par->dirty.y1 - par->fb_y;
dst_x1 = max_t(s32, dst_x1, 0);
dst_y1 = max_t(s32, dst_y1, 0);
dst_x2 = par->dirty.x2 - par->fb_x;
dst_y2 = par->dirty.y2 - par->fb_y;
dst_x2 = min_t(s32, dst_x2, max_x);
dst_y2 = min_t(s32, dst_y2, max_y);
w = dst_x2 - dst_x1;
h = dst_y2 - dst_y1;
w = max_t(s32, 0, w);
h = max_t(s32, 0, h);
par->dirty.x1 = par->dirty.x2 = 0;
par->dirty.y1 = par->dirty.y2 = 0;
spin_unlock_irqrestore(&par->dirty.lock, flags);
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
iowrite32(src[k], vram_mem + k);
if (w && h) {
dst_ptr = (u8 *)par->bo_ptr +
(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
src_ptr = (u8 *)par->vmalloc +
((dst_y1 + par->fb_y) * info->fix.line_length +
(dst_x1 + par->fb_x) * cpp);
while (h-- > 0) {
memcpy(dst_ptr, src_ptr, w*cpp);
dst_ptr += par->set_fb->pitches[0];
src_ptr += info->fix.line_length;
}
#if 0
DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif
cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return;
}
cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd->body.x = cpu_to_le32(x);
cmd->body.y = cpu_to_le32(y);
cmd->body.width = cpu_to_le32(w);
cmd->body.height = cpu_to_le32(h);
vmw_fifo_commit(vmw_priv, sizeof(*cmd));
clip.x1 = dst_x1;
clip.x2 = dst_x2;
clip.y1 = dst_y1;
clip.y2 = dst_y2;
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
vmw_fifo_flush(vmw_priv, false);
}
out_unlock:
mutex_unlock(&par->bo_mutex);
}
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
@@ -292,6 +276,28 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
spin_unlock_irqrestore(&par->dirty.lock, flags);
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
if ((var->xoffset + var->xres) > var->xres_virtual ||
(var->yoffset + var->yres) > var->yres_virtual) {
DRM_ERROR("Requested panning can not fit in framebuffer\n");
return -EINVAL;
}
mutex_lock(&par->bo_mutex);
par->fb_x = var->xoffset;
par->fb_y = var->yoffset;
if (par->set_fb)
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
par->set_fb->height);
mutex_unlock(&par->bo_mutex);
return 0;
}
static void vmw_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
@@ -359,33 +365,12 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
* Bring up code
*/
static struct fb_ops vmw_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = vmw_fb_check_var,
.fb_set_par = vmw_fb_set_par,
.fb_setcolreg = vmw_fb_setcolreg,
.fb_fillrect = vmw_fb_fillrect,
.fb_copyarea = vmw_fb_copyarea,
.fb_imageblit = vmw_fb_imageblit,
.fb_pan_display = vmw_fb_pan_display,
.fb_blank = vmw_fb_blank,
};
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out)
{
struct vmw_dma_buffer *vmw_bo;
struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
struct ttm_placement ne_placement;
int ret;
ne_placement.num_placement = 1;
ne_placement.placement = &ne_place;
ne_placement.num_busy_placement = 1;
ne_placement.busy_placement = &ne_place;
ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
@@ -395,14 +380,13 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
}
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
&ne_placement,
&vmw_sys_placement,
false,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
*out = vmw_bo;
ttm_write_unlock(&vmw_priv->reservation_sem);
return 0;
@@ -412,14 +396,249 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
return ret;
}
static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
int *depth)
{
switch (var->bits_per_pixel) {
case 32:
*depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
return -EINVAL;
}
return 0;
}
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
bool detach_bo,
bool unref_bo)
{
struct drm_framebuffer *cur_fb = par->set_fb;
int ret;
/* Detach the KMS framebuffer from crtcs */
if (par->set_mode) {
struct drm_mode_set set;
set.crtc = par->crtc;
set.x = 0;
set.y = 0;
set.mode = NULL;
set.fb = NULL;
set.num_connectors = 1;
set.connectors = &par->con;
ret = drm_mode_set_config_internal(&set);
if (ret) {
DRM_ERROR("Could not unset a mode.\n");
return ret;
}
drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
par->set_mode = NULL;
}
if (cur_fb) {
drm_framebuffer_unreference(cur_fb);
par->set_fb = NULL;
}
if (par->vmw_bo && detach_bo) {
if (par->bo_ptr) {
ttm_bo_kunmap(&par->map);
par->bo_ptr = NULL;
}
if (unref_bo)
vmw_dmabuf_unreference(&par->vmw_bo);
else
vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
}
return 0;
}
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
struct drm_mode_fb_cmd mode_cmd;
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
struct vmw_framebuffer *vfb;
int ret = 0;
size_t new_bo_size;
ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
if (ret)
return ret;
mode_cmd.width = var->xres;
mode_cmd.height = var->yres;
mode_cmd.bpp = var->bits_per_pixel;
mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&
cur_fb->height == mode_cmd.height &&
cur_fb->bits_per_pixel == mode_cmd.bpp &&
cur_fb->depth == mode_cmd.depth &&
cur_fb->pitches[0] == mode_cmd.pitch)
return 0;
/* Need new buffer object ? */
new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
ret = vmw_fb_kms_detach(par,
par->bo_size < new_bo_size ||
par->bo_size > 2*new_bo_size,
true);
if (ret)
return ret;
if (!par->vmw_bo) {
ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
&par->vmw_bo);
if (ret) {
DRM_ERROR("Failed creating a buffer object for "
"fbdev.\n");
return ret;
}
par->bo_size = new_bo_size;
}
vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
true, &mode_cmd);
if (IS_ERR(vfb))
return PTR_ERR(vfb);
par->set_fb = &vfb->base;
if (!par->bo_ptr) {
/*
* Pin before mapping. Since we don't know in what placement
* to pin, call into KMS to do it for us.
*/
ret = vfb->pin(vfb);
if (ret) {
DRM_ERROR("Could not pin the fbdev framebuffer.\n");
return ret;
}
ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
par->vmw_bo->base.num_pages, &par->map);
if (ret) {
vfb->unpin(vfb);
DRM_ERROR("Could not map the fbdev framebuffer.\n");
return ret;
}
par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
}
return 0;
}
static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
struct drm_mode_set set;
struct fb_var_screeninfo *var = &info->var;
struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
DRM_MODE_TYPE_DRIVER,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
struct drm_display_mode *old_mode;
struct drm_display_mode *mode;
int ret;
old_mode = par->set_mode;
mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
return -ENOMEM;
}
mode->hdisplay = var->xres;
mode->vdisplay = var->yres;
vmw_guess_mode_timing(mode);
if (old_mode && drm_mode_equal(old_mode, mode)) {
drm_mode_destroy(vmw_priv->dev, mode);
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
mode->hdisplay *
(var->bits_per_pixel + 7) / 8,
mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}
mutex_lock(&par->bo_mutex);
drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_fb_kms_framebuffer(info);
if (ret)
goto out_unlock;
par->fb_x = var->xoffset;
par->fb_y = var->yoffset;
set.crtc = par->crtc;
set.x = 0;
set.y = 0;
set.mode = mode;
set.fb = par->set_fb;
set.num_connectors = 1;
set.connectors = &par->con;
ret = drm_mode_set_config_internal(&set);
if (ret)
goto out_unlock;
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
/* If there already was stuff dirty we wont
* schedule a new work, so lets do it now */
#if (defined(VMWGFX_STANDALONE) && defined(VMWGFX_FB_DEFERRED))
schedule_delayed_work(&par->def_par.deferred_work, 0);
#else
schedule_delayed_work(&info->deferred_work, 0);
#endif
out_unlock:
if (old_mode)
drm_mode_destroy(vmw_priv->dev, old_mode);
par->set_mode = mode;
drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);
return ret;
}
static struct fb_ops vmw_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = vmw_fb_check_var,
.fb_set_par = vmw_fb_set_par,
.fb_setcolreg = vmw_fb_setcolreg,
.fb_fillrect = vmw_fb_fillrect,
.fb_copyarea = vmw_fb_copyarea,
.fb_imageblit = vmw_fb_imageblit,
.fb_pan_display = vmw_fb_pan_display,
.fb_blank = vmw_fb_blank,
};
int vmw_fb_init(struct vmw_private *vmw_priv)
{
struct device *device = &vmw_priv->dev->pdev->dev;
struct vmw_fb_par *par;
struct fb_info *info;
unsigned initial_width, initial_height;
unsigned fb_width, fb_height;
unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
struct drm_display_mode *init_mode;
int ret;
fb_bpp = 32;
@@ -429,9 +648,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
initial_width = min(vmw_priv->initial_width, fb_width);
initial_height = min(vmw_priv->initial_height, fb_height);
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -445,35 +661,34 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
*/
vmw_priv->fb_info = info;
par = info->par;
memset(par, 0, sizeof(*par));
par->vmw_priv = vmw_priv;
par->depth = fb_depth;
par->bpp = fb_bpp;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;
drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
par->max_height, &par->con,
&par->crtc, &init_mode);
if (ret) {
drm_modeset_unlock_all(vmw_priv->dev);
goto err_kms;
}
info->var.xres = init_mode->hdisplay;
info->var.yres = init_mode->vdisplay;
drm_modeset_unlock_all(vmw_priv->dev);
/*
* Create buffers and alloc memory
*/
par->vmalloc = vmalloc(fb_size);
par->vmalloc = vzalloc(fb_size);
if (unlikely(par->vmalloc == NULL)) {
ret = -ENOMEM;
goto err_free;
}
ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
if (unlikely(ret != 0))
goto err_free;
ret = ttm_bo_kmap(&par->vmw_bo->base,
0,
par->vmw_bo->base.num_pages,
&par->map);
if (unlikely(ret != 0))
goto err_unref;
par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
par->bo_size = fb_size;
/*
* Fixed and var
*/
@@ -509,18 +724,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->var.xres_virtual = fb_width;
info->var.yres_virtual = fb_height;
info->var.bits_per_pixel = par->bpp;
info->var.bits_per_pixel = fb_bpp;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
info->var.xres = initial_width;
info->var.yres = initial_height;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
@@ -536,6 +747,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par->dirty.y1 = par->dirty.y2 = 0;
par->dirty.active = true;
spin_lock_init(&par->dirty.lock);
mutex_init(&par->bo_mutex);
info->fbdefio = &vmw_defio;
fb_deferred_io_init(info);
@@ -543,16 +755,16 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
if (unlikely(ret != 0))
goto err_defio;
vmw_fb_set_par(info);
return 0;
err_defio:
fb_deferred_io_cleanup(info);
err_aper:
ttm_bo_kunmap(&par->map);
err_unref:
ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
vfree(par->vmalloc);
err_kms:
framebuffer_release(info);
vmw_priv->fb_info = NULL;
@@ -563,22 +775,18 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
struct ttm_buffer_object *bo;
if (!vmw_priv->fb_info)
return 0;
info = vmw_priv->fb_info;
par = info->par;
bo = &par->vmw_bo->base;
par->vmw_bo = NULL;
/* ??? order */
fb_deferred_io_cleanup(info);
unregister_framebuffer(info);
ttm_bo_kunmap(&par->map);
ttm_bo_unref(&bo);
(void) vmw_fb_kms_detach(par, true, true);
vfree(par->vmalloc);
framebuffer_release(info);
@@ -597,20 +805,16 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
info = vmw_priv->fb_info;
par = info->par;
if (!par->bo_ptr)
return 0;
vmw_kms_save_vga(vmw_priv);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = false;
spin_unlock_irqrestore(&par->dirty.lock, flags);
flush_delayed_work(&info->deferred_work);
par->bo_ptr = NULL;
ttm_bo_kunmap(&par->map);
vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
mutex_lock(&par->bo_mutex);
(void) vmw_fb_kms_detach(par, true, false);
mutex_unlock(&par->bo_mutex);
return 0;
}
@@ -620,8 +824,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
struct fb_info *info;
struct vmw_fb_par *par;
unsigned long flags;
bool dummy;
int ret;
if (!vmw_priv->fb_info)
return -EINVAL;
@@ -629,39 +831,10 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
info = vmw_priv->fb_info;
par = info->par;
/* we are already active */
if (par->bo_ptr != NULL)
return 0;
/* Make sure that all overlays are stoped when we take over */
vmw_overlay_stop_all(vmw_priv);
ret = vmw_dmabuf_pin_in_start_of_vram(vmw_priv, par->vmw_bo, false);
if (unlikely(ret != 0)) {
DRM_ERROR("could not move buffer to start of VRAM\n");
goto err_no_buffer;
}
ret = ttm_bo_kmap(&par->vmw_bo->base,
0,
par->vmw_bo->base.num_pages,
&par->map);
BUG_ON(ret != 0);
par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
vmw_fb_set_par(info);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
vmw_kms_restore_vga(vmw_priv);
err_no_buffer:
vmw_fb_set_par(info);
vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
/* If there already was stuff dirty we wont
* schedule a new work, so lets do it now */
schedule_delayed_work(&info->deferred_work, 0);
return 0;
}
@@ -372,6 +372,7 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
if (vfbs->base.user_obj)
ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
@@ -582,6 +583,7 @@ static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer);
if (vfbd->base.user_obj)
ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
@@ -1462,7 +1464,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
* @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
* members filled in.
*/
static void vmw_guess_mode_timing(struct drm_display_mode *mode)
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
mode->hsync_start = mode->hdisplay + 50;
mode->hsync_end = mode->hsync_start + 50;
@@ -2001,3 +2003,58 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
return 0;
}
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
unsigned unit,
u32 max_width,
u32 max_height,
struct drm_connector **p_con,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode)
{
struct drm_connector *con;
struct vmw_display_unit *du;
struct drm_display_mode *mode;
int i = 0;
list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
head) {
if (i == unit)
break;
++i;
}
if (i != unit) {
DRM_ERROR("Could not find initial display unit.\n");
return -EINVAL;
}
if (list_empty(&con->modes))
(void) vmw_du_connector_fill_modes(con, max_width, max_height);
if (list_empty(&con->modes)) {
DRM_ERROR("Could not find initial display mode.\n");
return -EINVAL;
}
du = vmw_connector_to_du(con);
*p_con = con;
*p_crtc = &du->crtc;
list_for_each_entry(mode, &con->modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED)
break;
}
if (mode->type & DRM_MODE_TYPE_PREFERRED)
*p_mode = mode;
else {
WARN_ONCE(true, "Could not find initial preferred mode.\n");
*p_mode = list_first_entry(&con->modes,
struct drm_display_mode,
head);
}
return 0;
}
@@ -243,6 +243,14 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd *mode_cmd);
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
unsigned unit,
u32 max_width,
u32 max_height,
struct drm_connector **p_con,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
/*
* Legacy display unit functions - vmwgfx_ldu.c
...
@@ -279,7 +279,6 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
return -EINVAL;
}
vmw_fb_off(dev_priv);
vmw_svga_enable(dev_priv);
crtc->primary->fb = fb;
...
@@ -369,7 +369,6 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return -EINVAL;
}
vmw_fb_off(dev_priv);
vmw_svga_enable(dev_priv);
if (mode->hdisplay != crtc->mode.hdisplay ||
...