/*
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "cirrus_drv.h"

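/*
 * Copy a damaged region of the vmalloc shadow buffer (sysram) into the
 * VRAM backing object.  If the BO cannot be reserved right away (it is
 * busy, or we are in a context that cannot sleep), the damage rectangle
 * is merged into afbdev->x1/y1/x2/y2 under dirty_lock and flushed by a
 * later update instead.
 */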
static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
			     int x, int y, int width, int height)
{
	int i;
	struct drm_gem_object *obj;
	struct cirrus_bo *bo;
	int src_offset, dst_offset;
	int bpp = afbdev->gfb.base.format->cpp[0];
	int ret = -EBUSY;
	bool unmap = false;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;

	obj = afbdev->gfb.obj;
	bo = gem_to_cirrus_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = cirrus_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;
		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&afbdev->dirty_lock, flags);

	if (afbdev->y1 < y)
		y = afbdev->y1;
	if (afbdev->y2 > y2)
		y2 = afbdev->y2;
	if (afbdev->x1 < x)
		x = afbdev->x1;
	if (afbdev->x2 > x2)
		x2 = afbdev->x2;

	if (store_for_later) {
		afbdev->x1 = x;
		afbdev->x2 = x2;
		afbdev->y1 = y;
		afbdev->y2 = y2;
		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
		return;
	}

	afbdev->x1 = afbdev->y1 = INT_MAX;
	afbdev->x2 = afbdev->y2 = 0;
	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);

	if (!bo->kmap.virtual) {
		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
		if (ret) {
			DRM_ERROR("failed to kmap fb updates\n");
			cirrus_bo_unreserve(bo);
			return;
		}
		unmap = true;
	}
	/* copy the merged damage rectangle from the shadow buffer to VRAM */
	for (i = y; i <= y2; i++) {
		/* assume equal stride for now */
		src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
		memcpy_toio(bo->kmap.virtual + src_offset,
			    afbdev->sysram + src_offset,
			    (x2 - x + 1) * bpp);
	}
	if (unmap)
		ttm_bo_kunmap(&bo->kmap);

	cirrus_bo_unreserve(bo);
}

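/*
 * fbdev drawing entry points: render into the shadow buffer with the
 * generic sys_* helpers, then push the touched rectangle out to VRAM.
 */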
static void cirrus_fillrect(struct fb_info *info,
			 const struct fb_fillrect *rect)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_fillrect(info, rect);
	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
			 rect->height);
}

static void cirrus_copyarea(struct fb_info *info,
			 const struct fb_copyarea *area)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_copyarea(info, area);
	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
			 area->height);
}

static void cirrus_imageblit(struct fb_info *info,
			  const struct fb_image *image)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_imageblit(info, image);
	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
			 image->height);
}


static struct fb_ops cirrusfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cirrus_fillrect,
	.fb_copyarea = cirrus_copyarea,
	.fb_imageblit = cirrus_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};

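/*
 * Check the requested mode against the hardware limits and allocate a
 * GEM object large enough to back the fbdev framebuffer.
 */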
static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct drm_gem_object **gobj_p)
{
	struct drm_device *dev = afbdev->helper.dev;
	struct cirrus_device *cdev = dev->dev_private;
	u32 bpp;
	u32 size;
	struct drm_gem_object *gobj;
	int ret = 0;

	bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;

	if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
				      bpp, mode_cmd->pitches[0]))
		return -EINVAL;

	size = mode_cmd->pitches[0] * mode_cmd->height;
	ret = cirrus_gem_create(dev, size, true, &gobj);
	if (ret)
		return ret;

	*gobj_p = gobj;
	return ret;
}

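/*
 * .fb_probe callback: create the GEM backing object and the vmalloc
 * shadow buffer, set up the drm_framebuffer and fill in the fb_info
 * fields consumed by fbcon.
 */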
static int cirrusfb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct cirrus_fbdev *gfbdev =
		container_of(helper, struct cirrus_fbdev, helper);
	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	void *sysram;
	struct drm_gem_object *gobj = NULL;
	struct cirrus_bo *bo = NULL;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	bo = gem_to_cirrus_bo(gobj);

	sysram = vmalloc(size);
	if (!sysram) {
		/* drop the reference taken by cirrusfb_create_object() */
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOMEM;
	}

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		vfree(sysram);
		drm_gem_object_unreference_unlocked(gobj);
		return PTR_ERR(info);
	}

	info->par = gfbdev;

	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
	if (ret) {
		vfree(sysram);
		return ret;
	}

	gfbdev->sysram = sysram;
	gfbdev->size = size;

	fb = &gfbdev->gfb.base;
	if (!fb) {
		DRM_INFO("fb is NULL\n");
		return -EINVAL;
	}

	/* setup helper */
	gfbdev->helper.fb = fb;

	strcpy(info->fix.id, "cirrusdrmfb");

	info->fbops = &cirrusfb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
	info->apertures->ranges[0].size = cdev->mc.vram_size;

228 229 230
	info->fix.smem_start = cdev->dev->mode_config.fb_base;
	info->fix.smem_len = cdev->mc.vram_size;

231 232 233 234 235 236 237 238 239
	info->screen_base = sysram;
	info->screen_size = size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
	DRM_INFO("fb depth is %d\n", fb->format->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	return 0;
}

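/*
 * Tear down the fbdev emulation: unregister the fb_info, drop the GEM
 * reference, free the shadow buffer and clean up the framebuffer.
 */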
static int cirrus_fbdev_destroy(struct drm_device *dev,
				struct cirrus_fbdev *gfbdev)
{
	struct cirrus_framebuffer *gfb = &gfbdev->gfb;

	drm_fb_helper_unregister_fbi(&gfbdev->helper);

	if (gfb->obj) {
		drm_gem_object_unreference_unlocked(gfb->obj);
		gfb->obj = NULL;
	}

	vfree(gfbdev->sysram);
	drm_fb_helper_fini(&gfbdev->helper);
	drm_framebuffer_unregister_private(&gfb->base);
	drm_framebuffer_cleanup(&gfb->base);

	return 0;
}

static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
	.gamma_set = cirrus_crtc_fb_gamma_set,
	.gamma_get = cirrus_crtc_fb_gamma_get,
	.fb_probe = cirrusfb_create,
};

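/*
 * Set up fbdev emulation: allocate the cirrus_fbdev state, register the
 * fb helper with all connectors and create the initial configuration.
 */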
int cirrus_fbdev_init(struct cirrus_device *cdev)
{
	struct cirrus_fbdev *gfbdev;
	int ret;
	int bpp_sel = 24;

	/*bpp_sel = 8;*/
	gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
	if (!gfbdev)
		return -ENOMEM;

	cdev->mode_info.gfbdev = gfbdev;
	spin_lock_init(&gfbdev->dirty_lock);

	drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
			      &cirrus_fb_helper_funcs);

	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
				 CIRRUSFB_CONN_LIMIT);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
	if (ret)
		return ret;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(cdev->dev);

	return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
}

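/* Tear down fbdev emulation, if it was set up. */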
void cirrus_fbdev_fini(struct cirrus_device *cdev)
{
	if (!cdev->mode_info.gfbdev)
		return;

	cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
	kfree(cdev->mode_info.gfbdev);
	cdev->mode_info.gfbdev = NULL;
}