Commit cd5351f4 authored by Rob Clark, committed by Greg Kroah-Hartman

staging: add omapdrm DRM/KMS driver for TI OMAP platforms

A DRM display driver for the TI OMAP platform.  Like the omapfb (fbdev)
and omap_vout (v4l2 display) drivers before it, this driver uses the
DSS2 driver to access the display hardware, including support for
HDMI, DVI, and various types of LCD panels.  It also implements GEM
support for buffer allocation (for KMS as well as for offscreen buffers
used by the xf86-video-omap userspace xorg driver).

The driver maps CRTCs to overlays, encoders to overlay managers, and
connectors to dssdevs.  Note that this arrangement might change slightly
when support for drm_plane overlays is added.
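As a rough sketch (condensed from omap_modeset_init() further down in this
patch, for the case without board platform data; error handling omitted),
the mapping is established like this:

	/* encoders: one per DSS overlay manager */
	for (i = 0; i < omap_dss_get_num_overlay_managers(); i++)
		create_encoder(dev, omap_dss_get_overlay_manager(i));

	/* connectors: one per dssdev (display device) */
	for_each_dss_dev(dssdev)
		create_connector(dev, dssdev);

	/* CRTCs: one per overlay, up to the configured number of CRTCs */
	connected = detect_connectors(dev);
	for (i = 0, j = 0; i < min(omap_dss_get_num_overlays(), num_crtc); i++)
		create_crtc(dev, omap_dss_get_overlay(i), &j, connected);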

For GEM support, non-scanout buffers use the shmem-backed pages
provided by the GEM core (via drm_gem_object_init()).  Scanout buffers,
which must be physically contiguous, are instead allocated with CMA and
use drm_gem_private_object_init().
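As a hedged illustration (not part of this patch), a userspace client could
allocate such a scanout-capable buffer through the GEM_NEW ioctl defined in
omap_drm.h below; the device fd, size, and helper name here are arbitrary:

	#include <stdint.h>
	#include <xf86drm.h>	/* drmIoctl(), from libdrm */
	#include "omap_drm.h"	/* OMAP_BO_* flags and ioctls from this patch */

	/* Ask for a 4 MiB write-combined buffer that can be scanned out;
	 * without OMAP_BO_SCANOUT the object would be backed by shmem pages
	 * instead of a physically contiguous (CMA) allocation.
	 */
	static int alloc_scanout_bo(int fd, uint32_t *handle)
	{
		struct drm_omap_gem_new req = {
			.size.bytes = 4 * 1024 * 1024,
			.flags = OMAP_BO_SCANOUT | OMAP_BO_WC,
		};
		int ret = drmIoctl(fd, DRM_IOCTL_OMAP_GEM_NEW, &req);

		if (ret)
			return ret;
		*handle = req.handle;	/* GEM handle, usable with GEM_INFO etc. */
		return 0;
	}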

See userspace xorg driver:
git://github.com/robclark/xf86-video-omap.git

Refer to this link for CMA (Contiguous Memory Allocator):
http://lkml.org/lkml/2011/8/19/302

Links to previous versions of the patch:
v1: http://lwn.net/Articles/458137/
v2: http://patches.linaro.org/4156/
v3: http://patches.linaro.org/4688/
v4: http://patches.linaro.org/4791/

History:

v5: move headers from include/drm at Greg KH's request, minor rebasing
    on 3.2-rc1, pull in private copies of drm_gem_{get,put}_pages()
    because "drm/gem: add functions to get/put pages" patch is not
    merged yet
v4: bit of rework of encoder/connector _dpms() code, modeset_init()
    rework to not use nested functions, update TODO.txt
v3: minor cleanups, improved error handling for dev_load(), some minor
    API changes that will be needed later for tiled buffer support
v2: replace omap_vram with CMA for scanout buffer allocation, remove
    unneeded functions, use dma_addr_t for physical addresses, error
    handling cleanup, refactor attach/detach pages into common drm
    functions, split non-userspace-facing API into omap_priv.h, remove
    plugin API

v1: original
Signed-off-by: Rob Clark <rob@ti.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent be7f39c5
drivers/staging/Kconfig
@@ -130,4 +130,6 @@ source "drivers/staging/nvec/Kconfig"
 source "drivers/staging/media/Kconfig"
+source "drivers/staging/omapdrm/Kconfig"
 endif # STAGING

drivers/staging/Makefile
@@ -56,3 +56,4 @@ obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
 obj-$(CONFIG_DRM_PSB) += gma500/
 obj-$(CONFIG_INTEL_MEI) += mei/
 obj-$(CONFIG_MFD_NVEC) += nvec/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/

drivers/staging/omapdrm/Kconfig (new file):
config DRM_OMAP
tristate "OMAP DRM"
depends on DRM && !CONFIG_FB_OMAP2
depends on ARCH_OMAP2PLUS
select DRM_KMS_HELPER
select OMAP2_DSS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
default n
help
DRM display driver for OMAP2/3/4 based boards.
config DRM_OMAP_NUM_CRTCS
int "Number of CRTCs"
range 1 10
default 1 if ARCH_OMAP2 || ARCH_OMAP3
default 2 if ARCH_OMAP4
depends on DRM_OMAP
help
Select the number of video overlays which can be used as framebuffers.
The remaining overlays are reserved for video.
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI)
#
ccflags-y := -Iinclude/drm -Werror
omapdrm-y := omap_drv.o omap_crtc.o omap_encoder.o omap_connector.o omap_fb.o omap_fbdev.o omap_gem.o
# temporary:
omapdrm-y += omap_gem_helpers.o
obj-$(CONFIG_DRM_OMAP) += omapdrm.o
TODO
. check error handling/cleanup paths
. add drm_plane / overlay support
. add video decode/encode support (via syslink3 + codec-engine)
. still some rough edges with flipping.. event back to userspace should
really come after VSYNC interrupt
. where should we do eviction (detach_pages())? We aren't necessarily
accessing the pages via a GART, so maybe we need some other threshold
to put a cap on the # of pages that can be pin'd. (It is mostly only
of interest in case you have a swap partition/file.. which a lot of
these devices do not.. but it doesn't hurt for the driver to do the
right thing anyways.)
. Use mm_shrinker to trigger unpinning pages. Need to figure out how
to handle next issue first (I think?)
. Note TTM already has some mm_shrinker stuff.. maybe an argument to
move to TTM? Or maybe something that could be factored out in common?
. GEM/shmem backed pages can have existing mappings (kernel linear map,
etc..), which isn't really ideal.
. Revisit GEM sync object infrastructure.. TTM has some framework for this
already. Possibly this could be refactored out and made more common?
There should be some way to do this with less wheel-reinvention.
. Review DSS vs KMS mismatches. The omap_dss_device is sort of part encoder,
part connector. Which results in a bit of duct tape to fwd calls from
encoder to connector. Possibly this could be done a bit better.
Userspace:
. git://github.com/robclark/xf86-video-omap.git
Currently tested on
. OMAP3530 beagleboard
. OMAP4430 pandaboard
. OMAP4460 pandaboard
/*
* drivers/staging/omapdrm/omap_connector.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "omap_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
/*
* connector funcs
*/
#define to_omap_connector(x) container_of(x, struct omap_connector, base)
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *dssdev;
};
static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
struct omap_video_timings *timings)
{
mode->clock = timings->pixel_clock;
mode->hdisplay = timings->x_res;
mode->hsync_start = mode->hdisplay + timings->hfp;
mode->hsync_end = mode->hsync_start + timings->hsw;
mode->htotal = mode->hsync_end + timings->hbp;
mode->vdisplay = timings->y_res;
mode->vsync_start = mode->vdisplay + timings->vfp;
mode->vsync_end = mode->vsync_start + timings->vsw;
mode->vtotal = mode->vsync_end + timings->vbp;
	/* note: whether it is interlaced, +/- h/vsync, etc, which should
	 * be set in the mode flags, is not exposed in the
	 * omap_video_timings struct.. but the hdmi driver tracks those
	 * separately, so all we need in order to set the mode is a way
	 * to recover these timing values; the omap_dss_driver does the
	 * rest.
	 */
}
static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
struct drm_display_mode *mode)
{
timings->pixel_clock = mode->clock;
timings->x_res = mode->hdisplay;
timings->hfp = mode->hsync_start - mode->hdisplay;
timings->hsw = mode->hsync_end - mode->hsync_start;
timings->hbp = mode->htotal - mode->hsync_end;
timings->y_res = mode->vdisplay;
timings->vfp = mode->vsync_start - mode->vdisplay;
timings->vsw = mode->vsync_end - mode->vsync_start;
timings->vbp = mode->vtotal - mode->vsync_end;
}
static void omap_connector_dpms(struct drm_connector *connector, int mode)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
int old_dpms;
DBG("%s: %d", dssdev->name, mode);
old_dpms = connector->dpms;
/* from off to on, do from crtc to connector */
if (mode < old_dpms)
drm_helper_connector_dpms(connector, mode);
if (mode == DRM_MODE_DPMS_ON) {
/* store resume info for suspended displays */
switch (dssdev->state) {
case OMAP_DSS_DISPLAY_SUSPENDED:
dssdev->activate_after_resume = true;
break;
case OMAP_DSS_DISPLAY_DISABLED: {
int ret = dssdev->driver->enable(dssdev);
if (ret) {
DBG("%s: failed to enable: %d",
dssdev->name, ret);
dssdev->driver->disable(dssdev);
}
break;
}
default:
break;
}
} else {
/* TODO */
}
/* from on to off, do from connector to crtc */
if (mode > old_dpms)
drm_helper_connector_dpms(connector, mode);
}
enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
enum drm_connector_status ret;
if (dssdrv->detect) {
if (dssdrv->detect(dssdev)) {
ret = connector_status_connected;
} else {
ret = connector_status_disconnected;
}
} else {
ret = connector_status_unknown;
}
VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
return ret;
}
static void omap_connector_destroy(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
dssdev->driver->disable(dssdev);
DBG("%s", omap_connector->dssdev->name);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(omap_connector);
omap_dss_put_device(dssdev);
}
#define MAX_EDID 512
static int omap_connector_get_modes(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
struct drm_device *dev = connector->dev;
int n = 0;
DBG("%s", omap_connector->dssdev->name);
/* if display exposes EDID, then we parse that in the normal way to
* build table of supported modes.. otherwise (ie. fixed resolution
* LCD panels) we just return a single mode corresponding to the
* currently configured timings:
*/
if (dssdrv->read_edid) {
		void *edid = kzalloc(MAX_EDID, GFP_KERNEL);

		if (!edid)
			return 0;

if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
drm_edid_is_valid(edid)) {
drm_mode_connector_update_edid_property(
connector, edid);
n = drm_add_edid_modes(connector, edid);
kfree(connector->display_info.raw_edid);
connector->display_info.raw_edid = edid;
} else {
drm_mode_connector_update_edid_property(
connector, NULL);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
struct omap_video_timings timings;
dssdrv->get_timings(dssdev, &timings);
copy_timings_omap_to_drm(mode, &timings);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
n = 1;
}
return n;
}
static int omap_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
struct omap_video_timings timings = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
int ret = MODE_BAD;
copy_timings_drm_to_omap(&timings, mode);
mode->vrefresh = drm_mode_vrefresh(mode);
if (!dssdrv->check_timings(dssdev, &timings)) {
/* check if vrefresh is still valid */
new_mode = drm_mode_duplicate(dev, mode);
new_mode->clock = timings.pixel_clock;
new_mode->vrefresh = 0;
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
ret = MODE_OK;
drm_mode_destroy(dev, new_mode);
}
DBG("connector: mode %s: "
"%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
(ret == MODE_OK) ? "valid" : "invalid",
mode->base.id, mode->name, mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal, mode->type, mode->flags);
return ret;
}
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector)
{
int i;
struct omap_connector *omap_connector = to_omap_connector(connector);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
struct drm_mode_object *obj;
if (connector->encoder_ids[i] == 0)
break;
obj = drm_mode_object_find(connector->dev,
connector->encoder_ids[i],
DRM_MODE_OBJECT_ENCODER);
if (obj) {
struct drm_encoder *encoder = obj_to_encoder(obj);
struct omap_overlay_manager *mgr =
omap_encoder_get_manager(encoder);
DBG("%s: found %s", omap_connector->dssdev->name,
mgr->name);
return encoder;
}
}
DBG("%s: no encoder", omap_connector->dssdev->name);
return NULL;
}
static const struct drm_connector_funcs omap_connector_funcs = {
.dpms = omap_connector_dpms,
.detect = omap_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = omap_connector_destroy,
};
static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.get_modes = omap_connector_get_modes,
.mode_valid = omap_connector_mode_valid,
.best_encoder = omap_connector_attached_encoder,
};
/* called from encoder when mode is set, to propagate settings to the dssdev */
void omap_connector_mode_set(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev = omap_connector->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
struct omap_video_timings timings;
copy_timings_drm_to_omap(&timings, mode);
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
omap_connector->dssdev->name,
mode->base.id, mode->name, mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal, mode->type, mode->flags);
if (dssdrv->check_timings(dssdev, &timings)) {
dev_err(dev->dev, "could not set timings\n");
return;
}
dssdrv->set_timings(dssdev, &timings);
}
/* flush an area of the framebuffer (in case of manual update display that
* is not automatically flushed)
*/
void omap_connector_flush(struct drm_connector *connector,
int x, int y, int w, int h)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
/* TODO: enable when supported in dss */
VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
}
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
int connector_type, struct omap_dss_device *dssdev)
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
DBG("%s", dssdev->name);
omap_dss_get_device(dssdev);
omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
if (!omap_connector) {
dev_err(dev->dev, "could not allocate connector\n");
goto fail;
}
omap_connector->dssdev = dssdev;
connector = &omap_connector->base;
drm_connector_init(dev, connector, &omap_connector_funcs,
connector_type);
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
#if 0 /* enable when dss2 supports hotplug */
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
connector->polled = 0;
else
#endif
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_sysfs_connector_add(connector);
return connector;
fail:
	if (connector) {
		omap_connector_destroy(connector);
	} else {
		/* drop the reference taken by omap_dss_get_device() above */
		omap_dss_put_device(dssdev);
	}
return NULL;
}
/*
* drivers/staging/omapdrm/omap_crtc.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "omap_drv.h"
#include "drm_mode.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
struct omap_crtc {
struct drm_crtc base;
struct omap_overlay *ovl;
struct omap_overlay_info info;
int id;
/* if there is a pending flip, this will be non-null: */
struct drm_pending_vblank_event *event;
};
/* push changes down to dss2 */
static int commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_overlay *ovl = omap_crtc->ovl;
struct omap_overlay_info *info = &omap_crtc->info;
int ret;
DBG("%s", omap_crtc->ovl->name);
DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
info->out_height, info->screen_width);
DBG("%d,%d %08x", info->pos_x, info->pos_y, info->paddr);
/* NOTE: do we want to do this at all here, or just wait
* for dpms(ON) since other CRTC's may not have their mode
* set yet, so fb dimensions may still change..
*/
ret = ovl->set_overlay_info(ovl, info);
if (ret) {
dev_err(dev->dev, "could not set overlay info\n");
return ret;
}
/* our encoder doesn't necessarily get a commit() after this, in
* particular in the dpms() and mode_set_base() cases, so force the
* manager to update:
*
* could this be in the encoder somehow?
*/
if (ovl->manager) {
ret = ovl->manager->apply(ovl->manager);
if (ret) {
dev_err(dev->dev, "could not apply settings\n");
return ret;
}
}
if (info->enabled) {
omap_framebuffer_flush(crtc->fb, crtc->x, crtc->y,
crtc->fb->width, crtc->fb->height);
}
return 0;
}
/* update parameters that are dependent on the framebuffer dimensions and
* position within the fb that this crtc scans out from. This is called
* when framebuffer dimensions or x,y base may have changed, either due
* to our mode, or a change in another crtc that is scanning out of the
* same fb.
*/
static void update_scanout(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
dma_addr_t paddr;
unsigned int screen_width;
omap_framebuffer_get_buffer(crtc->fb, crtc->x, crtc->y,
NULL, &paddr, &screen_width);
DBG("%s: %d,%d: %08x (%d)", omap_crtc->ovl->name,
crtc->x, crtc->y, (u32)paddr, screen_width);
omap_crtc->info.paddr = paddr;
omap_crtc->info.screen_width = screen_width;
}
static void omap_crtc_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->ovl->name);
}
static void omap_crtc_destroy(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->ovl->name);
drm_crtc_cleanup(crtc);
kfree(omap_crtc);
}
static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s: %d", omap_crtc->ovl->name, mode);
if (mode == DRM_MODE_DPMS_ON) {
update_scanout(crtc);
omap_crtc->info.enabled = true;
} else {
omap_crtc->info.enabled = false;
}
WARN_ON(commit(crtc));
}
static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->ovl->name);
return true;
}
static int omap_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s: %d,%d: %dx%d", omap_crtc->ovl->name, x, y,
mode->hdisplay, mode->vdisplay);
/* just use adjusted mode */
mode = adjusted_mode;
omap_crtc->info.width = mode->hdisplay;
omap_crtc->info.height = mode->vdisplay;
omap_crtc->info.out_width = mode->hdisplay;
omap_crtc->info.out_height = mode->vdisplay;
omap_crtc->info.color_mode = OMAP_DSS_COLOR_RGB24U;
omap_crtc->info.rotation_type = OMAP_DSS_ROT_DMA;
omap_crtc->info.rotation = OMAP_DSS_ROT_0;
omap_crtc->info.global_alpha = 0xff;
	omap_crtc->info.mirror = 0;
omap_crtc->info.pos_x = 0;
omap_crtc->info.pos_y = 0;
#if 0 /* re-enable when these are available in DSS2 driver */
omap_crtc->info.zorder = 3; /* GUI in the front, video behind */
omap_crtc->info.min_x_decim = 1;
omap_crtc->info.max_x_decim = 1;
omap_crtc->info.min_y_decim = 1;
omap_crtc->info.max_y_decim = 1;
#endif
update_scanout(crtc);
return 0;
}
static void omap_crtc_prepare(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct omap_overlay *ovl = omap_crtc->ovl;
DBG("%s", omap_crtc->ovl->name);
ovl->get_overlay_info(ovl, &omap_crtc->info);
omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void omap_crtc_commit(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->ovl->name);
omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s %d,%d: fb=%p", omap_crtc->ovl->name, x, y, old_fb);
update_scanout(crtc);
return commit(crtc);
}
static void omap_crtc_load_lut(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->ovl->name);
}
static void page_flip_cb(void *arg)
{
struct drm_crtc *crtc = arg;
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_pending_vblank_event *event = omap_crtc->event;
struct timeval now;
unsigned long flags;
WARN_ON(!event);
omap_crtc->event = NULL;
update_scanout(crtc);
WARN_ON(commit(crtc));
/* wakeup userspace */
/* TODO: this should happen *after* flip in vsync IRQ handler */
if (event) {
spin_lock_irqsave(&dev->event_lock, flags);
event->event.sequence = drm_vblank_count_and_time(
dev, omap_crtc->id, &now);
event->event.tv_sec = now.tv_sec;
event->event.tv_usec = now.tv_usec;
list_add_tail(&event->base.link,
&event->base.file_priv->event_list);
wake_up_interruptible(&event->base.file_priv->event_wait);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
{
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
if (omap_crtc->event) {
dev_err(dev->dev, "already a pending flip\n");
return -EINVAL;
}
crtc->fb = fb;
omap_crtc->event = event;
omap_gem_op_async(omap_framebuffer_bo(fb), OMAP_GEM_READ,
page_flip_cb, crtc);
return 0;
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
.gamma_set = omap_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = omap_crtc_page_flip_locked,
};
static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.dpms = omap_crtc_dpms,
.mode_fixup = omap_crtc_mode_fixup,
.mode_set = omap_crtc_mode_set,
.prepare = omap_crtc_prepare,
.commit = omap_crtc_commit,
.mode_set_base = omap_crtc_mode_set_base,
.load_lut = omap_crtc_load_lut,
};
struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
return omap_crtc->ovl;
}
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_overlay *ovl, int id)
{
struct drm_crtc *crtc = NULL;
struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
DBG("%s", ovl->name);
if (!omap_crtc) {
dev_err(dev->dev, "could not allocate CRTC\n");
goto fail;
}
omap_crtc->ovl = ovl;
omap_crtc->id = id;
crtc = &omap_crtc->base;
drm_crtc_init(dev, crtc, &omap_crtc_funcs);
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
return crtc;
fail:
if (crtc) {
drm_crtc_cleanup(crtc);
kfree(omap_crtc);
}
return NULL;
}
/*
* include/drm/omap_drm.h
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __OMAP_DRM_H__
#define __OMAP_DRM_H__
#include "drm.h"
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
struct drm_omap_param {
uint64_t param; /* in */
uint64_t value; /* in (set_param), out (get_param) */
};
#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
/* cache modes */
#define OMAP_BO_CACHED 0x00000000 /* default */
#define OMAP_BO_WC 0x00000002 /* write-combine */
#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
/* tiled modes */
#define OMAP_BO_TILED_8 0x00000100
#define OMAP_BO_TILED_16 0x00000200
#define OMAP_BO_TILED_32 0x00000300
#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
union omap_gem_size {
uint32_t bytes; /* (for non-tiled formats) */
struct {
uint16_t width;
uint16_t height;
} tiled; /* (for tiled formats) */
};
struct drm_omap_gem_new {
union omap_gem_size size; /* in */
uint32_t flags; /* in */
uint32_t handle; /* out */
uint32_t __pad;
};
/* mask of operations: */
enum omap_gem_op {
OMAP_GEM_READ = 0x01,
OMAP_GEM_WRITE = 0x02,
};
struct drm_omap_gem_cpu_prep {
uint32_t handle; /* buffer handle (in) */
uint32_t op; /* mask of omap_gem_op (in) */
};
struct drm_omap_gem_cpu_fini {
uint32_t handle; /* buffer handle (in) */
uint32_t op; /* mask of omap_gem_op (in) */
/* TODO maybe here we pass down info about what regions are touched
* by sw so we can be clever about cache ops? For now a placeholder,
* set to zero and we just do full buffer flush..
*/
uint32_t nregions;
uint32_t __pad;
};
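/* A hypothetical userspace sequence (not part of this patch): CPU access to
 * a buffer is bracketed with CPU_PREP/CPU_FINI so the driver can synchronize
 * against pending operations and do any needed cache maintenance:
 *
 *	struct drm_omap_gem_cpu_prep prep = { .handle = handle, .op = OMAP_GEM_WRITE };
 *	struct drm_omap_gem_cpu_fini fini = { .handle = handle, .op = OMAP_GEM_WRITE };
 *
 *	ioctl(fd, DRM_IOCTL_OMAP_GEM_CPU_PREP, &prep);
 *	... CPU writes to the mmap'd buffer ...
 *	ioctl(fd, DRM_IOCTL_OMAP_GEM_CPU_FINI, &fini);
 */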
struct drm_omap_gem_info {
uint32_t handle; /* buffer handle (in) */
uint32_t pad;
uint64_t offset; /* mmap offset (out) */
/* note: in case of tiled buffers, the user virtual size can be
* different from the physical size (ie. how many pages are needed
* to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
* This size here is the one that should be used if you want to
* mmap() the buffer:
*/
uint32_t size; /* virtual size for mmap'ing (out) */
uint32_t __pad;
};
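/* A hypothetical userspace sketch (not part of this patch): the offset and
 * size returned by DRM_IOCTL_OMAP_GEM_INFO are what should be passed to
 * mmap() on the DRM device fd in order to map the buffer:
 *
 *	struct drm_omap_gem_info info = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_OMAP_GEM_INFO, &info);
 *	ptr = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, info.offset);
 */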
#define DRM_OMAP_GET_PARAM 0x00
#define DRM_OMAP_SET_PARAM 0x01
/* placeholder for plugin-api
#define DRM_OMAP_GET_BASE 0x02
*/
#define DRM_OMAP_GEM_NEW 0x03
#define DRM_OMAP_GEM_CPU_PREP 0x04
#define DRM_OMAP_GEM_CPU_FINI 0x05
#define DRM_OMAP_GEM_INFO 0x06
#define DRM_OMAP_NUM_IOCTLS 0x07
#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
/* placeholder for plugin-api
#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
*/
#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
#endif /* __OMAP_DRM_H__ */
/*
* drivers/staging/omapdrm/omap_drv.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "omap_drv.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#define DRIVER_NAME MODULE_NAME
#define DRIVER_DESC "OMAP DRM"
#define DRIVER_DATE "20110917"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
struct drm_device *drm_device;
static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
module_param(num_crtc, int, 0600);
/*
* mode config funcs
*/
/* Notes about mapping DSS and DRM entities:
* CRTC: overlay
* encoder: manager.. with some extension to allow one primary CRTC
* and zero or more video CRTC's to be mapped to one encoder?
* connector: dssdev.. manager can be attached/detached from different
* devices
*/
static void omap_fb_output_poll_changed(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
DBG("dev=%p", dev);
if (priv->fbdev) {
drm_fb_helper_hotplug_event(priv->fbdev);
}
}
static struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.output_poll_changed = omap_fb_output_poll_changed,
};
static int get_connector_type(struct omap_dss_device *dssdev)
{
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_HDMI:
return DRM_MODE_CONNECTOR_HDMIA;
case OMAP_DISPLAY_TYPE_DPI:
if (!strcmp(dssdev->name, "dvi"))
return DRM_MODE_CONNECTOR_DVID;
/* fallthrough */
default:
return DRM_MODE_CONNECTOR_Unknown;
}
}
#if 0 /* enable when dss2 supports hotplug */
static int omap_drm_notifier(struct notifier_block *nb,
unsigned long evt, void *arg)
{
switch (evt) {
case OMAP_DSS_SIZE_CHANGE:
case OMAP_DSS_HOTPLUG_CONNECT:
case OMAP_DSS_HOTPLUG_DISCONNECT: {
struct drm_device *dev = drm_device;
DBG("hotplug event: evt=%d, dev=%p", evt, dev);
if (dev) {
drm_sysfs_hotplug_event(dev);
}
return NOTIFY_OK;
}
default: /* don't care about other events for now */
return NOTIFY_DONE;
}
}
#endif
static void dump_video_chains(void)
{
int i;
DBG("dumping video chains: ");
for (i = 0; i < omap_dss_get_num_overlays(); i++) {
struct omap_overlay *ovl = omap_dss_get_overlay(i);
struct omap_overlay_manager *mgr = ovl->manager;
struct omap_dss_device *dssdev = mgr ? mgr->device : NULL;
if (dssdev) {
DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
dssdev->name);
} else if (mgr) {
DBG("%d: %s -> %s", i, ovl->name, mgr->name);
} else {
DBG("%d: %s", i, ovl->name);
}
}
}
/* create encoders for each manager */
static int create_encoder(struct drm_device *dev,
struct omap_overlay_manager *mgr)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
if (!encoder) {
dev_err(dev->dev, "could not create encoder: %s\n",
mgr->name);
return -ENOMEM;
}
BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
priv->encoders[priv->num_encoders++] = encoder;
return 0;
}
/* create connectors for each display device */
static int create_connector(struct drm_device *dev,
struct omap_dss_device *dssdev)
{
struct omap_drm_private *priv = dev->dev_private;
static struct notifier_block *notifier;
struct drm_connector *connector;
int j;
if (!dssdev->driver) {
dev_warn(dev->dev, "%s has no driver.. skipping it\n",
dssdev->name);
return 0;
}
if (!(dssdev->driver->get_timings ||
dssdev->driver->read_edid)) {
dev_warn(dev->dev, "%s driver does not support "
"get_timings or read_edid.. skipping it!\n",
dssdev->name);
return 0;
}
connector = omap_connector_init(dev,
get_connector_type(dssdev), dssdev);
if (!connector) {
dev_err(dev->dev, "could not create connector: %s\n",
dssdev->name);
return -ENOMEM;
}
BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
priv->connectors[priv->num_connectors++] = connector;
#if 0 /* enable when dss2 supports hotplug */
notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
notifier->notifier_call = omap_drm_notifier;
omap_dss_add_notify(dssdev, notifier);
#else
notifier = NULL;
#endif
for (j = 0; j < priv->num_encoders; j++) {
struct omap_overlay_manager *mgr =
omap_encoder_get_manager(priv->encoders[j]);
if (mgr->device == dssdev) {
drm_mode_connector_attach_encoder(connector,
priv->encoders[j]);
}
}
return 0;
}
/* create up to max_overlays CRTCs mapping to overlays.. by default,
* connect the overlays to different managers/encoders, giving priority
* to encoders connected to connectors with a detected connection
*/
static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
int *j, unsigned int connected_connectors)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_overlay_manager *mgr = NULL;
struct drm_crtc *crtc;
if (ovl->manager) {
DBG("disconnecting %s from %s", ovl->name,
ovl->manager->name);
ovl->unset_manager(ovl);
}
/* find next best connector, ones with detected connection first
*/
while (*j < priv->num_connectors && !mgr) {
if (connected_connectors & (1 << *j)) {
struct drm_encoder *encoder =
omap_connector_attached_encoder(
priv->connectors[*j]);
if (encoder) {
mgr = omap_encoder_get_manager(encoder);
}
}
(*j)++;
}
/* if we couldn't find another connected connector, lets start
* looking at the unconnected connectors:
*
* note: it might not be immediately apparent, but thanks to
* the !mgr check in both this loop and the one above, the only
* way to enter this loop is with *j == priv->num_connectors,
* so idx can never go negative.
*/
while (*j < 2 * priv->num_connectors && !mgr) {
int idx = *j - priv->num_connectors;
if (!(connected_connectors & (1 << idx))) {
struct drm_encoder *encoder =
omap_connector_attached_encoder(
priv->connectors[idx]);
if (encoder) {
mgr = omap_encoder_get_manager(encoder);
}
}
(*j)++;
}
if (mgr) {
DBG("connecting %s to %s", ovl->name, mgr->name);
ovl->set_manager(ovl, mgr);
}
crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
if (!crtc) {
dev_err(dev->dev, "could not create CRTC: %s\n",
ovl->name);
return -ENOMEM;
}
BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
priv->crtcs[priv->num_crtcs++] = crtc;
return 0;
}
static int match_dev_name(struct omap_dss_device *dssdev, void *data)
{
return !strcmp(dssdev->name, data);
}
static unsigned int detect_connectors(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
unsigned int connected_connectors = 0;
int i;
for (i = 0; i < priv->num_connectors; i++) {
struct drm_connector *connector = priv->connectors[i];
if (omap_connector_detect(connector, true) ==
connector_status_connected) {
connected_connectors |= (1 << i);
}
}
return connected_connectors;
}
static int omap_modeset_init(struct drm_device *dev)
{
const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
struct omap_drm_private *priv = dev->dev_private;
struct omap_dss_device *dssdev = NULL;
int i, j;
unsigned int connected_connectors = 0;
drm_mode_config_init(dev);
if (pdata) {
/* if platform data is provided by the board file, use it to
* control which overlays, managers, and devices we own.
*/
for (i = 0; i < pdata->mgr_cnt; i++) {
struct omap_overlay_manager *mgr =
omap_dss_get_overlay_manager(pdata->mgr_ids[i]);
create_encoder(dev, mgr);
}
for (i = 0; i < pdata->dev_cnt; i++) {
struct omap_dss_device *dssdev =
omap_dss_find_device(
(void *)pdata->dev_names[i], match_dev_name);
if (!dssdev) {
dev_warn(dev->dev, "no such dssdev: %s\n",
pdata->dev_names[i]);
continue;
}
create_connector(dev, dssdev);
}
connected_connectors = detect_connectors(dev);
j = 0;
for (i = 0; i < pdata->ovl_cnt; i++) {
struct omap_overlay *ovl =
omap_dss_get_overlay(pdata->ovl_ids[i]);
create_crtc(dev, ovl, &j, connected_connectors);
}
} else {
/* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
* to make educated guesses about everything else
*/
int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
create_encoder(dev, omap_dss_get_overlay_manager(i));
}
for_each_dss_dev(dssdev) {
create_connector(dev, dssdev);
}
connected_connectors = detect_connectors(dev);
j = 0;
for (i = 0; i < max_overlays; i++) {
create_crtc(dev, omap_dss_get_overlay(i),
&j, connected_connectors);
}
}
/* for now keep the mapping of CRTCs and encoders static.. */
for (i = 0; i < priv->num_encoders; i++) {
struct drm_encoder *encoder = priv->encoders[i];
struct omap_overlay_manager *mgr =
omap_encoder_get_manager(encoder);
encoder->possible_crtcs = 0;
for (j = 0; j < priv->num_crtcs; j++) {
struct omap_overlay *ovl =
omap_crtc_get_overlay(priv->crtcs[j]);
if (ovl->manager == mgr) {
encoder->possible_crtcs |= (1 << j);
}
}
DBG("%s: possible_crtcs=%08x", mgr->name,
encoder->possible_crtcs);
}
dump_video_chains();
dev->mode_config.min_width = 256;
dev->mode_config.min_height = 256;
/* note: eventually will need some cpu_is_omapXYZ() type stuff here
* to fill in these limits properly on different OMAP generations..
*/
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &omap_mode_config_funcs;
return 0;
}
static void omap_modeset_free(struct drm_device *dev)
{
drm_mode_config_cleanup(dev);
}
/*
* drm ioctl funcs
*/
static int ioctl_get_param(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_param *args = data;
DBG("%p: param=%llu", dev, args->param);
switch (args->param) {
case OMAP_PARAM_CHIPSET_ID:
args->value = GET_OMAP_TYPE;
break;
default:
DBG("unknown parameter %lld", args->param);
return -EINVAL;
}
return 0;
}
static int ioctl_set_param(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_param *args = data;
switch (args->param) {
default:
DBG("unknown parameter %lld", args->param);
return -EINVAL;
}
return 0;
}
static int ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_new *args = data;
DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
args->size.bytes, args->flags);
return omap_gem_new_handle(dev, file_priv, args->size,
args->flags, &args->handle);
}
static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_cpu_prep *args = data;
struct drm_gem_object *obj;
int ret;
VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
return -ENOENT;
}
ret = omap_gem_op_sync(obj, args->op);
if (!ret) {
ret = omap_gem_op_start(obj, args->op);
}
drm_gem_object_unreference_unlocked(obj);
return ret;
}
static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_cpu_fini *args = data;
struct drm_gem_object *obj;
int ret;
VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
return -ENOENT;
}
/* XXX flushy, flushy */
ret = 0;
if (!ret) {
ret = omap_gem_op_finish(obj, args->op);
}
drm_gem_object_unreference_unlocked(obj);
return ret;
}
static int ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_info *args = data;
struct drm_gem_object *obj;
int ret = 0;
DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!obj) {
return -ENOENT;
}
args->size = obj->size; /* for now */
args->offset = omap_gem_mmap_offset(obj);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
};
/*
* drm driver funcs
*/
/**
* load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
*
* The driver load routine has to do several things:
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
*/
static int dev_load(struct drm_device *dev, unsigned long flags)
{
struct omap_drm_private *priv;
int ret;
DBG("load: dev=%p", dev);
drm_device = dev;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(dev->dev, "could not allocate priv\n");
return -ENOMEM;
}
dev->dev_private = priv;
ret = omap_modeset_init(dev);
if (ret) {
dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
dev->dev_private = NULL;
kfree(priv);
return ret;
}
priv->fbdev = omap_fbdev_init(dev);
if (!priv->fbdev) {
dev_warn(dev->dev, "omap_fbdev_init failed\n");
/* well, limp along without an fbdev.. maybe X11 will work? */
}
drm_kms_helper_poll_init(dev);
ret = drm_vblank_init(dev, priv->num_crtcs);
if (ret) {
dev_warn(dev->dev, "could not init vblank\n");
}
return 0;
}
static int dev_unload(struct drm_device *dev)
{
DBG("unload: dev=%p", dev);
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
omap_fbdev_free(dev);
omap_modeset_free(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
return 0;
}
static int dev_open(struct drm_device *dev, struct drm_file *file)
{
file->driver_priv = NULL;
DBG("open: dev=%p, file=%p", dev, file);
return 0;
}
static int dev_firstopen(struct drm_device *dev)
{
DBG("firstopen: dev=%p", dev);
return 0;
}
/**
* lastclose - clean up after all DRM clients have exited
* @dev: DRM device
*
* Take care of cleaning up after all DRM clients have exited. In the
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*/
static void dev_lastclose(struct drm_device *dev)
{
/* we don't support vga-switcheroo.. so just make sure the fbdev
* mode is active
*/
struct omap_drm_private *priv = dev->dev_private;
int ret;
DBG("lastclose: dev=%p", dev);
ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
if (ret)
DBG("failed to restore crtc mode");
}
static void dev_preclose(struct drm_device *dev, struct drm_file *file)
{
DBG("preclose: dev=%p", dev);
}
static void dev_postclose(struct drm_device *dev, struct drm_file *file)
{
DBG("postclose: dev=%p, file=%p", dev, file);
}
/**
* enable_vblank - enable vblank interrupt events
* @dev: DRM device
* @crtc: which irq to enable
*
* Enable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*
* RETURNS
* Zero on success, appropriate errno if the given @crtc's vblank
* interrupt cannot be enabled.
*/
static int dev_enable_vblank(struct drm_device *dev, int crtc)
{
DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
return 0;
}
/**
* disable_vblank - disable vblank interrupt events
* @dev: DRM device
* @crtc: which irq to enable
*
* Disable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*/
static void dev_disable_vblank(struct drm_device *dev, int crtc)
{
DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
}
static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
{
return IRQ_HANDLED;
}
static void dev_irq_preinstall(struct drm_device *dev)
{
DBG("irq_preinstall: dev=%p", dev);
}
static int dev_irq_postinstall(struct drm_device *dev)
{
DBG("irq_postinstall: dev=%p", dev);
return 0;
}
static void dev_irq_uninstall(struct drm_device *dev)
{
DBG("irq_uninstall: dev=%p", dev);
}
static struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static struct drm_driver omap_drm_driver = {
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM,
.load = dev_load,
.unload = dev_unload,
.open = dev_open,
.firstopen = dev_firstopen,
.lastclose = dev_lastclose,
.preclose = dev_preclose,
.postclose = dev_postclose,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = dev_enable_vblank,
.disable_vblank = dev_disable_vblank,
.irq_preinstall = dev_irq_preinstall,
.irq_postinstall = dev_irq_postinstall,
.irq_uninstall = dev_irq_uninstall,
.irq_handler = dev_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
.gem_init_object = omap_gem_init_object,
.gem_free_object = omap_gem_free_object,
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
.dumb_destroy = omap_gem_dumb_destroy,
.ioctls = ioctls,
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
.mmap = omap_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.read = drm_read,
.llseek = noop_llseek,
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
{
DBG("");
return 0;
}
static int pdev_resume(struct platform_device *device)
{
DBG("");
return 0;
}
static void pdev_shutdown(struct platform_device *device)
{
DBG("");
}
static int pdev_probe(struct platform_device *device)
{
DBG("%s", device->name);
return drm_platform_init(&omap_drm_driver, device);
}
static int pdev_remove(struct platform_device *device)
{
DBG("");
drm_platform_exit(&omap_drm_driver, device);
return 0;
}
struct platform_driver pdev = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
.probe = pdev_probe,
.remove = pdev_remove,
.suspend = pdev_suspend,
.resume = pdev_resume,
.shutdown = pdev_shutdown,
};
static int __init omap_drm_init(void)
{
DBG("init");
return platform_driver_register(&pdev);
}
static void __exit omap_drm_fini(void)
{
DBG("fini");
platform_driver_unregister(&pdev);
}
/* need late_initcall() so we load after dss_driver's are loaded */
late_initcall(omap_drm_init);
module_exit(omap_drm_fini);
MODULE_AUTHOR("Rob Clark <rob@ti.com>");
MODULE_DESCRIPTION("OMAP DRM Display Driver");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");
/*
* drivers/staging/omapdrm/omap_drv.h
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __OMAP_DRV_H__
#define __OMAP_DRV_H__
#include <video/omapdss.h>
#include <linux/module.h>
#include <linux/types.h>
#include <drm/drmP.h>
#include "omap_drm.h"
#include "omap_priv.h"
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
#define MODULE_NAME "omapdrm"
/* max # of mapper-id's that can be assigned.. todo, come up with a better
* (but still inexpensive) way to store/access per-buffer mapper private
* data..
*/
#define MAX_MAPPERS 2
struct omap_drm_private {
unsigned int num_crtcs;
struct drm_crtc *crtcs[8];
unsigned int num_encoders;
struct drm_encoder *encoders[8];
unsigned int num_connectors;
struct drm_connector *connectors[8];
struct drm_fb_helper *fbdev;
};
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
void omap_fbdev_free(struct drm_device *dev);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct omap_overlay *ovl, int id);
struct omap_overlay *omap_crtc_get_overlay(struct drm_crtc *crtc);
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
struct omap_overlay_manager *mgr);
struct omap_overlay_manager *omap_encoder_get_manager(
struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force);
struct drm_connector *omap_connector_init(struct drm_device *dev,
int connector_type, struct omap_dss_device *dssdev);
void omap_connector_mode_set(struct drm_connector *connector,
struct drm_display_mode *mode);
void omap_connector_flush(struct drm_connector *connector,
int x, int y, int w, int h);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb);
int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
void **vaddr, dma_addr_t *paddr, unsigned int *screen_width);
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from);
void omap_framebuffer_flush(struct drm_framebuffer *fb,
int x, int y, int w, int h);
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, uint32_t flags);
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
void omap_gem_free_object(struct drm_gem_object *obj);
int omap_gem_init_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
uint32_t handle);
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
void (*fxn)(void *arg), void *arg);
int omap_gem_get_paddr(struct drm_gem_object *obj,
dma_addr_t *paddr, bool remap);
int omap_gem_put_paddr(struct drm_gem_object *obj);
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
static inline int align_pitch(int pitch, int width, int bpp)
{
int bytespp = (bpp + 7) / 8;
/* in case someone tries to feed us a completely bogus stride: */
pitch = max(pitch, width * bytespp);
/* PVR needs alignment to 8 pixels.. right now that is the most
* restrictive stride requirement..
*/
return ALIGN(pitch, 8 * bytespp);
}
#endif /* __OMAP_DRV_H__ */
/*
* drivers/staging/omapdrm/omap_encoder.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "omap_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
/*
* encoder funcs
*/
#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
struct omap_encoder {
struct drm_encoder base;
struct omap_overlay_manager *mgr;
};
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
DBG("%s", omap_encoder->mgr->name);
drm_encoder_cleanup(encoder);
kfree(omap_encoder);
}
static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
DBG("%s: %d", omap_encoder->mgr->name, mode);
}
static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
DBG("%s", omap_encoder->mgr->name);
return true;
}
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct omap_drm_private *priv = dev->dev_private;
int i;
mode = adjusted_mode;
DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
mode->hdisplay, mode->vdisplay);
for (i = 0; i < priv->num_connectors; i++) {
struct drm_connector *connector = priv->connectors[i];
if (connector->encoder == encoder) {
omap_connector_mode_set(connector, mode);
}
}
}
static void omap_encoder_prepare(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
DBG("%s", omap_encoder->mgr->name);
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void omap_encoder_commit(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
DBG("%s", omap_encoder->mgr->name);
omap_encoder->mgr->apply(omap_encoder->mgr);
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.dpms = omap_encoder_dpms,
.mode_fixup = omap_encoder_mode_fixup,
.mode_set = omap_encoder_mode_set,
.prepare = omap_encoder_prepare,
.commit = omap_encoder_commit,
};
struct omap_overlay_manager *omap_encoder_get_manager(
struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
return omap_encoder->mgr;
}
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
struct omap_overlay_manager *mgr)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
struct omap_overlay_manager_info info;
int ret;
DBG("%s", mgr->name);
omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
if (!omap_encoder) {
dev_err(dev->dev, "could not allocate encoder\n");
goto fail;
}
omap_encoder->mgr = mgr;
encoder = &omap_encoder->base;
drm_encoder_init(dev, encoder, &omap_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
mgr->get_manager_info(mgr, &info);
/* TODO: fix hard-coded setup.. */
info.default_color = 0x00000000;
info.trans_key = 0x00000000;
info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
info.trans_enabled = false;
ret = mgr->set_manager_info(mgr, &info);
if (ret) {
dev_err(dev->dev, "could not set manager info\n");
goto fail;
}
ret = mgr->apply(mgr);
if (ret) {
dev_err(dev->dev, "could not apply\n");
goto fail;
}
return encoder;
fail:
if (encoder) {
drm_encoder_cleanup(encoder);
kfree(omap_encoder);
}
return NULL;
}
/*
* drivers/staging/omapdrm/omap_fb.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "omap_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
/*
* framebuffer funcs
*/
#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
struct omap_framebuffer {
struct drm_framebuffer base;
struct drm_gem_object *bo;
int size;
dma_addr_t paddr;
};
static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
return drm_gem_handle_create(file_priv, omap_fb->bo, handle);
}
static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
drm_framebuffer_cleanup(fb);
if (omap_gem_put_paddr(omap_fb->bo)) {
dev_err(dev->dev, "could not unmap!\n");
}
if (omap_fb->bo) {
drm_gem_object_unreference_unlocked(omap_fb->bo);
}
kfree(omap_fb);
}
static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned flags, unsigned color,
struct drm_clip_rect *clips, unsigned num_clips)
{
int i;
for (i = 0; i < num_clips; i++) {
omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1);
}
return 0;
}
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.create_handle = omap_framebuffer_create_handle,
.destroy = omap_framebuffer_destroy,
.dirty = omap_framebuffer_dirty,
};
/* returns the buffer size */
int omap_framebuffer_get_buffer(struct drm_framebuffer *fb, int x, int y,
void **vaddr, dma_addr_t *paddr, unsigned int *screen_width)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
int bpp = fb->bits_per_pixel / 8;
unsigned long offset;
offset = (x * bpp) + (y * fb->pitch);
if (vaddr) {
void *bo_vaddr = omap_gem_vaddr(omap_fb->bo);
/* note: we can only count on having a vaddr for buffers that are
* allocated physically contiguously to begin with (ie. via
* dma_alloc_writecombine()).  But this should be ok because it
* is only used by legacy fbdev
*/
BUG_ON(!bo_vaddr);
*vaddr = bo_vaddr + offset;
}
*paddr = omap_fb->paddr + offset;
*screen_width = fb->pitch / bpp;
return omap_fb->size - offset;
}
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
return omap_fb->bo;
}
/* iterate thru all the connectors, returning ones that are attached
* to the same fb..
*/
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from)
{
struct drm_device *dev = fb->dev;
struct list_head *connector_list = &dev->mode_config.connector_list;
struct drm_connector *connector = from;
if (!from) {
return list_first_entry(connector_list, typeof(*from), head);
}
list_for_each_entry_from(connector, connector_list, head) {
if (connector != from) {
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
if (crtc && crtc->fb == fb) {
return connector;
}
}
}
return NULL;
}
/* flush an area of the framebuffer (needed in the case of a manual-update
* display, which is not flushed automatically)
*/
void omap_framebuffer_flush(struct drm_framebuffer *fb,
int x, int y, int w, int h)
{
struct drm_connector *connector = NULL;
VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
/* only consider connectors that are part of a chain */
if (connector->encoder && connector->encoder->crtc) {
/* TODO: maybe this should propagate thru the crtc, which
* could do the coordinate translation..
*/
struct drm_crtc *crtc = connector->encoder->crtc;
int cx = max(0, x - crtc->x);
int cy = max(0, y - crtc->y);
int cw = w + (x - crtc->x) - cx;
int ch = h + (y - crtc->y) - cy;
omap_connector_flush(connector, cx, cy, cw, ch);
}
}
}
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *bo;
struct drm_framebuffer *fb;
bo = drm_gem_object_lookup(dev, file, mode_cmd->handle);
if (!bo) {
return ERR_PTR(-ENOENT);
}
fb = omap_framebuffer_init(dev, mode_cmd, bo);
if (!fb) {
return ERR_PTR(-ENOMEM);
}
return fb;
}
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *bo)
{
struct omap_framebuffer *omap_fb;
struct drm_framebuffer *fb = NULL;
int size, ret;
DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%d)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
mode_cmd->bpp);
/* in case someone tries to feed us a completely bogus stride: */
mode_cmd->pitch = align_pitch(mode_cmd->pitch,
mode_cmd->width, mode_cmd->bpp);
omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
if (!omap_fb) {
dev_err(dev->dev, "could not allocate fb\n");
goto fail;
}
fb = &omap_fb->base;
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
goto fail;
}
DBG("create: FB ID: %d (%p)", fb->base.id, fb);
size = PAGE_ALIGN(mode_cmd->pitch * mode_cmd->height);
if (bo) {
DBG("using existing %d byte buffer (needed %d)", bo->size, size);
if (size > bo->size) {
dev_err(dev->dev, "provided buffer object is too small!\n");
goto fail;
}
} else {
/* for convenience of all the various callers who don't want
* to be bothered to allocate their own buffer..
*/
union omap_gem_size gsize = {
.bytes = size,
};
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
if (!bo) {
dev_err(dev->dev, "failed to allocate buffer object\n");
goto fail;
}
}
omap_fb->bo = bo;
omap_fb->size = size;
if (omap_gem_get_paddr(bo, &omap_fb->paddr, true)) {
dev_err(dev->dev, "could not map (paddr)!\n");
goto fail;
}
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
return fb;
fail:
if (fb) {
omap_framebuffer_destroy(fb);
}
return NULL;
}
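/* Illustrative sketch (not part of this patch): omap_framebuffer_init()
 * above and omap_gem_dumb_create() further below both sanitize the
 * user-supplied stride with align_pitch(), which lives in omap_drv.h and
 * is not shown in this hunk.  A helper of roughly this shape is assumed;
 * the exact alignment requirement of the real helper may differ:
 */
static inline int example_align_pitch(int pitch, int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;

	/* never accept a pitch smaller than one row of pixels: */
	pitch = max(pitch, width * bytespp);

	/* round up to a conservative hw-friendly alignment (8 pixels
	 * assumed here purely for the sake of the example):
	 */
	return roundup(pitch, 8 * bytespp);
}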
/*
* drivers/staging/omapdrm/omap_fbdev.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "omap_drv.h"
#include "drm_crtc.h"
#include "drm_fb_helper.h"
/*
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
*/
#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
struct omap_fbdev {
struct drm_fb_helper base;
struct drm_framebuffer *fb;
};
static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t res;
res = fb_sys_write(fbi, buf, count, ppos);
omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
return res;
}
static void omap_fbdev_fillrect(struct fb_info *fbi,
const struct fb_fillrect *rect)
{
sys_fillrect(fbi, rect);
omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
}
static void omap_fbdev_copyarea(struct fb_info *fbi,
const struct fb_copyarea *area)
{
sys_copyarea(fbi, area);
omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
}
static void omap_fbdev_imageblit(struct fb_info *fbi,
const struct fb_image *image)
{
sys_imageblit(fbi, image);
omap_fbdev_flush(fbi, image->dx, image->dy,
image->width, image->height);
}
static struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
/* Note: to properly handle manual update displays, we wrap the
* basic fbdev ops which write to the framebuffer
*/
.fb_read = fb_sys_read,
.fb_write = omap_fbdev_write,
.fb_fillrect = omap_fbdev_fillrect,
.fb_copyarea = omap_fbdev_copyarea,
.fb_imageblit = omap_fbdev_imageblit,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
struct drm_device *dev = helper->dev;
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd mode_cmd = {0};
dma_addr_t paddr;
void __iomem *vaddr;
int size, screen_width;
int ret;
/* only doing ARGB32 since this is what is needed to alpha-blend
* with video overlays:
*/
sizes->surface_bpp = 32;
sizes->surface_depth = 32;
DBG("create fbdev: %dx%d@%d", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.depth = sizes->surface_depth;
fb = omap_framebuffer_init(dev, &mode_cmd, NULL);
if (!fb) {
dev_err(dev->dev, "failed to allocate fb\n");
ret = -ENOMEM;
goto fail;
}
mutex_lock(&dev->struct_mutex);
fbi = framebuffer_alloc(0, dev->dev);
if (!fbi) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = -ENOMEM;
goto fail_unlock;
}
DBG("fbi=%p, dev=%p", fbi, dev);
fbdev->fb = fb;
helper->fb = fb;
helper->fbdev = fbi;
fbi->par = helper;
fbi->flags = FBINFO_DEFAULT;
fbi->fbops = &omap_fb_ops;
strcpy(fbi->fix.id, MODULE_NAME);
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto fail_unlock;
}
drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
size = omap_framebuffer_get_buffer(fb, 0, 0,
&vaddr, &paddr, &screen_width);
dev->mode_config.fb_base = paddr;
fbi->screen_base = vaddr;
fbi->screen_size = size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
mutex_unlock(&dev->struct_mutex);
return 0;
fail_unlock:
mutex_unlock(&dev->struct_mutex);
fail:
if (ret) {
if (fbi)
framebuffer_release(fbi);
if (fb)
fb->funcs->destroy(fb);
}
return ret;
}
static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
u16 red, u16 green, u16 blue, int regno)
{
DBG("fbdev: set gamma");
}
static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue, int regno)
{
DBG("fbdev: get gamma");
}
static int omap_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
int new_fb = 0;
int ret;
if (!helper->fb) {
ret = omap_fbdev_create(helper, sizes);
if (ret)
return ret;
new_fb = 1;
}
return new_fb;
}
static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.gamma_set = omap_crtc_fb_gamma_set,
.gamma_get = omap_crtc_fb_gamma_get,
.fb_probe = omap_fbdev_probe,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
{
if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
/* these are not the fb's you're looking for */
return NULL;
}
return fbi->par;
}
/* flush an area of the framebuffer (needed in the case of a manual-update
* display, which is not flushed automatically)
*/
static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
{
struct drm_fb_helper *helper = get_fb(fbi);
if (!helper)
return;
VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
omap_framebuffer_flush(helper->fb, x, y, w, h);
}
/* initialize fbdev helper */
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_fbdev *fbdev = NULL;
struct drm_fb_helper *helper;
int ret = 0;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev) {
dev_err(dev->dev, "could not allocate fbdev\n");
goto fail;
}
helper = &fbdev->base;
helper->funcs = &omap_fb_helper_funcs;
ret = drm_fb_helper_init(dev, helper,
priv->num_crtcs, priv->num_connectors);
if (ret) {
dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
drm_fb_helper_single_add_all_connectors(helper);
drm_fb_helper_initial_config(helper, 32);
priv->fbdev = helper;
return helper;
fail:
kfree(fbdev);
return NULL;
}
void omap_fbdev_free(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_fb_helper *helper = priv->fbdev;
struct omap_fbdev *fbdev;
struct fb_info *fbi;
DBG();
fbi = helper->fbdev;
unregister_framebuffer(fbi);
framebuffer_release(fbi);
drm_fb_helper_fini(helper);
fbdev = to_omap_fbdev(priv->fbdev);
kfree(fbdev);
priv->fbdev = NULL;
}
/*
* drivers/staging/omapdrm/omap_gem.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include "omap_drv.h"
/* remove these once drm core helpers are merged */
struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
/*
* GEM buffer object implementation.
*/
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
struct omap_gem_object {
struct drm_gem_object base;
uint32_t flags;
/**
* If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
* is set and the paddr is valid.
*
* Note that OMAP_BO_SCANOUT is only a hint from userspace that a DMA
* capable buffer is requested; it does not guarantee that one was
* obtained.  Use the OMAP_BO_DMA flag to determine whether the buffer
* actually has a DMA capable physical address.
*/
dma_addr_t paddr;
/**
* Array of backing pages, if allocated. Note that pages are never
* allocated for buffers originally allocated from contiguous memory
*/
struct page **pages;
/**
* Virtual address, if mapped.
*/
void *vaddr;
/**
* sync-object allocated on demand (if needed)
*
* Per-buffer sync-object for tracking pending and completed hw/dma
* read and write operations. The layout in memory is dictated by
* the SGX firmware, which uses this information to stall the command
* stream if a surface is not ready yet.
*
* Note that when buffer is used by SGX, the sync-object needs to be
* allocated from a special heap of sync-objects. This way many sync
* objects can be packed in a page, and not waste GPU virtual address
* space. Because of this we have to have a omap_gem_set_sync_object()
* API to allow replacement of the syncobj after it has (potentially)
* already been allocated. A bit ugly but I haven't thought of a
* better alternative.
*/
struct {
uint32_t write_pending;
uint32_t write_complete;
uint32_t read_pending;
uint32_t read_complete;
} *sync;
};
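/* Illustrative sketch (not part of this patch): because OMAP_BO_SCANOUT
 * is only a request and OMAP_BO_DMA records what was actually obtained,
 * a hypothetical in-kernel caller that needs DMA would test the latter:
 */
static inline bool example_bo_is_dma_capable(struct drm_gem_object *obj)
{
	/* set by omap_gem_new() only if contiguous memory was really
	 * allocated, regardless of what userspace asked for:
	 */
	return to_omap_bo(obj)->flags & OMAP_BO_DMA;
}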
/* GEM objects can either be allocated from contiguous memory (in which
* case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  A
* non-contiguous buffer can still be remapped in TILER/DMM to make it
* contiguous when that is needed, but to reduce pressure on TILER/DMM
* space we allocate contiguous memory up front when we already know at
* allocation time that the buffer will need to be scanned out.
*/
static inline bool is_shmem(struct drm_gem_object *obj)
{
return obj->filp != NULL;
}
static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static DEFINE_SPINLOCK(sync_lock);
/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct page **pages;
WARN_ON(omap_obj->pages);
/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
* mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
* we actually want CMA memory for it all anyways..
*/
pages = _drm_gem_get_pages(obj, GFP_KERNEL);
if (IS_ERR(pages)) {
dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
return PTR_ERR(pages);
}
omap_obj->pages = pages;
return 0;
}
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
_drm_gem_put_pages(obj, omap_obj->pages, true, false);
omap_obj->pages = NULL;
}
/** get mmap offset */
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
if (!obj->map_list.map) {
/* Make it mmapable */
int ret = drm_gem_create_mmap_offset(obj);
if (ret) {
dev_err(obj->dev->dev, "could not allocate mmap offset");
return 0;
}
}
return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
}
/**
* omap_gem_fault - pagefault handler for GEM objects
* @vma: the VMA of the GEM object
* @vmf: fault detail
*
* Invoked when a fault occurs on an mmap of a GEM managed area. GEM
* does most of the work for us including the actual map/unmap calls
* but we need to do the actual page work.
*
* The VMA was set up by GEM. In doing so it also ensured that the
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct drm_device *dev = obj->dev;
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
int ret;
/* Make sure we don't parallel update on a fault, nor move or remove
* something from beneath our feet
*/
mutex_lock(&dev->struct_mutex);
/* if a shmem backed object, make sure we have pages attached now */
ret = get_pages(obj, &pages);
if (ret) {
goto fail;
}
/* where should we do the corresponding put_pages().. we are mapping
* the original page, rather than thru a GART, so we can't rely
* on eviction to trigger this.  But munmap() of all mappings should
* probably trigger put_pages()?
*/
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
if (omap_obj->pages) {
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
}
VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
fail:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
case 0:
case -ERESTARTSYS:
case -EINTR:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
}
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct omap_gem_object *omap_obj;
int ret;
ret = drm_gem_mmap(filp, vma);
if (ret) {
DBG("mmap failed: %d", ret);
return ret;
}
/* after drm_gem_mmap(), it is safe to access the obj */
omap_obj = to_omap_bo(vma->vm_private_data);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
if (omap_obj->flags & OMAP_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
} else {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
return ret;
}
/**
* omap_gem_dumb_create - create a dumb buffer
* @file: our client file
* @dev: our device
* @args: the requested arguments copied from userspace
*
* Allocate a buffer suitable for use for a frame buffer of the
* form described by user space. Give userspace a handle by which
* to reference it.
*/
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
union omap_gem_size gsize;
/* in case someone tries to feed us a completely bogus stride: */
args->pitch = align_pitch(args->pitch, args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
gsize = (union omap_gem_size){
.bytes = args->size,
};
return omap_gem_new_handle(dev, file, gsize,
OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
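/* Illustrative userspace sketch (not part of this patch): how a client
 * exercises the dumb-buffer path implemented by omap_gem_dumb_create()
 * above and omap_gem_dumb_map_offset() below, using only the generic DRM
 * ioctls.  The fd is assumed to come from open("/dev/dri/card0", O_RDWR);
 * the dimensions and bpp are example values:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/drm.h>
 *	#include <drm/drm_mode.h>
 *
 *	static void *map_dumb_buffer(int fd, uint32_t w, uint32_t h,
 *			uint32_t *pitch)
 *	{
 *		struct drm_mode_create_dumb create = {
 *			.width = w, .height = h, .bpp = 32,
 *		};
 *		struct drm_mode_map_dumb map = { 0 };
 *		void *ptr;
 *
 *		// handled by omap_gem_dumb_create()
 *		if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
 *			return NULL;
 *
 *		// handled by omap_gem_dumb_map_offset()
 *		map.handle = create.handle;
 *		if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
 *			return NULL;
 *
 *		// the "fake" offset is then used with mmap() on the drm fd,
 *		// which lands in omap_gem_mmap()/omap_gem_fault()
 *		ptr = mmap(0, create.size, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, fd, map.offset);
 *		if (ptr == MAP_FAILED)
 *			return NULL;
 *
 *		*pitch = create.pitch;
 *		return ptr;
 *	}
 */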
/**
* omap_gem_dumb_destroy - destroy a dumb buffer
* @file: client file
* @dev: our DRM device
* @handle: the object handle
*
* Destroy a handle that was created via omap_gem_dumb_create.
*/
int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
uint32_t handle)
{
/* No special work needed, drop the reference and see what falls out */
return drm_gem_handle_delete(file, handle);
}
/**
* omap_gem_dumb_map_offset - buffer mapping for dumb interface
* @file: our drm client file
* @dev: drm device
* @handle: GEM handle to the object (from dumb_create)
* @offset: returned mmap offset for the buffer
*
* Do the necessary setup to allow the mapping of the frame buffer
* into user memory. We don't have to do much here at the moment.
*/
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct drm_gem_object *obj;
int ret = 0;
mutex_lock(&dev->struct_mutex);
/* GEM does all our handle to object mapping */
obj = drm_gem_object_lookup(dev, file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto fail;
}
*offset = omap_gem_mmap_offset(obj);
drm_gem_object_unreference_unlocked(obj);
fail:
mutex_unlock(&dev->struct_mutex);
return ret;
}
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
* already contiguous, remap it to pin in physically contiguous memory.. (ie.
* map in TILER)
*/
int omap_gem_get_paddr(struct drm_gem_object *obj,
dma_addr_t *paddr, bool remap)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
if (is_shmem(obj)) {
/* TODO: remap to TILER */
return -ENOMEM;
}
*paddr = omap_obj->paddr;
return ret;
}
/* Release physical address when DMA is no longer being performed.. this
* could potentially unpin and unmap buffers from TILER
*/
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
/* do something here when remap to TILER is used.. */
return 0;
}
/* acquire pages when needed (for example, for DMA where a physically
* contiguous buffer is not required)
*/
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
if (is_shmem(obj) && !omap_obj->pages) {
ret = omap_gem_attach_pages(obj);
if (ret) {
dev_err(obj->dev->dev, "could not attach pages\n");
return ret;
}
}
/* TODO: even phys-contig.. we should have a list of pages? */
*pages = omap_obj->pages;
return 0;
}
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
{
int ret;
mutex_lock(&obj->dev->struct_mutex);
ret = get_pages(obj, pages);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
/* release pages when DMA is no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
/* do something here if we dynamically attach/detach pages.. at
* least they would no longer need to be pinned if everyone has
* released the pages..
*/
return 0;
}
/* Get kernel virtual address for CPU access.. only buffers that are
* allocated contiguously have a kernel virtual address, so this more
* or less only exists for omap_fbdev
*/
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
return omap_obj->vaddr;
}
/* Buffer Synchronization:
*/
struct omap_gem_sync_waiter {
struct list_head list;
struct omap_gem_object *omap_obj;
enum omap_gem_op op;
uint32_t read_target, write_target;
/* notify called w/ sync_lock held */
void (*notify)(void *arg);
void *arg;
};
/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
* the read and/or write target count is achieved, which can be used to
* call a user callback (ex. to kick 3d and/or 2d), wake up a blocked
* task (prep for cpu access), etc.
*/
static LIST_HEAD(waiters);
static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
struct omap_gem_object *omap_obj = waiter->omap_obj;
if ((waiter->op & OMAP_GEM_READ) &&
(omap_obj->sync->read_complete < waiter->read_target))
return true;
if ((waiter->op & OMAP_GEM_WRITE) &&
(omap_obj->sync->write_complete < waiter->write_target))
return true;
return false;
}
/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
printk(KERN_ERR "%s:%d: "fmt"\n", \
__func__, __LINE__, ##__VA_ARGS__); \
} while (0)
static void sync_op_update(void)
{
struct omap_gem_sync_waiter *waiter, *n;
list_for_each_entry_safe(waiter, n, &waiters, list) {
if (!is_waiting(waiter)) {
list_del(&waiter->list);
SYNC("notify: %p", waiter);
waiter->notify(waiter->arg);
kfree(waiter);
}
}
}
static inline int sync_op(struct drm_gem_object *obj,
enum omap_gem_op op, bool start)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
spin_lock(&sync_lock);
if (!omap_obj->sync) {
omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
if (!omap_obj->sync) {
ret = -ENOMEM;
goto unlock;
}
}
if (start) {
if (op & OMAP_GEM_READ)
omap_obj->sync->read_pending++;
if (op & OMAP_GEM_WRITE)
omap_obj->sync->write_pending++;
} else {
if (op & OMAP_GEM_READ)
omap_obj->sync->read_complete++;
if (op & OMAP_GEM_WRITE)
omap_obj->sync->write_complete++;
sync_op_update();
}
unlock:
spin_unlock(&sync_lock);
return ret;
}
/* it is a bit lame to handle updates in this sort of polling way, but
* in the case of PVR, the GPU can directly update read/write complete
* values, and not really tell us which ones it updated.. this also
* means that sync_lock is not quite sufficient. So we'll need to
* do something a bit better when it comes time to add support for
* separate 2d hw..
*/
void omap_gem_op_update(void)
{
spin_lock(&sync_lock);
sync_op_update();
spin_unlock(&sync_lock);
}
/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
return sync_op(obj, op, true);
}
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
return sync_op(obj, op, false);
}
static DECLARE_WAIT_QUEUE_HEAD(sync_event);
static void sync_notify(void *arg)
{
struct task_struct **waiter_task = arg;
*waiter_task = NULL;
wake_up_all(&sync_event);
}
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
if (omap_obj->sync) {
struct task_struct *waiter_task = current;
struct omap_gem_sync_waiter *waiter =
kzalloc(sizeof(*waiter), GFP_KERNEL);
if (!waiter) {
return -ENOMEM;
}
waiter->omap_obj = omap_obj;
waiter->op = op;
waiter->read_target = omap_obj->sync->read_pending;
waiter->write_target = omap_obj->sync->write_pending;
waiter->notify = sync_notify;
waiter->arg = &waiter_task;
spin_lock(&sync_lock);
if (is_waiting(waiter)) {
SYNC("waited: %p", waiter);
list_add_tail(&waiter->list, &waiters);
spin_unlock(&sync_lock);
ret = wait_event_interruptible(sync_event,
(waiter_task == NULL));
spin_lock(&sync_lock);
if (waiter_task) {
SYNC("interrupted: %p", waiter);
/* we were interrupted */
list_del(&waiter->list);
waiter_task = NULL;
} else {
/* freed in sync_op_update() */
waiter = NULL;
}
}
spin_unlock(&sync_lock);
if (waiter) {
kfree(waiter);
}
}
return ret;
}
/* call fxn(arg), either synchronously or asynchronously if the op
* is currently blocked.. fxn() can be called from any context
*
* (TODO for now fxn is called back from whichever context calls
* omap_gem_op_update().. but this could be better defined later
* if needed)
*
* TODO more code in common w/ _sync()..
*/
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
void (*fxn)(void *arg), void *arg)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
if (omap_obj->sync) {
struct omap_gem_sync_waiter *waiter =
kzalloc(sizeof(*waiter), GFP_ATOMIC);
if (!waiter) {
return -ENOMEM;
}
waiter->omap_obj = omap_obj;
waiter->op = op;
waiter->read_target = omap_obj->sync->read_pending;
waiter->write_target = omap_obj->sync->write_pending;
waiter->notify = fxn;
waiter->arg = arg;
spin_lock(&sync_lock);
if (is_waiting(waiter)) {
SYNC("waited: %p", waiter);
list_add_tail(&waiter->list, &waiters);
spin_unlock(&sync_lock);
return 0;
}
spin_unlock(&sync_lock);
}
/* no waiting.. */
fxn(arg);
return 0;
}
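/* Illustrative sketch (not part of this patch): how a hypothetical
 * in-kernel user of a buffer (a DMA engine, the PVR driver, ..) would
 * bracket a hw write with the sync API above, and how the CPU side would
 * then wait for it.  example_* and start_hw_write() are made-up names:
 */
static void example_hw_write_done(void *arg)
{
	struct drm_gem_object *obj = arg;

	/* hw is done writing; wakes anyone blocked in omap_gem_op_sync()
	 * and runs any callback queued via omap_gem_op_async()
	 */
	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
}

static int example_write_then_cpu_read(struct drm_gem_object *obj)
{
	int ret;

	/* mark that a hw write of this buffer is pending */
	ret = omap_gem_op_start(obj, OMAP_GEM_WRITE);
	if (ret)
		return ret;

	start_hw_write(obj, example_hw_write_done, obj);	/* hypothetical */

	/* block until all pending writes have completed before the CPU
	 * touches the buffer contents
	 */
	return omap_gem_op_sync(obj, OMAP_GEM_WRITE);
}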
/* special API so PVR can update the buffer to use a sync-object allocated
* from its sync-obj heap.  Only used for a newly allocated (from PVR's
* perspective) sync-object, so we overwrite the new syncobj w/ values
* from the already allocated syncobj (if there is one)
*/
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
spin_lock(&sync_lock);
if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
/* clearing a previously set syncobj */
syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
if (!syncobj) {
ret = -ENOMEM;
goto unlock;
}
memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
omap_obj->sync = syncobj;
} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
/* replacing an existing syncobj */
if (omap_obj->sync) {
memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
kfree(omap_obj->sync);
}
omap_obj->flags |= OMAP_BO_EXT_SYNC;
omap_obj->sync = syncobj;
}
unlock:
spin_unlock(&sync_lock);
return ret;
}
int omap_gem_init_object(struct drm_gem_object *obj)
{
return -EINVAL; /* unused */
}
/* don't call directly.. called from GEM core when it is time to actually
* free the object..
*/
void omap_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
if (obj->map_list.map) {
drm_gem_free_mmap_offset(obj);
}
/* don't free externally allocated backing memory */
if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
if (omap_obj->pages) {
omap_gem_detach_pages(obj);
}
if (!is_shmem(obj)) {
dma_free_writecombine(dev->dev, obj->size,
omap_obj->vaddr, omap_obj->paddr);
}
}
/* don't free externally allocated syncobj */
if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
kfree(omap_obj->sync);
}
drm_gem_object_release(obj);
kfree(obj);
}
/* convenience method to construct a GEM buffer object and a userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
struct drm_gem_object *obj;
int ret;
obj = omap_gem_new(dev, gsize, flags);
if (!obj)
return -ENOMEM;
ret = drm_gem_handle_create(file, obj, handle);
if (ret) {
drm_gem_object_release(obj);
kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
return ret;
}
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(obj);
return 0;
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, uint32_t flags)
{
struct omap_gem_object *omap_obj;
struct drm_gem_object *obj = NULL;
size_t size;
int ret;
if (flags & OMAP_BO_TILED) {
/* TODO: not implemented yet */
goto fail;
}
size = PAGE_ALIGN(gsize.bytes);
omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
if (!omap_obj) {
dev_err(dev->dev, "could not allocate GEM object\n");
goto fail;
}
obj = &omap_obj->base;
if (flags & OMAP_BO_SCANOUT) {
/* attempt to allocate contiguous memory */
omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
&omap_obj->paddr, GFP_KERNEL);
if (omap_obj->vaddr) {
flags |= OMAP_BO_DMA;
}
}
omap_obj->flags = flags;
if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
ret = drm_gem_private_object_init(dev, obj, size);
} else {
ret = drm_gem_object_init(dev, obj, size);
}
if (ret) {
goto fail;
}
return obj;
fail:
if (obj) {
omap_gem_free_object(obj);
}
return NULL;
}
/*
* drivers/staging/omapdrm/omap_gem_helpers.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* temporary copy of drm_gem_{get,put}_pages() until the
* "drm/gem: add functions to get/put pages" patch is merged..
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
/**
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* @obj: obj in question
* @gfpmask: gfp mask of requested pages
*/
struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
struct inode *inode;
struct address_space *mapping;
struct page *p, **pages;
int i, npages;
/* This is the shared memory object that backs the GEM resource */
inode = obj->filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
npages = obj->size >> PAGE_SHIFT;
pages = drm_malloc_ab(npages, sizeof(struct page *));
if (pages == NULL)
return ERR_PTR(-ENOMEM);
gfpmask |= mapping_gfp_mask(mapping);
for (i = 0; i < npages; i++) {
p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(p))
goto fail;
pages[i] = p;
/* There is a hypothetical issue w/ drivers that require
* buffer memory in the low 4GB.. if the pages are un-
* pinned, and swapped out, they can end up swapped back
* in above 4GB. If pages are already in memory, then
* shmem_read_mapping_page_gfp will ignore the gfpmask,
* even if the already in-memory page disobeys the mask.
*
* It is only a theoretical issue today, because none of
* the devices with this limitation can be populated with
* enough memory to trigger the issue. But this BUG_ON()
* is here as a reminder in case the problem with
* shmem_read_mapping_page_gfp() isn't solved by the time
* it does become a real issue.
*
* See this thread: http://lkml.org/lkml/2011/7/11/238
*/
BUG_ON((gfpmask & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
}
return pages;
fail:
while (i--) {
page_cache_release(pages[i]);
}
drm_free_large(pages);
return ERR_PTR(PTR_ERR(p));
}
/**
* drm_gem_put_pages - helper to free backing pages for a GEM object
* @obj: obj in question
* @pages: pages to free
* @dirty: if true, the pages are marked dirty before being released
* @accessed: if true, the pages are marked accessed before being released
*/
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed)
{
int i, npages;
npages = obj->size >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
if (dirty)
set_page_dirty(pages[i]);
if (accessed)
mark_page_accessed(pages[i]);
/* Undo the reference we took when populating the table */
page_cache_release(pages[i]);
}
drm_free_large(pages);
}
/*
* include/drm/omap_priv.h
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __OMAP_PRIV_H__
#define __OMAP_PRIV_H__
/* Non-userspace facing APIs
*/
/* optional platform data to configure which pipes/overlays/CRTCs are used
* by default.. if this is not provided, then instead the first
* CONFIG_DRM_OMAP_NUM_CRTCS overlays are used, and they are each connected
* to one manager, with priority given to managers that are connected to
* detected devices.  This should be a good default behavior for most
* cases, but there still might be times when you wish to do something
* different.
*/
struct omap_drm_platform_data {
int ovl_cnt;
const int *ovl_ids;
int mgr_cnt;
const int *mgr_ids;
int dev_cnt;
const char **dev_names;
};
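/* Illustrative sketch (not part of this patch): how a board file might
 * fill in this platform data.  The overlay/manager ids, the dssdev names
 * ("dvi", "hdmi") and the way the data is handed to the driver are all
 * assumptions made only for the sake of the example:
 *
 *	static const int example_ovl_ids[] = { 0, 1 };
 *	static const int example_mgr_ids[] = { 0 };
 *	static const char *example_dev_names[] = { "dvi", "hdmi" };
 *
 *	static struct omap_drm_platform_data example_omapdrm_pdata = {
 *		.ovl_cnt = ARRAY_SIZE(example_ovl_ids),
 *		.ovl_ids = example_ovl_ids,
 *		.mgr_cnt = ARRAY_SIZE(example_mgr_ids),
 *		.mgr_ids = example_mgr_ids,
 *		.dev_cnt = ARRAY_SIZE(example_dev_names),
 *		.dev_names = example_dev_names,
 *	};
 */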
#endif /* __OMAP_PRIV_H__ */
 menuconfig FB_OMAP2
 	tristate "OMAP2+ frame buffer support"
-	depends on FB && OMAP2_DSS
+	depends on FB && OMAP2_DSS && !DRM_OMAP
 	select OMAP2_VRAM
 	select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3