Commit fc556fb6 authored by Dave Airlie

Merge tag 'drm/tegra/for-3.19-rc1-fixes' of git://people.freedesktop.org/~tagr/linux into drm-fixes

drm/tegra: Fixes for v3.19-rc1

This is a set of fixes for two regressions and one bug in the IOMMU
mapping code. It turns out that all of these issues show up primarily
on Tegra30 hardware. The IOMMU mapping bug only manifests on buffers
whose size isn't a multiple of the page size. I happened to be testing
HDMI at 1080p while writing the code, and framebuffers for that mode
happen to fit exactly within 2025 pages of 4 KiB each.
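
For context, the arithmetic behind that last sentence: a 1920x1080
framebuffer at 4 bytes per pixel is 8294400 bytes, which divides evenly
into 2025 pages of 4 KiB, so the buffer size is already page-aligned and
the bug stays hidden. A small standalone sketch of that calculation
(nothing here is taken from the patches themselves):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long fb = 1920UL * 1080UL * 4UL;	/* 8294400 bytes */

	/* 8294400 / 4096 == 2025 with no remainder, so the requested size
	 * already equals the page-aligned GEM size and the mapping bug
	 * stays hidden; any size that is not a page multiple exposes it. */
	printf("%lu bytes = %lu pages, page-aligned size %lu\n",
	       fb, fb / PAGE_SIZE, PAGE_ALIGN(fb));
	return 0;
}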

One of the regressions is caused by the IOMMU code allocating pages from
shmem, which can have cache lines associated with them. If the pages
aren't flushed at allocation time, those cache lines may be written back
later on and cause framebuffer corruption. I'm not sure why I didn't see
this before. Perhaps the board that I was using had enough RAM that the
pages shmem would hand out had a better chance of being unused. Or maybe
I didn't look too closely.
The fix for this is to fake up an SG table so that it can be passed to
the DMA API. Ideally this would use drm_clflush_*(), but implementing
that for ARM causes DRM to fail to build as a module since some of the
low-level cache maintenance functions aren't exported. Hopefully we can
get a suitable API exported on ARM for the next release.
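
The gem.c hunk further down shows the actual change; in condensed form the
approach looks roughly like the following. This is a hypothetical helper
(the name is made up for illustration), but the calls mirror the hunk:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical helper illustrating the approach: the SG table is faked up
 * so that dma_map_sg() can be used purely for its cache-maintenance side
 * effect, relying on the DMA API not going through the IOMMU here.
 */
static int flush_pages_for_device(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *s;
	unsigned int i;

	/* Point the DMA addresses at the physical addresses of the pages. */
	for_each_sg(sgt->sgl, s, sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	/* dma_map_sg() returns the number of mapped entries; 0 means failure. */
	if (dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0)
		return -ENOMEM;

	return 0;
}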

The second regression is caused by a mismatch between the hardware pipe
number and the CRTC's DRM index. These were used inconsistently, which
could cause one code location to call drm_vblank_get() with a different
pipe than the corresponding drm_vblank_put(), thereby causing the
reference count to become unbalanced. Alexandre also reported a possible
race condition related to this, which this series fixes as well.
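
The new drm_crtc_*() helpers added below avoid the mismatch because the
driver passes the CRTC itself and the core derives the index consistently.
A hypothetical driver-side sketch of the balanced completion path,
assuming drm_crtc_vblank_get() was called when the flip was queued and
mirroring the locking used in the dc.c hunk below:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/*
 * Illustrative only (not from the patches): completing a page flip with
 * the CRTC-based helpers, so the vblank reference taken at flip
 * submission is dropped against the same CRTC.
 */
static void example_complete_flip(struct drm_crtc *crtc,
				  struct drm_pending_vblank_event *event)
{
	struct drm_device *drm = crtc->dev;
	unsigned long flags;

	/* drm_crtc_send_vblank_event() must be called with the event lock
	 * held, as its kerneldoc below notes. */
	spin_lock_irqsave(&drm->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, event);
	drm_crtc_vblank_put(crtc);
	spin_unlock_irqrestore(&drm->event_lock, flags);
}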

* tag 'drm/tegra/for-3.19-rc1-fixes' of git://people.freedesktop.org/~tagr/linux:
  drm/tegra: dc: Select root window for event dispatch
  drm/tegra: gem: Use the proper size for GEM objects
  drm/tegra: gem: Flush buffer objects upon allocation
  drm/tegra: dc: Fix a potential race on page-flip completion
  drm/tegra: dc: Consistently use the same pipe
  drm/irq: Add drm_crtc_vblank_count()
  drm/irq: Add drm_crtc_handle_vblank()
  drm/irq: Add drm_crtc_send_vblank_event()
parents a548a838 93396d0f
@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
* This is the legacy version of drm_crtc_vblank_count().
*
* Returns:
* The software vblank counter.
*/
@@ -843,6 +845,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_count);
/**
* drm_crtc_vblank_count - retrieve "cooked" vblank counter value
* @crtc: which counter to retrieve
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
* This is the native KMS version of drm_vblank_count().
*
* Returns:
* The software vblank counter.
*/
u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
{
return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_count);
/**
* drm_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value.
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
*
* This is the legacy version of drm_crtc_send_vblank_event().
*/
void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e)
@@ -922,6 +945,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_send_vblank_event);
/**
* drm_crtc_send_vblank_event - helper to send vblank event after pageflip
* @crtc: the source CRTC of the vblank event
* @e: the event to send
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
*
* This is the native KMS version of drm_send_vblank_event().
*/
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
{
drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*
* This is the legacy version of drm_crtc_handle_vblank().
*/
bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
/**
* drm_crtc_handle_vblank - handle a vblank event
* @crtc: where this event occurred
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*
* This is the native KMS version of drm_handle_vblank().
*
* Returns:
* True if the event was successfully handled, false on failure.
*/
bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
{
return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_handle_vblank);
@@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
unsigned long value;
unsigned long value, flags;
bool yuv, planar;
/*
@@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
else
bpp = planar ? 1 : 2;
spin_lock_irqsave(&dc->lock, flags);
value = WINDOW_A_SELECT << index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_window_commit(dc, index);
spin_unlock_irqrestore(&dc->lock, flags);
return 0;
}
@@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
{
struct tegra_dc *dc = to_tegra_dc(plane->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
unsigned long flags;
u32 value;
if (!plane->crtc)
return 0;
spin_lock_irqsave(&dc->lock, flags);
value = WINDOW_A_SELECT << p->index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
tegra_dc_window_commit(dc, p->index);
spin_unlock_irqrestore(&dc->lock, flags);
return 0;
}
@@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
struct tegra_bo_tiling tiling;
unsigned long value, flags;
unsigned int format, swap;
unsigned long value;
int err;
err = tegra_fb_get_tiling(fb, &tiling);
if (err < 0)
return err;
spin_lock_irqsave(&dc->lock, flags);
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
spin_unlock_irqrestore(&dc->lock, flags);
return 0;
}
@@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
unsigned long flags, base;
struct tegra_bo *bo;
if (!dc->event)
spin_lock_irqsave(&drm->event_lock, flags);
if (!dc->event) {
spin_unlock_irqrestore(&drm->event_lock, flags);
return;
}
bo = tegra_fb_get_plane(crtc->primary->fb, 0);
spin_lock_irqsave(&dc->lock, flags);
/* check if new start address has been latched */
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
spin_unlock_irqrestore(&dc->lock, flags);
if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
spin_lock_irqsave(&drm->event_lock, flags);
drm_send_vblank_event(drm, dc->pipe, dc->event);
drm_vblank_put(drm, dc->pipe);
drm_crtc_send_vblank_event(crtc, dc->event);
drm_crtc_vblank_put(crtc);
dc->event = NULL;
spin_unlock_irqrestore(&drm->event_lock, flags);
}
spin_unlock_irqrestore(&drm->event_lock, flags);
}
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
if (dc->event && dc->event->base.file_priv == file) {
dc->event->base.destroy(&dc->event->base);
drm_vblank_put(drm, dc->pipe);
drm_crtc_vblank_put(crtc);
dc->event = NULL;
}
@@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
unsigned int pipe = drm_crtc_index(crtc);
struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
if (dc->event)
return -EBUSY;
if (event) {
event->pipe = dc->pipe;
event->pipe = pipe;
dc->event = event;
drm_vblank_get(drm, dc->pipe);
drm_crtc_vblank_get(crtc);
}
tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
drm_handle_vblank(dc->base.dev, dc->pipe);
drm_crtc_handle_vblank(&dc->base);
tegra_dc_finish_page_flip(dc);
}
@@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
unsigned int pipe)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
struct tegra_dc *dc = to_tegra_dc(crtc);
if (dc->pipe == pipe)
if (pipe == drm_crtc_index(crtc))
return crtc;
}
return NULL;
}
static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
if (!crtc)
return 0;
/* TODO: implement real hardware counter using syncpoints */
return drm_vblank_count(dev, crtc);
return drm_crtc_vblank_count(crtc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
}
}
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
size_t size)
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
struct scatterlist *s;
struct sg_table *sgt;
unsigned int i;
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
return PTR_ERR(bo->pages);
bo->num_pages = size >> PAGE_SHIFT;
bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
if (IS_ERR(bo->sgt)) {
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
return PTR_ERR(bo->sgt);
bo->num_pages = bo->gem.size >> PAGE_SHIFT;
sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
if (IS_ERR(sgt))
goto put_pages;
/*
* Fake up the SG table so that dma_map_sg() can be used to flush the
* pages associated with it. Note that this relies on the fact that
* the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
* only cache maintenance.
*
* TODO: Replace this with drm_clflush_sg() once it can be implemented
* without relying on symbols that are not exported.
*/
for_each_sg(sgt->sgl, s, sgt->nents, i)
sg_dma_address(s) = sg_phys(s);
if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
sgt = ERR_PTR(-ENOMEM);
goto release_sgt;
}
bo->sgt = sgt;
return 0;
release_sgt:
sg_free_table(sgt);
kfree(sgt);
put_pages:
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
return PTR_ERR(sgt);
}
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
size_t size)
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
err = tegra_bo_get_pages(drm, bo, size);
err = tegra_bo_get_pages(drm, bo);
if (err < 0)
return err;
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
return err;
}
} else {
size_t size = bo->gem.size;
bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
if (IS_ERR(bo))
return bo;
err = tegra_bo_alloc(drm, bo, size);
err = tegra_bo_alloc(drm, bo);
if (err < 0)
goto release;
@@ -901,11 +901,15 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);