Commit 7a5bea77 authored by Dave Airlie's avatar Dave Airlie

Merge branch 'msm-fixes-4.14-rc4' of git://people.freedesktop.org/~robclark/linux into drm-fixes

bunch of msm fixes

* 'msm-fixes-4.14-rc4' of git://people.freedesktop.org/~robclark/linux:
  drm/msm: fix _NO_IMPLICIT fencing case
  drm/msm: fix error path cleanup
  drm/msm/mdp5: Remove extra pm_runtime_put call in mdp5_crtc_cursor_set()
  drm/msm/dsi: Use correct pm_runtime_put variant during host_init
  drm/msm: fix return value check in _msm_gem_kernel_new()
  drm/msm: use proper memory barriers for updating tail/head
  drm/msm/mdp5: add missing max size for 8x74 v1
parents a6402e80 06451a3d
...@@ -248,7 +248,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( ...@@ -248,7 +248,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
clk_disable_unprepare(ahb_clk); clk_disable_unprepare(ahb_clk);
disable_gdsc: disable_gdsc:
regulator_disable(gdsc_reg); regulator_disable(gdsc_reg);
pm_runtime_put_autosuspend(dev); pm_runtime_put_sync(dev);
put_clk: put_clk:
clk_put(ahb_clk); clk_put(ahb_clk);
put_gdsc: put_gdsc:
......
...@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = { ...@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.caps = MDP_LM_CAP_WB }, .caps = MDP_LM_CAP_WB },
}, },
.nb_stages = 5, .nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
}, },
.dspp = { .dspp = {
.count = 3, .count = 3,
......
...@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
pm_runtime_put_autosuspend(&pdev->dev);
set_cursor: set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) { if (ret) {
......
...@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj, ...@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence; struct dma_fence *fence;
int i, ret; int i, ret;
if (!exclusive) {
/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
* which makes this a slightly strange place to call it. OTOH this
* is a convenient can-fail point to hook it in. (And similar to
* how etnaviv and nouveau handle this.)
*/
ret = reservation_object_reserve_shared(msm_obj->resv);
if (ret)
return ret;
}
fobj = reservation_object_get_list(msm_obj->resv); fobj = reservation_object_get_list(msm_obj->resv);
if (!fobj || (fobj->shared_count == 0)) { if (!fobj || (fobj->shared_count == 0)) {
fence = reservation_object_get_excl(msm_obj->resv); fence = reservation_object_get_excl(msm_obj->resv);
...@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, ...@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
} }
vaddr = msm_gem_get_vaddr(obj); vaddr = msm_gem_get_vaddr(obj);
if (!vaddr) { if (IS_ERR(vaddr)) {
msm_gem_put_iova(obj, aspace); msm_gem_put_iova(obj, aspace);
drm_gem_object_unreference(obj); drm_gem_object_unreference(obj);
return ERR_PTR(-ENOMEM); return ERR_CAST(vaddr);
} }
if (bo) if (bo)
......
...@@ -221,7 +221,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit) ...@@ -221,7 +221,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
return ret; return ret;
} }
static int submit_fence_sync(struct msm_gem_submit *submit) static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{ {
int i, ret = 0; int i, ret = 0;
...@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit) ...@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj; struct msm_gem_object *msm_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
if (!write) {
/* NOTE: _reserve_shared() must happen before
* _add_shared_fence(), which makes this a slightly
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
ret = reservation_object_reserve_shared(msm_obj->resv);
if (ret)
return ret;
}
if (no_implicit)
continue;
ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
if (ret) if (ret)
break; break;
...@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret) if (ret)
goto out; goto out;
if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
ret = submit_fence_sync(submit);
if (ret) if (ret)
goto out; goto out;
}
ret = submit_pin_objects(submit); ret = submit_pin_objects(submit);
if (ret) if (ret)
......
...@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) ...@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace); msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb); msm_ringbuffer_destroy(gpu->rb);
} }
if (gpu->aspace) {
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
NULL, 0); NULL, 0);
msm_gem_address_space_put(gpu->aspace); msm_gem_address_space_put(gpu->aspace);
......
...@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) ...@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
/* Note that smp_load_acquire() is not strictly required
* as CIRC_SPACE_TO_END() does not access the tail more
* than once.
*/
n = min(sz, circ_space_to_end(&rd->fifo)); n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n); memcpy(fptr, ptr, n);
fifo->head = (fifo->head + n) & (BUF_SZ - 1); smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
sz -= n; sz -= n;
ptr += n; ptr += n;
...@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf, ...@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
if (ret) if (ret)
goto out; goto out;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_CNT_TO_END() does not access the head more than
* once.
*/
n = min_t(int, sz, circ_count_to_end(&rd->fifo)); n = min_t(int, sz, circ_count_to_end(&rd->fifo));
if (copy_to_user(buf, fptr, n)) { if (copy_to_user(buf, fptr, n)) {
ret = -EFAULT; ret = -EFAULT;
goto out; goto out;
} }
fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
*ppos += n; *ppos += n;
wake_up_all(&rd->fifo_event); wake_up_all(&rd->fifo_event);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment