Commit 660f6b5c authored by Dave Airlie

Merge tag 'drm-misc-fixes-2017-07-20' of git://anongit.freedesktop.org/git/drm-misc into drm-fixes

Core Changes:
- fence: Introduce new fence flag to signify timestamp is populated (Chris)
- mst: Avoid processing incomplete data + fix NULL dereference (Imre)

Driver Changes:
- vc4: Avoid WARN from grabbing a ref from vblank that's not on (Boris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Boris Brezillon <boris.brezillon@free-electrons.com>
Cc: Imre Deak <imre.deak@intel.com>

* tag 'drm-misc-fixes-2017-07-20' of git://anongit.freedesktop.org/git/drm-misc:
  drm/mst: Avoid processing partially received up/down message transactions
  drm/mst: Avoid dereferencing a NULL mstb in drm_dp_mst_handle_up_req()
  drm/mst: Fix error handling during MST sideband message reception
  drm/vc4: Fix VBLANK handling in crtc->enable() path
  dma-buf/fence: Avoid use of uninitialised timestamp
parents 22a548d0 636c4c3e
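
Background on the new fence flag: dma_fence_signal() now sets DMA_FENCE_FLAG_SIGNALED_BIT first and only afterwards records the timestamp and sets DMA_FENCE_FLAG_TIMESTAMP_BIT, so a reader that checks only the signaled bit can still race with an uninitialised timestamp. A minimal read-side sketch, mirroring the sync_file change further down; dma_fence_read_timestamp_ns() is a hypothetical helper name, not something added by this series:

#include <linux/dma-fence.h>
#include <linux/ktime.h>

/* Hypothetical helper (not part of this series): only trust
 * fence->timestamp once DMA_FENCE_FLAG_TIMESTAMP_BIT confirms the
 * signalling path has written it.
 */
static s64 dma_fence_read_timestamp_ns(struct dma_fence *fence)
{
        /* Ride out the short window between "signaled" and "timestamped". */
        while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
               !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
                cpu_relax();

        return test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
               ktime_to_ns(fence->timestamp) : 0;
}

The wait is bounded by the single ktime_get() between setting the two bits in dma_fence_signal(), which is why the sync_file hunk below can afford the same busy-wait.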
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
         if (WARN_ON(!fence))
                 return -EINVAL;
 
-        if (!ktime_to_ns(fence->timestamp)) {
-                fence->timestamp = ktime_get();
-                smp_mb__before_atomic();
-        }
-
         if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                 ret = -EINVAL;
 
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
          * we might have raced with the unlocked dma_fence_signal,
          * still run through all callbacks
          */
-        } else
+        } else {
+                fence->timestamp = ktime_get();
+                set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
                 trace_dma_fence_signaled(fence);
+        }
 
         list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                 list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
         if (!fence)
                 return -EINVAL;
 
-        if (!ktime_to_ns(fence->timestamp)) {
-                fence->timestamp = ktime_get();
-                smp_mb__before_atomic();
-        }
-
         if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                 return -EINVAL;
 
+        fence->timestamp = ktime_get();
+        set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
         trace_dma_fence_signaled(fence);
 
         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
                    show ? "_" : "",
                    sync_status_str(status));
 
-        if (status) {
+        if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
                 struct timespec64 ts64 =
                         ktime_to_timespec64(fence->timestamp);
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -391,7 +391,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
                 sizeof(info->driver_name));
 
         info->status = dma_fence_get_status(fence);
-        info->timestamp_ns = ktime_to_ns(fence->timestamp);
+        while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+               !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+                cpu_relax();
+        info->timestamp_ns =
+                test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+                ktime_to_ns(fence->timestamp) :
+                ktime_set(0, 0);
 }
 
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
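
For context, the value computed here is what user space reads back as struct sync_fence_info::timestamp_ns. A rough userspace sketch of querying it via the SYNC_IOC_FILE_INFO ioctl, assuming sync_fd is an already-exported sync_file descriptor (error handling trimmed):

#include <linux/sync_file.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void dump_fence_timestamps(int sync_fd)
{
    struct sync_file_info info = { 0 };
    struct sync_fence_info *fences;
    uint32_t i;

    /* First pass with num_fences == 0 only reports how many fences there are. */
    if (ioctl(sync_fd, SYNC_IOC_FILE_INFO, &info) < 0 || !info.num_fences)
        return;

    fences = calloc(info.num_fences, sizeof(*fences));
    if (!fences)
        return;

    /* Second pass fills the per-fence records, including timestamp_ns. */
    info.sync_fence_info = (uint64_t)(uintptr_t)fences;
    if (ioctl(sync_fd, SYNC_IOC_FILE_INFO, &info) == 0)
        for (i = 0; i < info.num_fences; i++)
            printf("%s/%s: status %d, signaled at %llu ns\n",
                   fences[i].driver_name, fences[i].obj_name,
                   fences[i].status,
                   (unsigned long long)fences[i].timestamp_ns);

    free(fences);
}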
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
                         return false;
                 }
 
+                /*
+                 * ignore out-of-order messages or messages that are part of a
+                 * failed transaction
+                 */
+                if (!recv_hdr.somt && !msg->have_somt)
+                        return false;
+
                 /* get length contained in this portion */
                 msg->curchunk_len = recv_hdr.msg_len;
                 msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
         int len;
         u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
                                replyblock, len);
         if (ret != len) {
                 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
-                return;
+                return false;
         }
         ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
         if (!ret) {
                 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
-                return;
+                return false;
         }
         replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
                 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
                                        replyblock, len);
                 if (ret != len) {
-                        DRM_DEBUG_KMS("failed to read a chunk\n");
+                        DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+                                      len, ret);
+                        return false;
                 }
+
                 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
-                if (ret == false)
+                if (!ret) {
                         DRM_DEBUG_KMS("failed to build sideband msg\n");
+                        return false;
+                }
+
                 curreply += len;
                 replylen -= len;
         }
+        return true;
 }
 
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
         int ret = 0;
-        drm_dp_get_one_sb_msg(mgr, false);
+
+        if (!drm_dp_get_one_sb_msg(mgr, false)) {
+                memset(&mgr->down_rep_recv, 0,
+                       sizeof(struct drm_dp_sideband_msg_rx));
+                return 0;
+        }
 
         if (mgr->down_rep_recv.have_eomt) {
                 struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
         int ret = 0;
-        drm_dp_get_one_sb_msg(mgr, true);
+
+        if (!drm_dp_get_one_sb_msg(mgr, true)) {
+                memset(&mgr->up_req_recv, 0,
+                       sizeof(struct drm_dp_sideband_msg_rx));
+                return 0;
+        }
 
         if (mgr->up_req_recv.have_eomt) {
                 struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
                         DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
                 }
 
-                drm_dp_put_mst_branch_device(mstb);
+                if (mstb)
+                        drm_dp_put_mst_branch_device(mstb);
+
                 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
         }
         return ret;
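
Background for the SOMT check added above: sideband messages arrive in chunks of at most 16 bytes, and a complete transaction is framed by the SOMT (start of message transaction) and EOMT (end of message transaction) bits in each chunk header. A condensed, illustrative sketch of the acceptance rule the first MST patch enforces; the struct and function names below are made up for the example and are not the driver's internals:

#include <linux/types.h>

/* Illustrative reassembly state for one sideband message. A chunk without
 * SOMT is only accepted while a transaction opened by a SOMT chunk is in
 * flight; otherwise it is a leftover of a failed or out-of-order
 * transaction and gets dropped, which is what the new
 * "!recv_hdr.somt && !msg->have_somt" test in drm_dp_sideband_msg_build()
 * implements.
 */
struct sb_msg_state {
        bool have_somt;  /* a SOMT chunk opened a transaction */
        bool have_eomt;  /* the closing EOMT chunk has arrived */
};

static bool sb_msg_accept_chunk(struct sb_msg_state *msg, bool somt, bool eomt)
{
        if (!somt && !msg->have_somt)
                return false;           /* stray chunk, ignore it */

        if (somt)
                msg->have_somt = true;  /* (re)start reassembly */
        if (eomt)
                msg->have_eomt = true;  /* message is complete */

        return true;
}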
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
                      SCALER_DISPSTATX_EMPTY);
 }
 
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+        struct drm_device *dev = crtc->dev;
+        struct vc4_dev *vc4 = to_vc4_dev(dev);
+        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+        struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+        if (crtc->state->event) {
+                unsigned long flags;
+
+                crtc->state->event->pipe = drm_crtc_index(crtc);
+
+                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+                spin_lock_irqsave(&dev->event_lock, flags);
+                vc4_crtc->event = crtc->state->event;
+                crtc->state->event = NULL;
+
+                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                          vc4_state->mm.start);
+
+                spin_unlock_irqrestore(&dev->event_lock, flags);
+        } else {
+                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                          vc4_state->mm.start);
+        }
+}
+
 static void vc4_crtc_enable(struct drm_crtc *crtc)
 {
         struct drm_device *dev = crtc->dev;
@@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
 
         require_hvs_enabled(dev);
 
+        /* Enable vblank irq handling before crtc is started otherwise
+         * drm_crtc_get_vblank() fails in vc4_crtc_update_dlist().
+         */
+        drm_crtc_vblank_on(crtc);
+        vc4_crtc_update_dlist(crtc);
+
         /* Turn on the scaler, which will wait for vstart to start
          * compositing.
          */
@@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
         /* Turn on the pixel valve, which will emit the vstart signal. */
         CRTC_WRITE(PV_V_CONTROL,
                    CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
-        /* Enable vblank irq handling after crtc is started. */
-        drm_crtc_vblank_on(crtc);
 }
 
 static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 {
         struct drm_device *dev = crtc->dev;
         struct vc4_dev *vc4 = to_vc4_dev(dev);
-        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
         struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
         struct drm_plane *plane;
         bool debug_dump_regs = false;
@@ -620,25 +650,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
         WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-        if (crtc->state->event) {
-                unsigned long flags;
-
-                crtc->state->event->pipe = drm_crtc_index(crtc);
-
-                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
-                spin_lock_irqsave(&dev->event_lock, flags);
-                vc4_crtc->event = crtc->state->event;
-                crtc->state->event = NULL;
-
-                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-                          vc4_state->mm.start);
-
-                spin_unlock_irqrestore(&dev->event_lock, flags);
-        } else {
-                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-                          vc4_state->mm.start);
-        }
+        /* Only update DISPLIST if the CRTC was already running and is not
+         * being disabled.
+         * vc4_crtc_enable() takes care of updating the dlist just after
+         * re-enabling VBLANK interrupts and before enabling the engine.
+         * If the CRTC is being disabled, there's no point in updating this
+         * information.
+         */
+        if (crtc->state->active && old_state->active)
+                vc4_crtc_update_dlist(crtc);
 
         if (debug_dump_regs) {
                 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
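
The vc4 reordering matters because drm_crtc_vblank_get(), which vc4_crtc_update_dlist() uses to hold a vblank reference for the pending pageflip event, fails and trips the WARN_ON while vblank handling for the CRTC is still off after a previous drm_crtc_vblank_off(). A stripped-down sketch of the resulting enable ordering; this only illustrates the constraint and is not additional driver code (headers as in vc4_crtc.c):

/* Ordering sketch for the crtc->enable() path after this fix. */
static void vc4_enable_order_sketch(struct drm_crtc *crtc)
{
        /* 1. Switch vblank handling on so vblank references can be taken. */
        drm_crtc_vblank_on(crtc);

        /* 2. Program the display list; this may arm a pageflip event and
         *    therefore call drm_crtc_vblank_get().
         */
        vc4_crtc_update_dlist(crtc);

        /* 3. Only then start the scaler and the pixel valve. */
}

In vc4_crtc_atomic_flush(), old_state is the callback's old CRTC state parameter, so the dlist update is now skipped unless the CRTC was active before and stays active, since vc4_crtc_enable() already handles the enable path.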
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -55,6 +55,7 @@ struct dma_fence_cb;
  * of the time.
  *
  * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
  * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
  * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
  * implementer of the fence for its own purposes. Can be used in different
@@ -84,6 +85,7 @@ struct dma_fence {
 enum dma_fence_flag_bits {
         DMA_FENCE_FLAG_SIGNALED_BIT,
+        DMA_FENCE_FLAG_TIMESTAMP_BIT,
         DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
         DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
 };