Commit 33ba90ee authored by Dave Airlie

Merge tag 'drm-intel-fixes-2019-10-17' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Display fix on handling VBT information.
- Important circular locking fix
- Fix for preemption vs resubmission on virtual requests
  - and a prep patch to make this last one apply cleanly
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191017135444.GA12255@intel.com
parents 4f5cafb5 0a544a2a
@@ -1270,7 +1270,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
 			      "disabling port %c DVI/HDMI support\n",
 			      port_name(port), info->alternate_ddc_pin,
-			      port_name(p), port_name(port));
+			      port_name(p), port_name(p));
 
 		/*
 		 * If we have multiple ports supposedly sharing the
@@ -1278,9 +1278,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
 		 * port. Otherwise they share the same ddc bin and
 		 * system couldn't communicate with them separately.
 		 *
-		 * Give child device order the priority, first come first
-		 * served.
+		 * Give inverse child device order the priority,
+		 * last one wins. Yes, there are real machines
+		 * (eg. Asrock B250M-HDV) where VBT has both
+		 * port A and port E with the same AUX ch and
+		 * we must pick port E :(
 		 */
+		info = &dev_priv->vbt.ddi_port_info[p];
+
 		info->supports_dvi = false;
 		info->supports_hdmi = false;
 		info->alternate_ddc_pin = 0;
@@ -1316,7 +1321,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
 			      "disabling port %c DP support\n",
 			      port_name(port), info->alternate_aux_channel,
-			      port_name(p), port_name(port));
+			      port_name(p), port_name(p));
 
 		/*
 		 * If we have multiple ports supposedlt sharing the
@@ -1324,9 +1329,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 		 * port. Otherwise they share the same aux channel
 		 * and system couldn't communicate with them separately.
 		 *
-		 * Give child device order the priority, first come first
-		 * served.
+		 * Give inverse child device order the priority,
+		 * last one wins. Yes, there are real machines
+		 * (eg. Asrock B250M-HDV) where VBT has both
+		 * port A and port E with the same AUX ch and
+		 * we must pick port E :(
 		 */
+		info = &dev_priv->vbt.ddi_port_info[p];
+
 		info->supports_dp = false;
 		info->alternate_aux_channel = 0;
 	}
......
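The two hunks above change the conflict rule from "first come, first served" to "last one wins", and redirect the info pointer so it is the earlier conflicting port (not the one being parsed) that loses its DDC pin or AUX channel. A rough standalone illustration of that rule follows; the struct, port count and helper are invented for the sketch and are not the i915 code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-port VBT info; not the i915 structure. */
struct port_info {
	int ddc_pin;            /* 0 = no pin assigned */
	bool supports_hdmi;
};

#define NUM_PORTS 5             /* ports A..E, for the sake of the example */

/*
 * Last one wins: when a later port claims a DDC pin already claimed by an
 * earlier port, DVI/HDMI is disabled on the *earlier* port, mirroring the
 * behaviour the patch introduces for VBTs such as the Asrock B250M-HDV one.
 */
static void sanitize_ddc_pins(struct port_info info[NUM_PORTS])
{
	for (int port = 1; port < NUM_PORTS; port++) {
		if (!info[port].ddc_pin)
			continue;

		for (int p = 0; p < port; p++) {
			if (info[p].ddc_pin != info[port].ddc_pin)
				continue;

			printf("port %c shares DDC pin %d with port %c, disabling port %c\n",
			       'A' + port, info[port].ddc_pin, 'A' + p, 'A' + p);
			info[p].supports_hdmi = false;
			info[p].ddc_pin = 0;
		}
	}
}

int main(void)
{
	struct port_info info[NUM_PORTS] = {
		[0] = { .ddc_pin = 0x2, .supports_hdmi = true },  /* port A */
		[4] = { .ddc_pin = 0x2, .supports_hdmi = true },  /* port E */
	};

	sanitize_ddc_pins(info);   /* port E keeps the pin, port A is disabled */
	return 0;
}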
@@ -364,6 +364,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 	case -ENOSPC:
 	case -EFAULT:
+	case -ENODEV: /* bad object, how did you get here! */
 		return VM_FAULT_SIGBUS;
 	default:
 		WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
@@ -475,10 +476,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
 	if (!obj)
 		return -ENOENT;
 
+	if (i915_gem_object_never_bind_ggtt(obj)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
 	ret = create_mmap_offset(obj);
 	if (ret == 0)
 		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
+out:
 	i915_gem_object_put(obj);
 	return ret;
 }
......
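Taken together, the two hunks above reject GTT mmap-offset creation for objects that can never live in the GGTT with -ENODEV, and fold that error into the SIGBUS group should it ever reach the fault handler anyway. A simplified, self-contained sketch of the same control flow (look up, validate, single exit path that drops the reference); all types and helpers are invented for illustration, only the shape mirrors the patch.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a GEM object; not the i915 structure. */
struct object {
	bool never_bind_ggtt;
	int refcount;
};

static void object_put(struct object *obj)
{
	obj->refcount--;
}

static int create_mmap_offset(struct object *obj, unsigned long *offset)
{
	*offset = 0x1000;	/* pretend we allocated a fake offset node */
	return 0;
}

static int mmap_offset(struct object *obj, unsigned long *offset)
{
	int ret;

	if (!obj)
		return -ENOENT;
	obj->refcount++;	/* stand-in for the real lookup reference */

	/* Objects that can never be bound into the GGTT cannot be mapped this way. */
	if (obj->never_bind_ggtt) {
		ret = -ENODEV;
		goto out;
	}

	ret = create_mmap_offset(obj, offset);

out:
	object_put(obj);	/* single exit path drops the lookup reference */
	return ret;
}

int main(void)
{
	struct object userptr_like = { .never_bind_ggtt = true };
	unsigned long offset;

	printf("mmap_offset: %d\n", mmap_offset(&userptr_like, &offset)); /* -19 (-ENODEV) */
	return 0;
}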
@@ -152,6 +152,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
 	return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
 }
 
+static inline bool
+i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+{
+	return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+}
+
 static inline bool
 i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 {
......
@@ -32,7 +32,8 @@ struct drm_i915_gem_object_ops {
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
 #define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(1)
 #define I915_GEM_OBJECT_IS_PROXY	BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT		BIT(3)
+#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(4)
 
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
......
@@ -702,6 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE |
+		 I915_GEM_OBJECT_NO_GGTT |
 		 I915_GEM_OBJECT_ASYNC_CANCEL,
 	.get_pages = i915_gem_userptr_get_pages,
 	.put_pages = i915_gem_userptr_put_pages,
......
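The object-header, types and userptr hunks above all extend the same capability-flag pattern: each object type carries a flag word in its ops table, an inline predicate wraps the bit test, and GGTT paths check the predicate before binding. A minimal sketch of that pattern follows; the macros, structs and functions below are simplified stand-ins, not the i915 definitions.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define OBJ_HAS_STRUCT_PAGE	(1u << 0)
#define OBJ_IS_SHRINKABLE	(1u << 1)
#define OBJ_IS_PROXY		(1u << 2)
#define OBJ_NO_GGTT		(1u << 3)	/* never bind into the global GTT */
#define OBJ_ASYNC_CANCEL	(1u << 4)	/* renumbered to make room, as in the patch */

struct obj_ops {
	unsigned int flags;
};

struct gem_obj {
	const struct obj_ops *ops;
};

static inline bool obj_never_bind_ggtt(const struct gem_obj *obj)
{
	return obj->ops->flags & OBJ_NO_GGTT;
}

/* Any GGTT pin or GTT-mmap path rejects such objects up front. */
static int obj_ggtt_pin(struct gem_obj *obj)
{
	if (obj_never_bind_ggtt(obj))
		return -ENODEV;
	/* ... the actual binding is elided in this sketch ... */
	return 0;
}

/* A userptr-like object type opts in simply by setting the flag in its ops. */
static const struct obj_ops userptr_like_ops = {
	.flags = OBJ_HAS_STRUCT_PAGE | OBJ_IS_SHRINKABLE |
		 OBJ_NO_GGTT | OBJ_ASYNC_CANCEL,
};

int main(void)
{
	struct gem_obj obj = { .ops = &userptr_like_ops };

	printf("ggtt pin: %d\n", obj_ggtt_pin(&obj));	/* prints -19 (-ENODEV) */
	return 0;
}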
@@ -234,6 +234,13 @@ static void execlists_init_reg_state(u32 *reg_state,
 				      struct intel_engine_cs *engine,
 				      struct intel_ring *ring);
 
+static void mark_eio(struct i915_request *rq)
+{
+	if (!i915_request_signaled(rq))
+		dma_fence_set_error(&rq->fence, -EIO);
+	i915_request_mark_complete(rq);
+}
+
 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
 {
 	return (i915_ggtt_offset(engine->status_page.vma) +
@@ -1236,6 +1243,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				submit = true;
 				last = rq;
 			}
+			i915_request_put(rq);
 
 			/*
 			 * Hmm, we have a bunch of virtual engine requests,
@@ -2574,12 +2582,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	__execlists_reset(engine, true);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->active.requests, sched.link) {
-		if (!i915_request_signaled(rq))
-			dma_fence_set_error(&rq->fence, -EIO);
-
-		i915_request_mark_complete(rq);
-	}
+	list_for_each_entry(rq, &engine->active.requests, sched.link)
+		mark_eio(rq);
 
 	/* Flush the queued requests to the timeline list (for retiring). */
 	while ((rb = rb_first_cached(&execlists->queue))) {
@@ -2587,9 +2591,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
+			mark_eio(rq);
 			__i915_request_submit(rq);
-			dma_fence_set_error(&rq->fence, -EIO);
-			i915_request_mark_complete(rq);
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
@@ -2605,13 +2608,15 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		RB_CLEAR_NODE(rb);
 
 		spin_lock(&ve->base.active.lock);
-		if (ve->request) {
-			ve->request->engine = engine;
-			__i915_request_submit(ve->request);
-			dma_fence_set_error(&ve->request->fence, -EIO);
-			i915_request_mark_complete(ve->request);
+		rq = fetch_and_zero(&ve->request);
+		if (rq) {
+			mark_eio(rq);
+
+			rq->engine = engine;
+			__i915_request_submit(rq);
+			i915_request_put(rq);
+
 			ve->base.execlists.queue_priority_hint = INT_MIN;
-			ve->request = NULL;
 		}
 		spin_unlock(&ve->base.active.lock);
 	}
@@ -3615,6 +3620,8 @@ static void virtual_submission_tasklet(unsigned long data)
 static void virtual_submit_request(struct i915_request *rq)
 {
 	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	struct i915_request *old;
+	unsigned long flags;
 
 	GEM_TRACE("%s: rq=%llx:%lld\n",
 		  ve->base.name,
@@ -3623,15 +3630,31 @@ static void virtual_submit_request(struct i915_request *rq)
 
 	GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
-	GEM_BUG_ON(ve->request);
-	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+	spin_lock_irqsave(&ve->base.active.lock, flags);
+
+	old = ve->request;
+	if (old) { /* background completion event from preempt-to-busy */
+		GEM_BUG_ON(!i915_request_completed(old));
+		__i915_request_submit(old);
+		i915_request_put(old);
+	}
 
-	ve->base.execlists.queue_priority_hint = rq_prio(rq);
-	WRITE_ONCE(ve->request, rq);
+	if (i915_request_completed(rq)) {
+		__i915_request_submit(rq);
 
-	list_move_tail(&rq->sched.link, virtual_queue(ve));
+		ve->base.execlists.queue_priority_hint = INT_MIN;
+		ve->request = NULL;
+	} else {
+		ve->base.execlists.queue_priority_hint = rq_prio(rq);
+		ve->request = i915_request_get(rq);
+
+		GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+		list_move_tail(&rq->sched.link, virtual_queue(ve));
+
+		tasklet_schedule(&ve->base.execlists.tasklet);
+	}
 
-	tasklet_schedule(&ve->base.execlists.tasklet);
+	spin_unlock_irqrestore(&ve->base.active.lock, flags);
 }
 
 static struct ve_bond *
......
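The execlists hunks above boil down to two patterns: mark_eio() centralises "complete this request with -EIO", and virtual_submit_request() now treats ve->request as a reference-counted single-entry slot that may still hold an already-completed request left behind by preempt-to-busy. A rough kernel-style sketch of that slot handling follows; "struct vreq", "struct virtual_slot" and the vreq_*() helpers are hypothetical stand-ins, not the i915 types or functions.

#include <linux/spinlock.h>
#include <linux/types.h>

struct vreq;

/* Stand-ins for i915_request_get/put/completed and __i915_request_submit. */
struct vreq *vreq_get(struct vreq *rq);
void vreq_put(struct vreq *rq);
bool vreq_completed(const struct vreq *rq);
void vreq_submit_locked(struct vreq *rq);	/* caller holds slot->lock */

struct virtual_slot {
	spinlock_t lock;
	struct vreq *request;	/* owns one reference while non-NULL */
};

static void virtual_slot_submit(struct virtual_slot *slot, struct vreq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&slot->lock, flags);

	/* Flush a completed request parked here by a preempt-to-busy cycle. */
	if (slot->request) {
		vreq_submit_locked(slot->request);
		vreq_put(slot->request);	/* drop the slot's reference */
		slot->request = NULL;
	}

	if (vreq_completed(rq)) {
		/* Nothing left to schedule: pass it straight on for retirement. */
		vreq_submit_locked(rq);
	} else {
		slot->request = vreq_get(rq);	/* slot takes its own reference */
		/* ...kick the submission tasklet here... */
	}

	spin_unlock_irqrestore(&slot->lock, flags);
}

The point of the extra get/put pair is that a request parked in the slot can complete (and be retired) behind the tasklet's back, so the slot's own reference keeps it valid until the slot is flushed or reused.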
@@ -969,6 +969,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
+	if (i915_gem_object_never_bind_ggtt(obj))
+		return ERR_PTR(-ENODEV);
+
 	if (flags & PIN_MAPPABLE &&
 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
 		/* If the required space is larger than the available
......