Commit 64c3fd53 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2020-03-05' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.6-rc5:
- Break up long lists of object reclaim with cond_resched()
- PSR probe fix
- TGL workarounds
- Selftest return value fix
- Drop timeline mutex while waiting for retirement
- Wait for OA configuration completion before writes to OA buffer
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87eeu7nl6z.fsf@intel.com
parents 26398db1 169c0aa4
@@ -4466,13 +4466,19 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 
 static void icl_mbus_init(struct drm_i915_private *dev_priv)
 {
-        u32 val;
+        u32 mask, val;
 
-        val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
-              MBUS_ABOX_BT_CREDIT_POOL2(16) |
-              MBUS_ABOX_B_CREDIT(1) |
-              MBUS_ABOX_BW_CREDIT(1);
+        mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
+                MBUS_ABOX_BT_CREDIT_POOL2_MASK |
+                MBUS_ABOX_B_CREDIT_MASK |
+                MBUS_ABOX_BW_CREDIT_MASK;
+
+        val = I915_READ(MBUS_ABOX_CTL);
+        val &= ~mask;
+        val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
+                MBUS_ABOX_BT_CREDIT_POOL2(16) |
+                MBUS_ABOX_B_CREDIT(1) |
+                MBUS_ABOX_BW_CREDIT(1);
 
         I915_WRITE(MBUS_ABOX_CTL, val);
 }
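Note: the icl_mbus_init() hunk above replaces a blind write of MBUS_ABOX_CTL with a read-modify-write, so only the ABOX credit fields are updated and whatever was previously programmed in the other bits survives. Below is a minimal userspace sketch of that pattern; the fake register, masks and field() helper are stand-ins for illustration, not the driver's MMIO accessors.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in "register" and field masks, purely for illustration; the real
 * code uses I915_READ()/I915_WRITE() on MBUS_ABOX_CTL with the MBUS_ABOX_*
 * macros shown in the hunk above.
 */
static uint32_t fake_reg = 0xdeadbeef;      /* whatever was programmed before us */

#define CREDIT_POOL1_MASK 0x001f0000u
#define CREDIT_POOL2_MASK 0x00001f00u

static uint32_t field(uint32_t mask, uint32_t val)
{
        /* shift val up to the mask's lowest set bit, clamp to the mask */
        return (val * (mask & -mask)) & mask;
}

int main(void)
{
        uint32_t mask = CREDIT_POOL1_MASK | CREDIT_POOL2_MASK;
        uint32_t val;

        val = fake_reg;                     /* read */
        val &= ~mask;                       /* clear only the fields we own */
        val |= field(CREDIT_POOL1_MASK, 16) |
               field(CREDIT_POOL2_MASK, 16);
        fake_reg = val;                     /* write back; untouched bits preserved */

        printf("reg = 0x%08x\n", fake_reg);
        return 0;
}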
@@ -4968,8 +4974,21 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
                 I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
                 I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
         } else {
+                u32 val;
+
                 I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
                 I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+
+                /* Wa_22010178259:tgl */
+                val = I915_READ(BW_BUDDY1_CTL);
+                val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
+                val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
+                I915_WRITE(BW_BUDDY1_CTL, val);
+
+                val = I915_READ(BW_BUDDY2_CTL);
+                val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
+                val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
+                I915_WRITE(BW_BUDDY2_CTL, val);
         }
 }
......
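Note: the Wa_22010178259 hunk above programs the TLB request timer field of BW_BUDDY*_CTL to 0x8 with REG_FIELD_PREP() against the REG_GENMASK(21, 16) mask added in the i915_reg.h hunk further down. A self-contained sketch of that field arithmetic, using simplified stand-ins for the kernel macros (assumed semantics: shift the value to the mask's lowest set bit and clamp it to the mask):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's REG_GENMASK()/REG_FIELD_PREP(). */
#define GENMASK32(h, l)    (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(m, v) (((uint32_t)(v) * ((m) & -(m))) & (m))

#define TLB_REQ_TIMER_MASK GENMASK32(21, 16)    /* bits 21:16, as in the hunk above */

int main(void)
{
        uint32_t val = 0xffffffffu;             /* pretend BW_BUDDY*_CTL readback */

        val &= ~TLB_REQ_TIMER_MASK;             /* clear the timer field */
        val |= FIELD_PREP32(TLB_REQ_TIMER_MASK, 0x8);

        printf("mask  = 0x%08x\n", TLB_REQ_TIMER_MASK);                     /* 0x003f0000 */
        printf("field = 0x%08x\n", FIELD_PREP32(TLB_REQ_TIMER_MASK, 0x8));  /* 0x00080000 */
        printf("value = 0x%08x\n", val);
        return 0;
}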
@@ -852,10 +852,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 {
         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
-        if (!crtc_state->has_psr)
+        if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
                 return;
 
-        if (WARN_ON(!CAN_PSR(dev_priv)))
+        dev_priv->psr.force_mode_changed = false;
+
+        if (!crtc_state->has_psr)
                 return;
 
         WARN_ON(dev_priv->drrs.dp);
@@ -1009,6 +1011,8 @@ void intel_psr_update(struct intel_dp *intel_dp,
         if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
                 return;
 
+        dev_priv->psr.force_mode_changed = false;
+
         mutex_lock(&dev_priv->psr.lock);
         enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
@@ -1534,7 +1538,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
         struct drm_crtc_state *crtc_state;
 
         if (!CAN_PSR(dev_priv) || !new_state->crtc ||
-            dev_priv->psr.initially_probed)
+            !dev_priv->psr.force_mode_changed)
                 return;
 
         intel_connector = to_intel_connector(connector);
@@ -1545,5 +1549,18 @@ void intel_psr_atomic_check(struct drm_connector *connector,
         crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
                                                    new_state->crtc);
         crtc_state->mode_changed = true;
-        dev_priv->psr.initially_probed = true;
+}
+
+void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp)
+{
+        struct drm_i915_private *dev_priv;
+
+        if (!intel_dp)
+                return;
+
+        dev_priv = dp_to_i915(intel_dp);
+        if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp)
+                return;
+
+        dev_priv->psr.force_mode_changed = true;
 }
@@ -40,5 +40,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
 void intel_psr_atomic_check(struct drm_connector *connector,
                             struct drm_connector_state *old_state,
                             struct drm_connector_state *new_state);
+void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp);
 
 #endif /* __INTEL_PSR_H__ */
@@ -225,6 +225,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                 /* But keep the pointer alive for RCU-protected lookups */
                 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+                cond_resched();
         }
         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
......
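Note: the cond_resched() added above gives the scheduler an opportunity to preempt between object frees, so reclaiming a very long list no longer monopolizes the CPU. As a rough userspace analogy only (sched_yield() is not the kernel's cond_resched(), and the "work" here is made up), a long loop can periodically give up the processor:

#include <sched.h>
#include <stdio.h>

int main(void)
{
        unsigned long freed = 0;

        for (unsigned long i = 0; i < 1000000; i++) {
                freed++;                  /* stand-in for freeing one object */

                if ((i & 1023) == 0)      /* every so often, offer the CPU back */
                        sched_yield();
        }

        printf("freed %lu objects\n", freed);
        return 0;
}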
@@ -570,7 +570,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
         obj = i915_gem_object_create_internal(i915, size);
         if (IS_ERR(obj))
-                return PTR_ERR(obj);
+                return false;
 
         mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
         i915_gem_object_put(obj);
......
@@ -147,24 +147,32 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
                         fence = i915_active_fence_get(&tl->last_request);
                         if (fence) {
+                                mutex_unlock(&tl->mutex);
+
                                 timeout = dma_fence_wait_timeout(fence,
                                                                  interruptible,
                                                                  timeout);
                                 dma_fence_put(fence);
+
+                                /* Retirement is best effort */
+                                if (!mutex_trylock(&tl->mutex)) {
+                                        active_count++;
+                                        goto out_active;
+                                }
                         }
                 }
 
                 if (!retire_requests(tl) || flush_submission(gt))
                         active_count++;
+                mutex_unlock(&tl->mutex);
 
-                spin_lock(&timelines->lock);
+out_active:     spin_lock(&timelines->lock);
 
-                /* Resume iteration after dropping lock */
+                /* Resume list iteration after reacquiring spinlock */
                 list_safe_reset_next(tl, tn, link);
                 if (atomic_dec_and_test(&tl->active_count))
                         list_del(&tl->link);
 
-                mutex_unlock(&tl->mutex);
-
                 /* Defer the final release to after the spinlock */
                 if (refcount_dec_and_test(&tl->kref.refcount)) {
......
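Note: the retirement hunk above drops tl->mutex across the potentially long dma_fence wait and reacquires it with mutex_trylock(); if the lock is contended, the timeline is simply counted as still active instead of blocking the caller. A small pthread sketch of that shape, with hypothetical helpers standing in for the fence wait and the retire step (this is not the i915 locking, just the pattern):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t timeline_lock = PTHREAD_MUTEX_INITIALIZER;
static int active_count;

/* Hypothetical stand-ins for "wait on the last fence" and "retire requests". */
static void wait_for_fence(void) { /* sleep without holding the lock */ }
static int retire_requests(void) { return 1; }

static void retire_one_timeline(void)
{
        pthread_mutex_lock(&timeline_lock);

        /* Drop the lock across the potentially long wait... */
        pthread_mutex_unlock(&timeline_lock);
        wait_for_fence();

        /* ...and only retire if the lock can be taken back cheaply. */
        if (pthread_mutex_trylock(&timeline_lock) != 0) {
                active_count++;           /* best effort: report busy, move on */
                return;
        }

        if (!retire_requests())
                active_count++;
        pthread_mutex_unlock(&timeline_lock);
}

int main(void)
{
        retire_one_timeline();
        printf("active_count = %d\n", active_count);
        return 0;
}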
@@ -575,24 +575,19 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
 {
-        u32 val;
-
         /* Wa_1409142259:tgl */
         WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
                           GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
 
-        /* Wa_1604555607:tgl */
-        val = intel_uncore_read(engine->uncore, FF_MODE2);
-        val &= ~FF_MODE2_TDS_TIMER_MASK;
-        val |= FF_MODE2_TDS_TIMER_128;
-
         /*
-         * FIXME: FF_MODE2 register is not readable till TGL B0. We can
-         * enable verification of WA from the later steppings, which enables
-         * the read of FF_MODE2.
+         * Wa_1604555607:gen12 and Wa_1608008084:gen12
+         * FF_MODE2 register will return the wrong value when read. The default
+         * value for this register is zero for all fields and there are no bit
+         * masks. So instead of doing a RMW we should just write the TDS timer
+         * value for Wa_1604555607.
          */
-        wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
-               IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
-                            FF_MODE2_TDS_TIMER_MASK);
+        wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
+               FF_MODE2_TDS_TIMER_128, 0);
 }
 
 static void
......
@@ -56,6 +56,7 @@
 #include "display/intel_hotplug.h"
 #include "display/intel_overlay.h"
 #include "display/intel_pipe_crc.h"
+#include "display/intel_psr.h"
 #include "display/intel_sprite.h"
 #include "display/intel_vga.h"
@@ -330,6 +331,8 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
 
         intel_init_ipc(i915);
 
+        intel_psr_set_force_mode_changed(i915->psr.dp);
+
         return 0;
 
 cleanup_gem:
......
@@ -505,7 +505,7 @@ struct i915_psr {
         bool dc3co_enabled;
         u32 dc3co_exit_delay;
         struct delayed_work idle_work;
-        bool initially_probed;
+        bool force_mode_changed;
 };
 
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
......
@@ -1954,7 +1954,8 @@ get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
         return i915_vma_get(oa_bo->vma);
 }
 
-static int emit_oa_config(struct i915_perf_stream *stream,
-                          struct i915_oa_config *oa_config,
-                          struct intel_context *ce)
+static struct i915_request *
+emit_oa_config(struct i915_perf_stream *stream,
+               struct i915_oa_config *oa_config,
+               struct intel_context *ce)
 {
@@ -1964,7 +1965,7 @@ static int emit_oa_config(struct i915_perf_stream *stream,
         vma = get_oa_vma(stream, oa_config);
         if (IS_ERR(vma))
-                return PTR_ERR(vma);
+                return ERR_CAST(vma);
 
         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
         if (err)
@@ -1989,13 +1990,17 @@ static int emit_oa_config(struct i915_perf_stream *stream,
         err = rq->engine->emit_bb_start(rq,
                                         vma->node.start, 0,
                                         I915_DISPATCH_SECURE);
+        if (err)
+                goto err_add_request;
+
+        i915_request_get(rq);
 err_add_request:
         i915_request_add(rq);
 err_vma_unpin:
         i915_vma_unpin(vma);
 err_vma_put:
         i915_vma_put(vma);
-        return err;
+        return err ? ERR_PTR(err) : rq;
 }
 
 static struct intel_context *oa_context(struct i915_perf_stream *stream)
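Note: emit_oa_config() now hands back a struct i915_request * so the caller can wait for the configuration batch to complete, with failures carried in the same return value through ERR_PTR()/ERR_CAST() and unpacked with IS_ERR()/PTR_ERR(). A self-contained sketch of that pointer-or-errno encoding, mirroring the include/linux/err.h idiom (do_lookup() is a made-up user of it):

#include <stdio.h>
#include <errno.h>

/*
 * Small negative errno values are folded into the top of the pointer range,
 * so a single return value can carry either a valid pointer or an error.
 */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int the_answer = 42;

static int *do_lookup(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);     /* the error travels in the pointer */
        return &the_answer;
}

int main(void)
{
        int *p = do_lookup(0);

        if (IS_ERR(p))
                return (int)-PTR_ERR(p);

        printf("looked up %d\n", *p);
        return 0;
}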
@@ -2003,7 +2008,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
         return stream->pinned_ctx ?: stream->engine->kernel_context;
 }
 
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+hsw_enable_metric_set(struct i915_perf_stream *stream)
 {
         struct intel_uncore *uncore = stream->uncore;
@@ -2406,7 +2412,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
         return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
 }
 
-static int gen8_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen8_enable_metric_set(struct i915_perf_stream *stream)
 {
         struct intel_uncore *uncore = stream->uncore;
         struct i915_oa_config *oa_config = stream->oa_config;
@@ -2448,7 +2455,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
          */
         ret = lrc_configure_all_contexts(stream, oa_config);
         if (ret)
-                return ret;
+                return ERR_PTR(ret);
 
         return emit_oa_config(stream, oa_config, oa_context(stream));
 }
@@ -2460,7 +2467,8 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
                              0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
 }
 
-static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen12_enable_metric_set(struct i915_perf_stream *stream)
 {
         struct intel_uncore *uncore = stream->uncore;
         struct i915_oa_config *oa_config = stream->oa_config;
@@ -2491,7 +2499,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
          */
         ret = gen12_configure_all_contexts(stream, oa_config);
         if (ret)
-                return ret;
+                return ERR_PTR(ret);
 
         /*
          * For Gen12, performance counters are context
@@ -2501,7 +2509,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
         if (stream->ctx) {
                 ret = gen12_configure_oar_context(stream, true);
                 if (ret)
-                        return ret;
+                        return ERR_PTR(ret);
         }
 
         return emit_oa_config(stream, oa_config, oa_context(stream));
@@ -2696,6 +2704,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
         .read = i915_oa_read,
 };
 
+static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
+{
+        struct i915_request *rq;
+
+        rq = stream->perf->ops.enable_metric_set(stream);
+        if (IS_ERR(rq))
+                return PTR_ERR(rq);
+
+        i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+        i915_request_put(rq);
+
+        return 0;
+}
+
 /**
  * i915_oa_stream_init - validate combined props for OA stream and init
  * @stream: An i915 perf stream
@@ -2829,7 +2851,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
         stream->ops = &i915_oa_stream_ops;
         perf->exclusive_stream = stream;
 
-        ret = perf->ops.enable_metric_set(stream);
+        ret = i915_perf_stream_enable_sync(stream);
         if (ret) {
                 DRM_DEBUG("Unable to enable metric set\n");
                 goto err_enable;
@@ -3147,7 +3169,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
                 return -EINVAL;
 
         if (config != stream->oa_config) {
-                int err;
+                struct i915_request *rq;
 
                 /*
                  * If OA is bound to a specific context, emit the
@@ -3158,11 +3180,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
                  * When set globally, we use a low priority kernel context,
                  * so it will effectively take effect when idle.
                  */
-                err = emit_oa_config(stream, config, oa_context(stream));
-                if (err == 0)
+                rq = emit_oa_config(stream, config, oa_context(stream));
+                if (!IS_ERR(rq)) {
                         config = xchg(&stream->oa_config, config);
-                else
-                        ret = err;
+                        i915_request_put(rq);
+                } else {
+                        ret = PTR_ERR(rq);
+                }
         }
 
         i915_oa_config_put(config);
......
@@ -339,7 +339,8 @@ struct i915_oa_ops {
          * counter reports being sampled. May apply system constraints such as
          * disabling EU clock gating as required.
          */
-        int (*enable_metric_set)(struct i915_perf_stream *stream);
+        struct i915_request *
+                (*enable_metric_set)(struct i915_perf_stream *stream);
 
         /**
          * @disable_metric_set: Remove system constraints associated with using
......
@@ -7757,6 +7757,7 @@ enum {
 #define BW_BUDDY1_CTL                   _MMIO(0x45140)
 #define BW_BUDDY2_CTL                   _MMIO(0x45150)
 #define   BW_BUDDY_DISABLE              REG_BIT(31)
+#define   BW_BUDDY_TLB_REQ_TIMER_MASK   REG_GENMASK(21, 16)
 
 #define BW_BUDDY1_PAGE_MASK             _MMIO(0x45144)
 #define BW_BUDDY2_PAGE_MASK             _MMIO(0x45154)
......
@@ -275,7 +275,7 @@ bool i915_request_retire(struct i915_request *rq)
         spin_unlock_irq(&rq->lock);
 
         remove_from_client(rq);
-        list_del(&rq->link);
+        list_del_rcu(&rq->link);
 
         intel_context_exit(rq->context);
         intel_context_unpin(rq->context);
@@ -721,6 +721,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
         rq->infix = rq->ring->emit; /* end of header; start of user payload */
 
         intel_context_mark_active(ce);
+        list_add_tail_rcu(&rq->link, &tl->requests);
+
         return rq;
 
 err_unwind:
@@ -777,13 +779,23 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
         GEM_BUG_ON(i915_request_timeline(rq) ==
                    rcu_access_pointer(signal->timeline));
 
+        if (i915_request_started(signal))
+                return 0;
+
         fence = NULL;
         rcu_read_lock();
         spin_lock_irq(&signal->lock);
-        if (!i915_request_started(signal) &&
-            !list_is_first(&signal->link,
-                           &rcu_dereference(signal->timeline)->requests)) {
-                struct i915_request *prev = list_prev_entry(signal, link);
+        do {
+                struct list_head *pos = READ_ONCE(signal->link.prev);
+                struct i915_request *prev;
+
+                /* Confirm signal has not been retired, the link is valid */
+                if (unlikely(i915_request_started(signal)))
+                        break;
+
+                /* Is signal the earliest request on its timeline? */
+                if (pos == &rcu_dereference(signal->timeline)->requests)
+                        break;
 
                 /*
                  * Peek at the request before us in the timeline. That
@@ -791,13 +803,18 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
                  * after acquiring a reference to it, confirm that it is
                  * still part of the signaler's timeline.
                  */
-                if (i915_request_get_rcu(prev)) {
-                        if (list_next_entry(prev, link) == signal)
-                                fence = &prev->fence;
-                        else
-                                i915_request_put(prev);
-                }
-        }
+                prev = list_entry(pos, typeof(*prev), link);
+                if (!i915_request_get_rcu(prev))
+                        break;
+
+                /* After the strong barrier, confirm prev is still attached */
+                if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
+                        i915_request_put(prev);
+                        break;
+                }
+
+                fence = &prev->fence;
+        } while (0);
         spin_unlock_irq(&signal->lock);
         rcu_read_unlock();
 
         if (!fence)
@@ -1242,8 +1259,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
                                                          0);
         }
 
-        list_add_tail(&rq->link, &timeline->requests);
-
         /*
          * Make sure that no request gazumped us - if it was allocated after
          * our i915_request_alloc() and called __i915_request_add() before
......
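Note: the i915_request_await_start() rework above samples signal->link.prev once with READ_ONCE(), takes a reference on that previous request, and only then re-checks prev->link.next, because the request can be retired (and list_del_rcu()'d) concurrently. The sketch below shows just that get-then-revalidate shape, single-threaded and without RCU or the spinlock; the node type and helpers are made up for illustration.

#include <stddef.h>
#include <stdio.h>

struct node {
        struct node *prev, *next;
        int refcount;
        const char *name;
};

static int node_get(struct node *n)
{
        if (n->refcount == 0)            /* already dead: mimic a failed get */
                return 0;
        n->refcount++;
        return 1;
}

static void node_put(struct node *n)
{
        n->refcount--;
}

/* Return a referenced pointer to the entry linked just before @signal. */
static struct node *peek_prev(struct node *head, struct node *signal)
{
        struct node *pos = signal->prev; /* kernel: READ_ONCE(signal->link.prev) */
        struct node *prev;

        if (pos == head)                 /* signal is the earliest entry */
                return NULL;

        prev = pos;
        if (!node_get(prev))
                return NULL;

        /* After taking the reference, confirm prev is still attached. */
        if (prev->next != signal) {
                node_put(prev);
                return NULL;
        }

        return prev;                     /* caller owns a reference */
}

int main(void)
{
        struct node head = { &head, &head, 1, "head" };
        struct node a    = { &head, NULL,  1, "a" };
        struct node b    = { &a,    &head, 1, "b" };
        struct node *p;

        head.next = &a;
        a.next = &b;
        head.prev = &b;

        p = peek_prev(&head, &b);
        printf("prev of b: %s\n", p ? p->name : "(none)");
        if (p)
                node_put(p);
        return 0;
}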