Commit 7b4d3a16 authored by Chris Wilson

drm/i915: Remove stop-rings debugfs interface

Now that we have (near) universal GPU recovery code, we can inject a
real hang from userspace and not need any fakery. Not only does this
mean that the testing is far more realistic, but we can simplify the
kernel in the process.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Arun Siluvery <arun.siluvery@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1467616119-4093-7-git-send-email-chris@chris-wilson.co.uk
parent c33d247d
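For readers unfamiliar with how a "real hang" is injected from userspace: the usual approach (used by IGT's hang helpers, which this commit alludes to rather than introduces) is to submit a batch buffer whose only instruction is an MI_BATCH_BUFFER_START that branches back to its own start, so the engine spins until hangcheck fires and the recovery path resets it. The sketch below is illustrative only, not part of this patch; it assumes a gen8+ command encoding, uses the long-standing GEM ioctls (CREATE, PWRITE, EXECBUFFER2) with a self-relocation, and omits all error handling.

/*
 * Illustrative sketch: inject a real GPU hang from userspace by submitting
 * a batch that branches to itself.  Hangcheck should detect the stuck
 * engine and trigger the reset path touched by this patch.  Assumes gen8+
 * MI_BATCH_BUFFER_START encoding; include paths may differ depending on
 * your libdrm installation.  Do not run on a machine you care about.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

#define MI_BATCH_BUFFER_START	(0x31 << 23)

static void inject_hang(int fd)
{
	/* One page is plenty for the batch buffer. */
	struct drm_i915_gem_create create = { .size = 4096 };
	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

	/*
	 * gen8+ 3-dword form: opcode | PPGTT address space | (len - 2),
	 * followed by a 64-bit jump target.  The relocation below points
	 * that target back at this very buffer (offset 0): infinite loop.
	 */
	uint32_t batch[4] = {
		MI_BATCH_BUFFER_START | (1 << 8) | 1,
		0, 0,		/* patched by the relocation */
		0,		/* MI_NOOP padding */
	};
	struct drm_i915_gem_pwrite pwrite = {
		.handle = create.handle,
		.size = sizeof(batch),
		.data_ptr = (uintptr_t)batch,
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);

	/* Relocate the address dword of the batch to the batch itself. */
	struct drm_i915_gem_relocation_entry reloc = {
		.target_handle = create.handle,
		.offset = sizeof(uint32_t),	/* byte offset of the address */
		.read_domains = I915_GEM_DOMAIN_COMMAND,
	};
	struct drm_i915_gem_exec_object2 obj = {
		.handle = create.handle,
		.relocation_count = 1,
		.relocs_ptr = (uintptr_t)&reloc,
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (uintptr_t)&obj,
		.buffer_count = 1,
		.batch_len = sizeof(batch),
		.flags = I915_EXEC_RENDER,
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);

	inject_hang(fd);
	return 0;
}

With this available, a simulated hang via the i915_ring_stop debugfs knob adds little over a genuine one, which is why the interface below can be dropped.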
@@ -4830,40 +4830,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
i915_wedged_get, i915_wedged_set,
"%llu\n");
static int
i915_ring_stop_get(void *data, u64 *val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
*val = dev_priv->gpu_error.stop_rings;
return 0;
}
static int
i915_ring_stop_set(void *data, u64 val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
dev_priv->gpu_error.stop_rings = val;
mutex_unlock(&dev->struct_mutex);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
i915_ring_stop_get, i915_ring_stop_set,
"0x%08llx\n");
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
@@ -5493,7 +5459,6 @@ static const struct i915_debugfs_files {
{"i915_max_freq", &i915_max_freq_fops},
{"i915_min_freq", &i915_min_freq_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_ring_stop", &i915_ring_stop_fops},
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -2159,24 +2159,11 @@ int i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
pr_notice("drm/i915: Resetting chip after gpu hang\n");
i915_gem_reset(dev);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
/* Also reset the gpu hangman. */
if (error->stop_rings != 0) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
error->stop_rings = 0;
if (ret == -ENODEV) {
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
ret = 0;
}
}
if (i915_stop_ring_allow_warn(dev_priv))
pr_notice("drm/i915: Resetting chip after gpu hang\n");
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1402,13 +1402,6 @@ struct i915_gpu_error {
*/
wait_queue_head_t reset_queue;
/* Userspace knobs for gpu hang simulation;
* combines both a ring mask, and extra flags
*/
u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN (1 << 31)
#define I915_STOP_RING_ALLOW_WARN (1 << 30)
/* For missed irq/seqno simulation. */
unsigned long test_irq_rings;
};
@@ -3360,18 +3353,6 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
{
return dev_priv->gpu_error.stop_rings == 0 ||
dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
}
static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
{
return dev_priv->gpu_error.stop_rings == 0 ||
dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
}
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
@@ -2956,44 +2956,30 @@ void __i915_add_request(struct drm_i915_gem_request *request,
i915_gem_mark_busy(engine);
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
const struct i915_gem_context *ctx)
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
unsigned long elapsed;
elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
if (ctx->hang_stats.banned)
return true;
elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
if (ctx->hang_stats.ban_period_seconds &&
elapsed <= ctx->hang_stats.ban_period_seconds) {
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
} else if (i915_stop_ring_allow_ban(dev_priv)) {
if (i915_stop_ring_allow_warn(dev_priv))
DRM_ERROR("gpu hanging too fast, banning!\n");
return true;
}
}
return false;
}
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
struct i915_gem_context *ctx,
static void i915_set_reset_status(struct i915_gem_context *ctx,
const bool guilty)
{
struct i915_ctx_hang_stats *hs;
if (WARN_ON(!ctx))
return;
hs = &ctx->hang_stats;
struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
if (guilty) {
hs->banned = i915_context_is_banned(dev_priv, ctx);
hs->banned = i915_context_is_banned(ctx);
hs->batch_active++;
hs->guilty_ts = get_seconds();
} else {
@@ -3119,27 +3105,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
return NULL;
}
static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
bool ring_hung;
request = i915_gem_find_active_request(engine);
if (request == NULL)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
i915_set_reset_status(dev_priv, request->ctx, ring_hung);
i915_set_reset_status(request->ctx, ring_hung);
list_for_each_entry_continue(request, &engine->request_list, list)
i915_set_reset_status(dev_priv, request->ctx, false);
i915_set_reset_status(request->ctx, false);
}
static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
{
struct intel_ringbuffer *buffer;
@@ -3209,10 +3191,10 @@ void i915_gem_reset(struct drm_device *dev)
* their reference to the objects, the inspection must be done first.
*/
for_each_engine(engine, dev_priv)
i915_gem_reset_engine_status(dev_priv, engine);
i915_gem_reset_engine_status(engine);
for_each_engine(engine, dev_priv)
i915_gem_reset_engine_cleanup(dev_priv, engine);
i915_gem_reset_engine_cleanup(engine);
i915_gem_context_reset(dev);
@@ -789,9 +789,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
if (intel_engine_stopped(engine))
return 0;
/* We keep the previous context alive until we retire the following
* request. This ensures that the context object is still pinned
* for any residual writes the HW makes into it on the context switch
@@ -58,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
ringbuf->tail, ringbuf->size);
}
bool intel_engine_stopped(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
}
static void __intel_ring_advance(struct intel_engine_cs *engine)
{
struct intel_ringbuffer *ringbuf = engine->buffer;
ringbuf->tail &= ringbuf->size - 1;
if (intel_engine_stopped(engine))
return;
engine->write_tail(engine, ringbuf->tail);
}
@@ -468,7 +468,6 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
bool intel_engine_stopped(struct intel_engine_cs *engine);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);