Commit 66101975 authored by Chris Wilson's avatar Chris Wilson

drm/i915: Move request runtime management onto gt

Requests are run from the gt and are tied into the gt runtime power
management, so pull the runtime request management under gt/
Signed-off-by: default avatarChris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: default avatarTvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-12-chris@chris-wilson.co.uk
parent 789ed955
...@@ -83,6 +83,7 @@ gt-y += \ ...@@ -83,6 +83,7 @@ gt-y += \
gt/intel_gt_irq.o \ gt/intel_gt_irq.o \
gt/intel_gt_pm.o \ gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \ gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
gt/intel_hangcheck.o \ gt/intel_hangcheck.o \
gt/intel_lrc.o \ gt/intel_lrc.o \
gt/intel_rc6.o \ gt/intel_rc6.o \
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/sizes.h> #include <linux/sizes.h>
#include "gt/intel_gt.h" #include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_gem_gtt.h" #include "i915_gem_gtt.h"
...@@ -424,6 +425,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) ...@@ -424,6 +425,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
static int create_mmap_offset(struct drm_i915_gem_object *obj) static int create_mmap_offset(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_gt *gt = &i915->gt;
int err; int err;
err = drm_gem_create_mmap_offset(&obj->base); err = drm_gem_create_mmap_offset(&obj->base);
...@@ -431,7 +433,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj) ...@@ -431,7 +433,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
return 0; return 0;
/* Attempt to reap some mmap space from dead objects */ /* Attempt to reap some mmap space from dead objects */
err = i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT); err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
if (err) if (err)
return err; return err;
......
...@@ -7,31 +7,18 @@ ...@@ -7,31 +7,18 @@
#include "gem/i915_gem_pm.h" #include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h" #include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h" #include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_globals.h" #include "i915_globals.h"
static void i915_gem_park(struct drm_i915_private *i915) static void i915_gem_park(struct drm_i915_private *i915)
{ {
cancel_delayed_work(&i915->gem.retire_work);
i915_vma_parked(i915); i915_vma_parked(i915);
i915_globals_park(); i915_globals_park();
} }
static void retire_work_handler(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), gem.retire_work.work);
i915_retire_requests(i915);
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
round_jiffies_up_relative(HZ));
}
static int pm_notifier(struct notifier_block *nb, static int pm_notifier(struct notifier_block *nb,
unsigned long action, unsigned long action,
void *data) void *data)
...@@ -42,9 +29,6 @@ static int pm_notifier(struct notifier_block *nb, ...@@ -42,9 +29,6 @@ static int pm_notifier(struct notifier_block *nb,
switch (action) { switch (action) {
case INTEL_GT_UNPARK: case INTEL_GT_UNPARK:
i915_globals_unpark(); i915_globals_unpark();
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
round_jiffies_up_relative(HZ));
break; break;
case INTEL_GT_PARK: case INTEL_GT_PARK:
...@@ -59,7 +43,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt) ...@@ -59,7 +43,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{ {
bool result = !intel_gt_is_wedged(gt); bool result = !intel_gt_is_wedged(gt);
if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) { if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */ /* XXX hide warning from gem_eio */
if (i915_modparams.reset) { if (i915_modparams.reset) {
dev_err(gt->i915->drm.dev, dev_err(gt->i915->drm.dev,
...@@ -122,14 +106,12 @@ void i915_gem_suspend(struct drm_i915_private *i915) ...@@ -122,14 +106,12 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do * state. Fortunately, the kernel_context is disposable and we do
* not rely on its state. * not rely on its state.
*/ */
switch_to_kernel_context_sync(&i915->gt); intel_gt_suspend(&i915->gt);
intel_uc_suspend(&i915->gt.uc);
cancel_delayed_work_sync(&i915->gt.hangcheck.work); cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915); i915_gem_drain_freed_objects(i915);
intel_uc_suspend(&i915->gt.uc);
intel_gt_suspend(&i915->gt);
} }
static struct drm_i915_gem_object *first_mm_object(struct list_head *list) static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
...@@ -239,8 +221,6 @@ void i915_gem_resume(struct drm_i915_private *i915) ...@@ -239,8 +221,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
void i915_gem_init__pm(struct drm_i915_private *i915) void i915_gem_init__pm(struct drm_i915_private *i915)
{ {
INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
i915->gem.pm_notifier.notifier_call = pm_notifier; i915->gem.pm_notifier.notifier_call = pm_notifier;
blocking_notifier_chain_register(&i915->gt.pm_notifications, blocking_notifier_chain_register(&i915->gt.pm_notifications,
&i915->gem.pm_notifier); &i915->gem.pm_notifier);
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include "gem/i915_gem_pm.h" #include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h" #include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h" #include "gt/intel_reset.h"
#include "i915_selftest.h" #include "i915_selftest.h"
...@@ -518,7 +519,7 @@ create_test_object(struct i915_address_space *vm, ...@@ -518,7 +519,7 @@ create_test_object(struct i915_address_space *vm,
int err; int err;
/* Keep in GEM's good graces */ /* Keep in GEM's good graces */
i915_retire_requests(vm->i915); intel_gt_retire_requests(vm->gt);
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE); size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
...@@ -1136,7 +1137,7 @@ __sseu_finish(const char *name, ...@@ -1136,7 +1137,7 @@ __sseu_finish(const char *name,
igt_spinner_end(spin); igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) { if ((flags & TEST_IDLE) && ret == 0) {
ret = i915_gem_wait_for_idle(ce->engine->i915, ret = intel_gt_wait_for_idle(ce->engine->gt,
MAX_SCHEDULE_TIMEOUT); MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
return ret; return ret;
......
...@@ -573,7 +573,7 @@ static void disable_retire_worker(struct drm_i915_private *i915) ...@@ -573,7 +573,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
{ {
i915_gem_driver_unregister__shrinker(i915); i915_gem_driver_unregister__shrinker(i915);
intel_gt_pm_get(&i915->gt); intel_gt_pm_get(&i915->gt);
cancel_delayed_work_sync(&i915->gem.retire_work); cancel_delayed_work_sync(&i915->gt.requests.retire_work);
} }
static void restore_retire_worker(struct drm_i915_private *i915) static void restore_retire_worker(struct drm_i915_private *i915)
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include "i915_drv.h" #include "i915_drv.h"
#include "intel_gt.h" #include "intel_gt.h"
#include "intel_gt_pm.h" #include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h" #include "intel_mocs.h"
#include "intel_rc6.h" #include "intel_rc6.h"
#include "intel_uncore.h" #include "intel_uncore.h"
...@@ -23,6 +24,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) ...@@ -23,6 +24,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_gt_init_hangcheck(gt); intel_gt_init_hangcheck(gt);
intel_gt_init_reset(gt); intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_pm_init_early(gt); intel_gt_pm_init_early(gt);
intel_uc_init_early(&gt->uc); intel_uc_init_early(&gt->uc);
} }
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include "intel_engine_pm.h" #include "intel_engine_pm.h"
#include "intel_gt.h" #include "intel_gt.h"
#include "intel_gt_pm.h" #include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_pm.h" #include "intel_pm.h"
#include "intel_rc6.h" #include "intel_rc6.h"
#include "intel_wakeref.h" #include "intel_wakeref.h"
...@@ -49,6 +50,7 @@ static int __gt_unpark(struct intel_wakeref *wf) ...@@ -49,6 +50,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915); i915_pmu_gt_unparked(i915);
intel_gt_queue_hangcheck(gt); intel_gt_queue_hangcheck(gt);
intel_gt_unpark_requests(gt);
pm_notify(gt, INTEL_GT_UNPARK); pm_notify(gt, INTEL_GT_UNPARK);
...@@ -64,6 +66,7 @@ static int __gt_park(struct intel_wakeref *wf) ...@@ -64,6 +66,7 @@ static int __gt_park(struct intel_wakeref *wf)
GEM_TRACE("\n"); GEM_TRACE("\n");
pm_notify(gt, INTEL_GT_PARK); pm_notify(gt, INTEL_GT_PARK);
intel_gt_park_requests(gt);
i915_pmu_gt_parked(i915); i915_pmu_gt_parked(i915);
if (INTEL_GEN(i915) >= 6) if (INTEL_GEN(i915) >= 6)
...@@ -196,7 +199,7 @@ int intel_gt_resume(struct intel_gt *gt) ...@@ -196,7 +199,7 @@ int intel_gt_resume(struct intel_gt *gt)
static void wait_for_idle(struct intel_gt *gt) static void wait_for_idle(struct intel_gt *gt)
{ {
if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) { if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* /*
* Forcibly cancel outstanding work and leave * Forcibly cancel outstanding work and leave
* the gpu quiet. * the gpu quiet.
......
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_request.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"
static void retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
break;
}
/*
 * Retire completed requests on all active timelines of @gt.
 *
 * A positive @timeout (in jiffies) additionally waits on the last request
 * of each timeline, interruptibly; a negative @timeout requests an
 * uninterruptible wait of up to -@timeout jiffies (see the caller passing
 * -MAX_SCHEDULE_TIMEOUT for VT-d idling).
 *
 * Returns 0 if no timelines remained active, otherwise the remaining
 * timeout.
 */
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long active_count = 0;
	unsigned long flags;
	bool interruptible;
	LIST_HEAD(free);

	/* Negative timeout encodes "wait uninterruptibly for -timeout". */
	interruptible = true;
	if (unlikely(timeout < 0))
		timeout = -timeout, interruptible = false;

	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex))
			continue;

		intel_timeline_get(tl);
		GEM_BUG_ON(!tl->active_count);
		tl->active_count++; /* pin the list element */
		spin_unlock_irqrestore(&timelines->lock, flags);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				/*
				 * Honour the caller's interruptibility
				 * request; previously this hardcoded true,
				 * making even -MAX_SCHEDULE_TIMEOUT waits
				 * signal-interruptible.
				 */
				timeout = dma_fence_wait_timeout(fence,
								 interruptible,
								 timeout);
				dma_fence_put(fence);
			}
		}

		retire_requests(tl);

		spin_lock_irqsave(&timelines->lock, flags);

		/* Resume iteration after dropping lock */
		list_safe_reset_next(tl, tn, link);
		if (--tl->active_count)
			active_count += !!rcu_access_pointer(tl->last_request.fence);
		else
			list_del(&tl->link);

		mutex_unlock(&tl->mutex);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(tl->active_count);
			list_add(&tl->link, &free);
		}
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	return active_count ? timeout : 0;
}
/*
 * Wait (up to @timeout jiffies) for all outstanding requests on @gt to
 * be retired. Returns the remaining timeout (<= 0 once idle), or -EINTR
 * if a signal was received while waiting.
 */
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	for (;;) {
		timeout = intel_gt_retire_requests_timeout(gt, timeout);
		if (timeout <= 0)
			return timeout;

		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}
}
/*
 * Periodic retirement heartbeat: reap whatever requests have completed,
 * then re-arm ourselves to run again in roughly a second
 * (round_jiffies_up_relative(HZ)). Cancelled on park, re-queued on unpark.
 */
static void retire_work_handler(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	intel_gt_retire_requests(gt);
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}
/* One-time setup of the request-retirement worker for @gt (early init). */
void intel_gt_init_requests(struct intel_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}
/*
 * Stop the retire heartbeat as the GT powers down. Uses the non-sync
 * cancel, so a handler already in flight is not flushed here.
 */
void intel_gt_park_requests(struct intel_gt *gt)
{
	cancel_delayed_work(&gt->requests.retire_work);
}
/*
 * Restart the retire heartbeat when the GT is unparked, so completed
 * requests keep being reaped while the GT stays busy.
 */
void intel_gt_unpark_requests(struct intel_gt *gt)
{
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef INTEL_GT_REQUESTS_H
#define INTEL_GT_REQUESTS_H

struct intel_gt;

/*
 * Retire completed requests on all active timelines of @gt. A positive
 * @timeout (jiffies) also waits on each timeline's last request; a
 * negative @timeout bounds the wait at -@timeout jiffies. Returns 0 once
 * no timelines remain active, otherwise the remaining timeout.
 */
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);

/* Non-waiting retire: reap only already-completed requests (timeout 0). */
static inline void intel_gt_retire_requests(struct intel_gt *gt)
{
	intel_gt_retire_requests_timeout(gt, 0);
}

/* Retire in a loop until @gt is idle, the timeout expires or a signal. */
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);

void intel_gt_init_requests(struct intel_gt *gt);
void intel_gt_park_requests(struct intel_gt *gt);
void intel_gt_unpark_requests(struct intel_gt *gt);

#endif /* INTEL_GT_REQUESTS_H */
...@@ -50,6 +50,17 @@ struct intel_gt { ...@@ -50,6 +50,17 @@ struct intel_gt {
struct list_head hwsp_free_list; struct list_head hwsp_free_list;
} timelines; } timelines;
struct intel_gt_requests {
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;
} requests;
struct intel_wakeref wakeref; struct intel_wakeref wakeref;
atomic_t user_wakeref; atomic_t user_wakeref;
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include "intel_engine_pm.h" #include "intel_engine_pm.h"
#include "intel_gt.h" #include "intel_gt.h"
#include "intel_gt_requests.h"
#include "../selftests/i915_random.h" #include "../selftests/i915_random.h"
#include "../i915_selftest.h" #include "../i915_selftest.h"
...@@ -641,6 +642,7 @@ static int live_hwsp_alternate(void *arg) ...@@ -641,6 +642,7 @@ static int live_hwsp_alternate(void *arg)
static int live_hwsp_wrap(void *arg) static int live_hwsp_wrap(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct intel_timeline *tl; struct intel_timeline *tl;
enum intel_engine_id id; enum intel_engine_id id;
...@@ -651,7 +653,7 @@ static int live_hwsp_wrap(void *arg) ...@@ -651,7 +653,7 @@ static int live_hwsp_wrap(void *arg)
* foreign GPU references. * foreign GPU references.
*/ */
tl = intel_timeline_create(&i915->gt, NULL); tl = intel_timeline_create(gt, NULL);
if (IS_ERR(tl)) if (IS_ERR(tl))
return PTR_ERR(tl); return PTR_ERR(tl);
...@@ -662,7 +664,7 @@ static int live_hwsp_wrap(void *arg) ...@@ -662,7 +664,7 @@ static int live_hwsp_wrap(void *arg)
if (err) if (err)
goto out_free; goto out_free;
for_each_engine(engine, i915, id) { for_each_engine(engine, gt->i915, id) {
const u32 *hwsp_seqno[2]; const u32 *hwsp_seqno[2];
struct i915_request *rq; struct i915_request *rq;
u32 seqno[2]; u32 seqno[2];
...@@ -734,7 +736,7 @@ static int live_hwsp_wrap(void *arg) ...@@ -734,7 +736,7 @@ static int live_hwsp_wrap(void *arg)
goto out; goto out;
} }
i915_retire_requests(i915); /* recycle HWSP */ intel_gt_retire_requests(gt); /* recycle HWSP */
} }
out: out:
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include "gem/i915_gem_context.h" #include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h" #include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h" #include "gt/intel_reset.h"
#include "gt/intel_rc6.h" #include "gt/intel_rc6.h"
#include "gt/uc/intel_guc_submission.h" #include "gt/uc/intel_guc_submission.h"
...@@ -3621,33 +3622,33 @@ static int ...@@ -3621,33 +3622,33 @@ static int
i915_drop_caches_set(void *data, u64 val) i915_drop_caches_set(void *data, u64 val)
{ {
struct drm_i915_private *i915 = data; struct drm_i915_private *i915 = data;
struct intel_gt *gt = &i915->gt;
int ret; int ret;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL); val, val & DROP_ALL);
if (val & DROP_RESET_ACTIVE && if (val & DROP_RESET_ACTIVE &&
wait_for(intel_engines_are_idle(&i915->gt), wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
I915_IDLE_ENGINES_TIMEOUT)) intel_gt_set_wedged(gt);
intel_gt_set_wedged(&i915->gt);
if (val & DROP_RETIRE) if (val & DROP_RETIRE)
i915_retire_requests(i915); intel_gt_retire_requests(gt);
if (val & (DROP_IDLE | DROP_ACTIVE)) { if (val & (DROP_IDLE | DROP_ACTIVE)) {
ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT); ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (ret) if (ret)
return ret; return ret;
} }
if (val & DROP_IDLE) { if (val & DROP_IDLE) {
ret = intel_gt_pm_wait_for_idle(&i915->gt); ret = intel_gt_pm_wait_for_idle(gt);
if (ret) if (ret)
return ret; return ret;
} }
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt)) if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL); intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
fs_reclaim_acquire(GFP_KERNEL); fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND) if (val & DROP_BOUND)
......
...@@ -1710,15 +1710,6 @@ struct drm_i915_private { ...@@ -1710,15 +1710,6 @@ struct drm_i915_private {
struct { struct {
struct notifier_block pm_notifier; struct notifier_block pm_notifier;
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;
} gem; } gem;
/* For i945gm vblank irq vs. C3 workaround */ /* For i945gm vblank irq vs. C3 workaround */
...@@ -2321,7 +2312,6 @@ void i915_gem_driver_register(struct drm_i915_private *i915); ...@@ -2321,7 +2312,6 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915); void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv); void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv); void i915_gem_driver_release(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv);
......
...@@ -883,23 +883,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) ...@@ -883,23 +883,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
} }
} }
int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
{
struct intel_gt *gt = &i915->gt;
/* If the device is asleep, we have no requests outstanding */
if (!intel_gt_pm_is_awake(gt))
return 0;
while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
cond_resched();
if (signal_pending(current))
return -EINTR;
}
return timeout;
}
struct i915_vma * struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view, const struct i915_ggtt_view *view,
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
#include "gem/i915_gem_context.h" #include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_trace.h" #include "i915_trace.h"
...@@ -37,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl { ...@@ -37,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1; bool fail_if_busy:1;
} igt_evict_ctl;) } igt_evict_ctl;)
static int ggtt_flush(struct drm_i915_private *i915) static int ggtt_flush(struct intel_gt *gt)
{ {
/* /*
* Not everything in the GGTT is tracked via vma (otherwise we * Not everything in the GGTT is tracked via vma (otherwise we
...@@ -46,7 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915) ...@@ -46,7 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
* the hopes that we can then remove contexts and the like only * the hopes that we can then remove contexts and the like only
* bound by their active reference. * bound by their active reference.
*/ */
return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT); return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
} }
static bool static bool
...@@ -92,7 +93,6 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -92,7 +93,6 @@ i915_gem_evict_something(struct i915_address_space *vm,
u64 start, u64 end, u64 start, u64 end,
unsigned flags) unsigned flags)
{ {
struct drm_i915_private *dev_priv = vm->i915;
struct drm_mm_scan scan; struct drm_mm_scan scan;
struct list_head eviction_list; struct list_head eviction_list;
struct i915_vma *vma, *next; struct i915_vma *vma, *next;
...@@ -124,7 +124,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -124,7 +124,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
min_size, alignment, color, min_size, alignment, color,
start, end, mode); start, end, mode);
i915_retire_requests(vm->i915); intel_gt_retire_requests(vm->gt);
search_again: search_again:
active = NULL; active = NULL;
...@@ -197,7 +197,7 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -197,7 +197,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy)) if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
return -EBUSY; return -EBUSY;
ret = ggtt_flush(dev_priv); ret = ggtt_flush(vm->gt);
if (ret) if (ret)
return ret; return ret;
...@@ -270,7 +270,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, ...@@ -270,7 +270,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* a stray pin (preventing eviction) that can only be resolved by * a stray pin (preventing eviction) that can only be resolved by
* retiring. * retiring.
*/ */
i915_retire_requests(vm->i915); intel_gt_retire_requests(vm->gt);
if (i915_vm_has_cache_coloring(vm)) { if (i915_vm_has_cache_coloring(vm)) {
/* Expand search to cover neighbouring guard pages (or lack!) */ /* Expand search to cover neighbouring guard pages (or lack!) */
...@@ -372,7 +372,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm) ...@@ -372,7 +372,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
* switch otherwise is ineffective. * switch otherwise is ineffective.
*/ */
if (i915_is_ggtt(vm)) { if (i915_is_ggtt(vm)) {
ret = ggtt_flush(vm->i915); ret = ggtt_flush(vm->gt);
if (ret) if (ret)
return ret; return ret;
} }
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include "display/intel_frontbuffer.h" #include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h" #include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_scatterlist.h" #include "i915_scatterlist.h"
...@@ -2529,8 +2530,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, ...@@ -2529,8 +2530,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
if (unlikely(ggtt->do_idle_maps)) { if (unlikely(ggtt->do_idle_maps)) {
/* XXX This does not prevent more requests being submitted! */ /* XXX This does not prevent more requests being submitted! */
if (i915_retire_requests_timeout(dev_priv, if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
-MAX_SCHEDULE_TIMEOUT)) { -MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */ /* Wait a bit, in hopes it avoids the hang */
udelay(10); udelay(10);
......
...@@ -216,7 +216,7 @@ static void remove_from_engine(struct i915_request *rq) ...@@ -216,7 +216,7 @@ static void remove_from_engine(struct i915_request *rq)
spin_unlock(&locked->active.lock); spin_unlock(&locked->active.lock);
} }
static bool i915_request_retire(struct i915_request *rq) bool i915_request_retire(struct i915_request *rq)
{ {
if (!i915_request_completed(rq)) if (!i915_request_completed(rq))
return false; return false;
...@@ -1508,68 +1508,6 @@ long i915_request_wait(struct i915_request *rq, ...@@ -1508,68 +1508,6 @@ long i915_request_wait(struct i915_request *rq,
return timeout; return timeout;
} }
long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
{
struct intel_gt_timelines *timelines = &i915->gt.timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
unsigned long flags;
bool interruptible;
LIST_HEAD(free);
interruptible = true;
if (timeout < 0)
timeout = -timeout, interruptible = false;
spin_lock_irqsave(&timelines->lock, flags);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex))
continue;
intel_timeline_get(tl);
GEM_BUG_ON(!tl->active_count);
tl->active_count++; /* pin the list element */
spin_unlock_irqrestore(&timelines->lock, flags);
if (timeout > 0) {
struct dma_fence *fence;
fence = i915_active_fence_get(&tl->last_request);
if (fence) {
timeout = dma_fence_wait_timeout(fence,
interruptible,
timeout);
dma_fence_put(fence);
}
}
retire_requests(tl);
spin_lock_irqsave(&timelines->lock, flags);
/* Resume iteration after dropping lock */
list_safe_reset_next(tl, tn, link);
if (--tl->active_count)
active_count += !!rcu_access_pointer(tl->last_request.fence);
else
list_del(&tl->link);
mutex_unlock(&tl->mutex);
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
GEM_BUG_ON(tl->active_count);
list_add(&tl->link, &free);
}
}
spin_unlock_irqrestore(&timelines->lock, flags);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
return active_count ? timeout : 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c" #include "selftests/mock_request.c"
#include "selftests/i915_request.c" #include "selftests/i915_request.c"
......
...@@ -250,6 +250,7 @@ struct i915_request *__i915_request_commit(struct i915_request *request); ...@@ -250,6 +250,7 @@ struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq, void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr); const struct i915_sched_attr *attr);
bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq); void i915_request_retire_upto(struct i915_request *rq);
static inline struct i915_request * static inline struct i915_request *
...@@ -459,10 +460,4 @@ i915_request_active_timeline(struct i915_request *rq) ...@@ -459,10 +460,4 @@ i915_request_active_timeline(struct i915_request *rq)
lockdep_is_held(&rq->engine->active.lock)); lockdep_is_held(&rq->engine->active.lock));
} }
long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
static inline void i915_retire_requests(struct drm_i915_private *i915)
{
i915_retire_requests_timeout(i915, 0);
}
#endif /* I915_REQUEST_H */ #endif /* I915_REQUEST_H */
...@@ -4,8 +4,8 @@ ...@@ -4,8 +4,8 @@
* Copyright © 2018 Intel Corporation * Copyright © 2018 Intel Corporation
*/ */
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h" #include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_selftest.h" #include "i915_selftest.h"
...@@ -14,11 +14,12 @@ ...@@ -14,11 +14,12 @@
int igt_flush_test(struct drm_i915_private *i915) int igt_flush_test(struct drm_i915_private *i915)
{ {
int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0; struct intel_gt *gt = &i915->gt;
int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
cond_resched(); cond_resched();
if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) { if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n", pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0)); __builtin_return_address(0));
...@@ -26,7 +27,7 @@ int igt_flush_test(struct drm_i915_private *i915) ...@@ -26,7 +27,7 @@ int igt_flush_test(struct drm_i915_private *i915)
__builtin_return_address(0)); __builtin_return_address(0));
GEM_TRACE_DUMP(); GEM_TRACE_DUMP();
intel_gt_set_wedged(&i915->gt); intel_gt_set_wedged(gt);
ret = -EIO; ret = -EIO;
} }
......
...@@ -4,7 +4,8 @@ ...@@ -4,7 +4,8 @@
* Copyright © 2018 Intel Corporation * Copyright © 2018 Intel Corporation
*/ */
#include "../i915_drv.h" #include "i915_drv.h"
#include "gt/intel_gt_requests.h"
#include "../i915_selftest.h" #include "../i915_selftest.h"
#include "igt_flush_test.h" #include "igt_flush_test.h"
...@@ -23,7 +24,7 @@ int igt_live_test_begin(struct igt_live_test *t, ...@@ -23,7 +24,7 @@ int igt_live_test_begin(struct igt_live_test *t,
t->func = func; t->func = func;
t->name = name; t->name = name;
err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT); err = intel_gt_wait_for_idle(&i915->gt, MAX_SCHEDULE_TIMEOUT);
if (err) { if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!", pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err); func, name, err);
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include "gt/intel_gt.h" #include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h" #include "gt/mock_engine.h"
#include "mock_request.h" #include "mock_request.h"
...@@ -44,7 +45,8 @@ void mock_device_flush(struct drm_i915_private *i915) ...@@ -44,7 +45,8 @@ void mock_device_flush(struct drm_i915_private *i915)
do { do {
for_each_engine(engine, i915, id) for_each_engine(engine, i915, id)
mock_engine_flush(engine); mock_engine_flush(engine);
} while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT)); } while (intel_gt_retire_requests_timeout(&i915->gt,
MAX_SCHEDULE_TIMEOUT));
} }
static void mock_device_release(struct drm_device *dev) static void mock_device_release(struct drm_device *dev)
...@@ -98,10 +100,6 @@ static void release_dev(struct device *dev) ...@@ -98,10 +100,6 @@ static void release_dev(struct device *dev)
kfree(pdev); kfree(pdev);
} }
static void mock_retire_work_handler(struct work_struct *work)
{
}
static int pm_domain_resume(struct device *dev) static int pm_domain_resume(struct device *dev)
{ {
return pm_generic_runtime_resume(dev); return pm_generic_runtime_resume(dev);
...@@ -181,8 +179,6 @@ struct drm_i915_private *mock_gem_device(void) ...@@ -181,8 +179,6 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915); mock_init_contexts(i915);
INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
intel_timelines_init(i915); intel_timelines_init(i915);
mutex_lock(&i915->drm.struct_mutex); mutex_lock(&i915->drm.struct_mutex);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment