Commit 16e87459 authored by Chris Wilson

drm/i915/gt: Move the batch buffer pool from the engine to the gt

Since the introduction of 'soft-rc6', we aim to park the device quickly
and that results in frequent idling of the whole device. Currently upon
idling we free the batch buffer pool, and so this renders the cache
ineffective for many workloads. If we want to have an effective cache of
recently allocated buffers available for reuse, we need to decouple that
cache from the engine power management and make it timer-based. As there
is no reason then to keep it within the engine (where it once made
retirement order easier to track), we can move it up the hierarchy to the
owner of the memory allocations.

v2: Hook up to debugfs/drop_caches to clear the cache on demand.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200430111819.10262-2-chris@chris-wilson.co.uk
parent 230982d8
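
Before the per-file hunks, a note on the consumer-facing shape of the change: callers now borrow a node from the gt-wide pool, tie its lifetime to a request, and drop their local reference. A minimal sketch of that lifecycle, using only the API introduced below (the helper function itself is hypothetical):

#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"

/*
 * Hypothetical consumer, for illustration only: borrow a suitably sized
 * buffer from the gt-wide cache, keep it busy until @rq retires, then
 * drop the local reference. On retirement the node is timestamped and
 * returned to its size bucket, where pool_free_work() reaps it after
 * roughly a second of disuse.
 */
static int use_scratch_buffer(struct intel_gt *gt,
			      struct i915_request *rq,
			      size_t size)
{
	struct intel_gt_buffer_pool_node *node;
	int err;

	node = intel_gt_get_buffer_pool(gt, size);
	if (IS_ERR(node))
		return PTR_ERR(node);

	/* ... fill node->obj and emit commands referencing it ... */

	err = intel_gt_buffer_pool_mark_active(node, rq);
	intel_gt_buffer_pool_put(node);
	return err;
}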
drivers/gpu/drm/i915/Makefile
@@ -87,11 +87,11 @@ gt-y += \
 	gt/intel_engine_cs.o \
 	gt/intel_engine_heartbeat.o \
 	gt/intel_engine_pm.o \
-	gt/intel_engine_pool.o \
 	gt/intel_engine_user.o \
 	gt/intel_ggtt.o \
 	gt/intel_ggtt_fencing.o \
 	gt/intel_gt.o \
+	gt/intel_gt_buffer_pool.o \
 	gt/intel_gt_clock_utils.o \
 	gt/intel_gt_irq.o \
 	gt/intel_gt_pm.o \
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -6,7 +6,6 @@
 #include "i915_drv.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
 #include "i915_gem_client_blt.h"
 #include "i915_gem_object_blt.h"
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -15,8 +15,8 @@
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
-#include "gt/intel_engine_pool.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_ring.h"
@@ -1194,13 +1194,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 			     unsigned int len)
 {
 	struct reloc_cache *cache = &eb->reloc_cache;
-	struct intel_engine_pool_node *pool;
+	struct intel_gt_buffer_pool_node *pool;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	u32 *cmd;
 	int err;
 
-	pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
+	pool = intel_gt_get_buffer_pool(eb->engine->gt, PAGE_SIZE);
 	if (IS_ERR(pool))
 		return PTR_ERR(pool);
@@ -1229,7 +1229,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_unpin;
 	}
 
-	err = intel_engine_pool_mark_active(pool, rq);
+	err = intel_gt_buffer_pool_mark_active(pool, rq);
 	if (err)
 		goto err_request;
@@ -1270,7 +1270,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 err_unmap:
 	i915_gem_object_unpin_map(pool->obj);
 out_pool:
-	intel_engine_pool_put(pool);
+	intel_gt_buffer_pool_put(pool);
 	return err;
 }
@@ -1887,7 +1887,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 static int eb_parse(struct i915_execbuffer *eb)
 {
 	struct drm_i915_private *i915 = eb->i915;
-	struct intel_engine_pool_node *pool;
+	struct intel_gt_buffer_pool_node *pool;
 	struct i915_vma *shadow, *trampoline;
 	unsigned int len;
 	int err;
@@ -1910,7 +1910,7 @@ static int eb_parse(struct i915_execbuffer *eb)
 		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
 	}
 
-	pool = intel_engine_get_pool(eb->engine, len);
+	pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
 	if (IS_ERR(pool))
 		return PTR_ERR(pool);
@@ -1958,7 +1958,7 @@ static int eb_parse(struct i915_execbuffer *eb)
 err_shadow:
 	i915_vma_unpin(shadow);
 err:
-	intel_engine_pool_put(pool);
+	intel_gt_buffer_pool_put(pool);
 	return err;
 }
@@ -2643,7 +2643,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 */
 	eb.request->batch = batch;
 	if (batch->private)
-		intel_engine_pool_mark_active(batch->private, eb.request);
+		intel_gt_buffer_pool_mark_active(batch->private, eb.request);
 
 	trace_i915_request_queue(eb.request, eb.batch_flags);
 	err = eb_submit(&eb, batch);
@@ -2672,7 +2672,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	i915_vma_unpin(batch);
 err_parse:
 	if (batch->private)
-		intel_engine_pool_put(batch->private);
+		intel_gt_buffer_pool_put(batch->private);
 err_vma:
 	if (eb.trampoline)
 		i915_vma_unpin(eb.trampoline);
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -6,8 +6,8 @@
 #include "i915_drv.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_ring.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_object_blt.h"
@@ -18,7 +18,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 {
 	struct drm_i915_private *i915 = ce->vm->i915;
 	const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
-	struct intel_engine_pool_node *pool;
+	struct intel_gt_buffer_pool_node *pool;
 	struct i915_vma *batch;
 	u64 offset;
 	u64 count;
@@ -33,7 +33,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 	count = div_u64(round_up(vma->size, block_size), block_size);
 	size = (1 + 8 * count) * sizeof(u32);
 	size = round_up(size, PAGE_SIZE);
-	pool = intel_engine_get_pool(ce->engine, size);
+	pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
 	if (IS_ERR(pool)) {
 		err = PTR_ERR(pool);
 		goto out_pm;
@@ -96,7 +96,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
 	return batch;
 
 out_put:
-	intel_engine_pool_put(pool);
+	intel_gt_buffer_pool_put(pool);
 out_pm:
 	intel_engine_pm_put(ce->engine);
 	return ERR_PTR(err);
@@ -114,13 +114,13 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
 	if (unlikely(err))
 		return err;
 
-	return intel_engine_pool_mark_active(vma->private, rq);
+	return intel_gt_buffer_pool_mark_active(vma->private, rq);
 }
 
 void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
 {
 	i915_vma_unpin(vma);
-	intel_engine_pool_put(vma->private);
+	intel_gt_buffer_pool_put(vma->private);
 	intel_engine_pm_put(ce->engine);
 }
@@ -213,7 +213,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 {
 	struct drm_i915_private *i915 = ce->vm->i915;
 	const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
-	struct intel_engine_pool_node *pool;
+	struct intel_gt_buffer_pool_node *pool;
 	struct i915_vma *batch;
 	u64 src_offset, dst_offset;
 	u64 count, rem;
@@ -228,7 +228,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 	count = div_u64(round_up(dst->size, block_size), block_size);
 	size = (1 + 11 * count) * sizeof(u32);
 	size = round_up(size, PAGE_SIZE);
-	pool = intel_engine_get_pool(ce->engine, size);
+	pool = intel_gt_get_buffer_pool(ce->engine->gt, size);
 	if (IS_ERR(pool)) {
 		err = PTR_ERR(pool);
 		goto out_pm;
@@ -307,7 +307,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 	return batch;
 
 out_put:
-	intel_engine_pool_put(pool);
+	intel_gt_buffer_pool_put(pool);
out_pm:
 	intel_engine_pm_put(ce->engine);
 	return ERR_PTR(err);
drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -10,7 +10,6 @@
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
-#include "gt/intel_engine_pool.h"
 #include "i915_vma.h"
 
 struct drm_i915_gem_object;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -31,7 +31,6 @@
 #include "intel_context.h"
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
 #include "intel_engine_user.h"
 #include "intel_gt.h"
 #include "intel_gt_requests.h"
@@ -631,8 +630,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
-	intel_engine_pool_init(&engine->pool);
-
 	/* Use the whole device by default */
 	engine->sseu =
 		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -829,7 +826,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	cleanup_status_page(engine);
 	intel_engine_fini_retire(engine);
-	intel_engine_pool_fini(&engine->pool);
 	intel_engine_fini_breadcrumbs(engine);
 	intel_engine_cleanup_cmd_parser(engine);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -10,7 +10,6 @@
 #include "intel_engine.h"
 #include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
 #include "intel_rc6.h"
@@ -254,7 +253,6 @@ static int __engine_park(struct intel_wakeref *wf)
 	intel_engine_park_heartbeat(engine);
 	intel_engine_disarm_breadcrumbs(engine);
-	intel_engine_pool_park(&engine->pool);
 
 	/* Must be reset upon idling, or we may miss the busy wakeup. */
 	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
drivers/gpu/drm/i915/gt/intel_engine_pool.h (deleted)
@@ -1,34 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef INTEL_ENGINE_POOL_H
-#define INTEL_ENGINE_POOL_H
-
-#include "intel_engine_pool_types.h"
-#include "i915_active.h"
-#include "i915_request.h"
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
-
-static inline int
-intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
-			      struct i915_request *rq)
-{
-	return i915_active_add_request(&node->active, rq);
-}
-
-static inline void
-intel_engine_pool_put(struct intel_engine_pool_node *node)
-{
-	i915_active_release(&node->active);
-}
-
-void intel_engine_pool_init(struct intel_engine_pool *pool);
-void intel_engine_pool_park(struct intel_engine_pool *pool);
-void intel_engine_pool_fini(struct intel_engine_pool *pool);
-
-#endif /* INTEL_ENGINE_POOL_H */
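
For quick reference, the deleted engine-scoped entry points map onto the new gt-scoped API as follows (all taken from the hunks in this commit):

intel_engine_get_pool(engine, size)        ->  intel_gt_get_buffer_pool(engine->gt, size)
intel_engine_pool_mark_active(node, rq)    ->  intel_gt_buffer_pool_mark_active(node, rq)
intel_engine_pool_put(node)                ->  intel_gt_buffer_pool_put(node)
intel_engine_pool_init(&engine->pool)      ->  intel_gt_init_buffer_pool(gt)
intel_engine_pool_park(&engine->pool)      ->  dropped; replaced by the delayed worker and intel_gt_flush_buffer_pool(gt)
intel_engine_pool_fini(&engine->pool)      ->  intel_gt_fini_buffer_pool(gt)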
drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -22,7 +22,6 @@
 #include "i915_pmu.h"
 #include "i915_priolist_types.h"
 #include "i915_selftest.h"
-#include "intel_engine_pool_types.h"
 #include "intel_sseu.h"
 #include "intel_timeline_types.h"
 #include "intel_wakeref.h"
@@ -405,13 +404,6 @@ struct intel_engine_cs {
 		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
 	} pmu;
 
-	/*
-	 * A pool of objects to use as shadow copies of client batch buffers
-	 * when the command parser is enabled. Prevents the client from
-	 * modifying the batch contents after software parsing.
-	 */
-	struct intel_engine_pool pool;
-
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
 	struct i915_wa_list ctx_wa_list;
drivers/gpu/drm/i915/gt/intel_gt.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "intel_context.h"
 #include "intel_gt.h"
+#include "intel_gt_buffer_pool.h"
 #include "intel_gt_clock_utils.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
@@ -28,6 +29,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 	INIT_LIST_HEAD(&gt->closed_vma);
 	spin_lock_init(&gt->closed_lock);
 
+	intel_gt_init_buffer_pool(gt);
 	intel_gt_init_reset(gt);
 	intel_gt_init_requests(gt);
 	intel_gt_init_timelines(gt);
@@ -621,6 +623,7 @@ void intel_gt_driver_release(struct intel_gt *gt)
 	intel_gt_pm_fini(gt);
 	intel_gt_fini_scratch(gt);
+	intel_gt_fini_buffer_pool(gt);
 }
 
 void intel_gt_driver_late_release(struct intel_gt *gt)
drivers/gpu/drm/i915/gt/intel_engine_pool.c → drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c (renamed)
@@ -1,5 +1,4 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2014-2018 Intel Corporation
  */
@@ -8,15 +7,15 @@
 #include "i915_drv.h"
 #include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
+#include "intel_gt_buffer_pool.h"
 
-static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
 {
-	return container_of(pool, struct intel_engine_cs, pool);
+	return container_of(pool, struct intel_gt, buffer_pool);
 }
 
 static struct list_head *
-bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
 {
 	int n;
@@ -32,16 +31,50 @@ bucket_for_size(struct intel_engine_pool *pool, size_t sz)
 	return &pool->cache_list[n];
 }
 
-static void node_free(struct intel_engine_pool_node *node)
+static void node_free(struct intel_gt_buffer_pool_node *node)
 {
 	i915_gem_object_put(node->obj);
 	i915_active_fini(&node->active);
 	kfree(node);
 }
 
+static void pool_free_work(struct work_struct *wrk)
+{
+	struct intel_gt_buffer_pool *pool =
+		container_of(wrk, typeof(*pool), work.work);
+	struct intel_gt_buffer_pool_node *node, *next;
+	unsigned long old = jiffies - HZ;
+	bool active = false;
+	LIST_HEAD(stale);
+	int n;
+
+	/* Free buffers that have not been used in the past second */
+	spin_lock_irq(&pool->lock);
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+		struct list_head *list = &pool->cache_list[n];
+
+		/* Most recent at head; oldest at tail */
+		list_for_each_entry_safe_reverse(node, next, list, link) {
+			if (time_before(old, node->age))
+				break;
+
+			list_move(&node->link, &stale);
+		}
+		active |= !list_empty(list);
+	}
+	spin_unlock_irq(&pool->lock);
+
+	list_for_each_entry_safe(node, next, &stale, link)
+		node_free(node);
+
+	if (active)
+		schedule_delayed_work(&pool->work,
+				      round_jiffies_up_relative(HZ));
+}
+
 static int pool_active(struct i915_active *ref)
 {
-	struct intel_engine_pool_node *node =
+	struct intel_gt_buffer_pool_node *node =
 		container_of(ref, typeof(*node), active);
 	struct dma_resv *resv = node->obj->base.resv;
 	int err;
@@ -64,29 +97,31 @@ static int pool_active(struct i915_active *ref)
 __i915_active_call
 static void pool_retire(struct i915_active *ref)
 {
-	struct intel_engine_pool_node *node =
+	struct intel_gt_buffer_pool_node *node =
 		container_of(ref, typeof(*node), active);
-	struct intel_engine_pool *pool = node->pool;
+	struct intel_gt_buffer_pool *pool = node->pool;
 	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
 	unsigned long flags;
 
-	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
 	i915_gem_object_unpin_pages(node->obj);
 
 	/* Return this object to the shrinker pool */
 	i915_gem_object_make_purgeable(node->obj);
 
 	spin_lock_irqsave(&pool->lock, flags);
+	node->age = jiffies;
 	list_add(&node->link, list);
 	spin_unlock_irqrestore(&pool->lock, flags);
+
+	schedule_delayed_work(&pool->work,
+			      round_jiffies_up_relative(HZ));
 }
 
-static struct intel_engine_pool_node *
-node_create(struct intel_engine_pool *pool, size_t sz)
+static struct intel_gt_buffer_pool_node *
+node_create(struct intel_gt_buffer_pool *pool, size_t sz)
 {
-	struct intel_engine_cs *engine = to_engine(pool);
-	struct intel_engine_pool_node *node;
+	struct intel_gt *gt = to_gt(pool);
+	struct intel_gt_buffer_pool_node *node;
 	struct drm_i915_gem_object *obj;
 
 	node = kmalloc(sizeof(*node),
@@ -97,7 +132,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
 	node->pool = pool;
 	i915_active_init(&node->active, pool_active, pool_retire);
 
-	obj = i915_gem_object_create_internal(engine->i915, sz);
+	obj = i915_gem_object_create_internal(gt->i915, sz);
 	if (IS_ERR(obj)) {
 		i915_active_fini(&node->active);
 		kfree(node);
@@ -110,26 +145,15 @@ node_create(struct intel_engine_pool *pool, size_t sz)
 	return node;
 }
 
-static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
-{
-	if (intel_engine_is_virtual(engine))
-		engine = intel_virtual_engine_get_sibling(engine, 0);
-
-	GEM_BUG_ON(!engine);
-	return &engine->pool;
-}
-
-struct intel_engine_pool_node *
-intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
 {
-	struct intel_engine_pool *pool = lookup_pool(engine);
-	struct intel_engine_pool_node *node;
+	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+	struct intel_gt_buffer_pool_node *node;
 	struct list_head *list;
 	unsigned long flags;
 	int ret;
 
-	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
-
 	size = PAGE_ALIGN(size);
 	list = bucket_for_size(pool, size);
@@ -157,34 +181,48 @@ intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
 	return node;
 }
 
-void intel_engine_pool_init(struct intel_engine_pool *pool)
+void intel_gt_init_buffer_pool(struct intel_gt *gt)
 {
+	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
 	int n;
 
 	spin_lock_init(&pool->lock);
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
 		INIT_LIST_HEAD(&pool->cache_list[n]);
+	INIT_DELAYED_WORK(&pool->work, pool_free_work);
 }
 
-void intel_engine_pool_park(struct intel_engine_pool *pool)
+static void pool_free_imm(struct intel_gt_buffer_pool *pool)
 {
 	int n;
 
+	spin_lock_irq(&pool->lock);
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+		struct intel_gt_buffer_pool_node *node, *next;
 		struct list_head *list = &pool->cache_list[n];
-		struct intel_engine_pool_node *node, *nn;
 
-		list_for_each_entry_safe(node, nn, list, link)
+		list_for_each_entry_safe(node, next, list, link)
 			node_free(node);
-
 		INIT_LIST_HEAD(list);
 	}
+	spin_unlock_irq(&pool->lock);
+}
+
+void intel_gt_flush_buffer_pool(struct intel_gt *gt)
+{
+	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
+
+	if (cancel_delayed_work_sync(&pool->work))
+		pool_free_imm(pool);
 }
 
-void intel_engine_pool_fini(struct intel_engine_pool *pool)
+void intel_gt_fini_buffer_pool(struct intel_gt *gt)
 {
+	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
 	int n;
 
+	intel_gt_flush_buffer_pool(gt);
+
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
 		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
 }
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h (new file)
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_GT_BUFFER_POOL_H
+#define INTEL_GT_BUFFER_POOL_H
+
+#include <linux/types.h>
+
+#include "i915_active.h"
+#include "intel_gt_buffer_pool_types.h"
+
+struct intel_gt;
+struct i915_request;
+
+struct intel_gt_buffer_pool_node *
+intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size);
+
+static inline int
+intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
+				 struct i915_request *rq)
+{
+	return i915_active_add_request(&node->active, rq);
+}
+
+static inline void
+intel_gt_buffer_pool_put(struct intel_gt_buffer_pool_node *node)
+{
+	i915_active_release(&node->active);
+}
+
+void intel_gt_init_buffer_pool(struct intel_gt *gt);
+void intel_gt_flush_buffer_pool(struct intel_gt *gt);
+void intel_gt_fini_buffer_pool(struct intel_gt *gt);
+
+#endif /* INTEL_GT_BUFFER_POOL_H */
drivers/gpu/drm/i915/gt/intel_engine_pool_types.h → drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h (renamed)
@@ -4,26 +4,29 @@
  * Copyright © 2014-2018 Intel Corporation
  */
 
-#ifndef INTEL_ENGINE_POOL_TYPES_H
-#define INTEL_ENGINE_POOL_TYPES_H
+#ifndef INTEL_GT_BUFFER_POOL_TYPES_H
+#define INTEL_GT_BUFFER_POOL_TYPES_H
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/workqueue.h>
 
 #include "i915_active_types.h"
 
 struct drm_i915_gem_object;
 
-struct intel_engine_pool {
+struct intel_gt_buffer_pool {
 	spinlock_t lock;
 	struct list_head cache_list[4];
+	struct delayed_work work;
 };
 
-struct intel_engine_pool_node {
+struct intel_gt_buffer_pool_node {
 	struct i915_active active;
 	struct drm_i915_gem_object *obj;
 	struct list_head link;
-	struct intel_engine_pool *pool;
+	struct intel_gt_buffer_pool *pool;
+	unsigned long age;
 };
 
-#endif /* INTEL_ENGINE_POOL_TYPES_H */
+#endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -17,6 +17,7 @@
 #include "i915_vma.h"
 #include "intel_engine_types.h"
+#include "intel_gt_buffer_pool_types.h"
 #include "intel_llc_types.h"
 #include "intel_reset_types.h"
 #include "intel_rc6_types.h"
@@ -97,6 +98,16 @@ struct intel_gt {
 	 */
 	struct i915_address_space *vm;
 
+	/*
+	 * A pool of objects to use as shadow copies of client batch buffers
+	 * when the command parser is enabled. Prevents the client from
+	 * modifying the batch contents after software parsing.
+	 *
+	 * Buffers older than 1s are periodically reaped from the pool,
+	 * or may be reclaimed by the shrinker before then.
+	 */
+	struct intel_gt_buffer_pool buffer_pool;
+
 	struct i915_vma *scratch;
 };
drivers/gpu/drm/i915/gt/mock_engine.c
@@ -28,7 +28,6 @@
 #include "i915_drv.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
-#include "intel_engine_pool.h"
 
 #include "mock_engine.h"
 #include "selftests/mock_request.h"
@@ -328,7 +327,6 @@ int mock_engine_init(struct intel_engine_cs *engine)
 	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
-	intel_engine_pool_init(&engine->pool);
 
 	ce = create_kernel_context(engine);
 	if (IS_ERR(ce))
drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
 #include <drm/drm_debugfs.h>
 
 #include "gem/i915_gem_context.h"
+#include "gt/intel_gt_buffer_pool.h"
 #include "gt/intel_gt_clock_utils.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
@@ -1484,6 +1485,9 @@ gt_drop_caches(struct intel_gt *gt, u64 val)
 	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
 		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
 
+	if (val & DROP_FREED)
+		intel_gt_flush_buffer_pool(gt);
+
 	return 0;
 }