Commit 06b73c2d authored by Chris Wilson, committed by Joonas Lahtinen

drm/i915/gt: Delay taking the spinlock for grabbing from the buffer pool

Some very low hanging fruit, but contention on the pool->lock is
noticeable between intel_gt_get_buffer_pool() and pool_retire(), with
the majority of the hold time due to the locked list iteration. If we
make the node itself RCU protected, we can perform the search for a
suitable node just under RCU, taking the lock only to claim the node and
manipulate the list.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200729080245.8070-1-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent a817c891
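The locking change described above boils down to one idea: a node's age doubles as an ownership flag, with 0 reserved to mean "claimed". Below is a minimal standalone C11 model of that claiming protocol, using C11 atomics in place of the kernel's xchg()/cmpxchg(); the names here (struct node, try_claim, reap) are illustrative, not from the i915 source.

	#include <stdatomic.h>
	#include <stdio.h>

	struct node {
		atomic_ulong age;	/* 0 is reserved: it means "claimed" */
	};

	/* Searcher path: claim only if we saw a nonzero age (cmpxchg analogue). */
	static int try_claim(struct node *n)
	{
		unsigned long age = atomic_load(&n->age);

		if (!age)		/* already claimed by someone else */
			return 0;
		return atomic_compare_exchange_strong(&n->age, &age, 0);
	}

	/* Reaper path: unconditionally swap the age out (xchg analogue). */
	static int reap(struct node *n)
	{
		return atomic_exchange(&n->age, 0) != 0;
	}

	int main(void)
	{
		struct node n = { .age = 42 };

		printf("first claim:  %d\n", try_claim(&n));	/* 1: we won */
		printf("second claim: %d\n", try_claim(&n));	/* 0: already claimed */

		atomic_store(&n.age, 7);
		printf("reap:         %d\n", reap(&n));		/* 1 */
		return 0;
	}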
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -35,39 +35,62 @@ static void node_free(struct intel_gt_buffer_pool_node *node)
 {
 	i915_gem_object_put(node->obj);
 	i915_active_fini(&node->active);
-	kfree(node);
+	kfree_rcu(node, rcu);
 }
 
-static void pool_free_work(struct work_struct *wrk)
+static bool
+pool_free_older_than(struct intel_gt_buffer_pool *pool, unsigned long old)
 {
-	struct intel_gt_buffer_pool *pool =
-		container_of(wrk, typeof(*pool), work.work);
-	struct intel_gt_buffer_pool_node *node, *next;
-	unsigned long old = jiffies - HZ;
+	struct intel_gt_buffer_pool_node *node, *stale = NULL;
 	bool active = false;
-	LIST_HEAD(stale);
 	int n;
 
 	/* Free buffers that have not been used in the past second */
-	spin_lock_irq(&pool->lock);
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
 		struct list_head *list = &pool->cache_list[n];
 
-		/* Most recent at head; oldest at tail */
-		list_for_each_entry_safe_reverse(node, next, list, link) {
-			if (time_before(node->age, old))
-				break;
+		if (list_empty(list))
+			continue;
+
+		if (spin_trylock_irq(&pool->lock)) {
+			struct list_head *pos;
 
-			list_move(&node->link, &stale);
+			/* Most recent at head; oldest at tail */
+			list_for_each_prev(pos, list) {
+				node = list_entry(pos, typeof(*node), link);
+				if (time_before(node->age, old))
+					break;
+
+				/* Check we are the first to claim this node */
+				if (!xchg(&node->age, 0))
+					break;
+
+				node->free = stale;
+				stale = node;
+			}
+			if (!list_is_last(pos, list))
+				__list_del_many(pos, list);
+
+			spin_unlock_irq(&pool->lock);
 		}
 
 		active |= !list_empty(list);
 	}
-	spin_unlock_irq(&pool->lock);
 
-	list_for_each_entry_safe(node, next, &stale, link)
+	while ((node = stale)) {
+		stale = stale->free;
 		node_free(node);
+	}
+
+	return active;
+}
+
+static void pool_free_work(struct work_struct *wrk)
+{
+	struct intel_gt_buffer_pool *pool =
+		container_of(wrk, typeof(*pool), work.work);
 
-	if (active)
+	if (pool_free_older_than(pool, jiffies - HZ))
 		schedule_delayed_work(&pool->work,
 				      round_jiffies_up_relative(HZ));
 }
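Note how the reaper no longer needs a second list_head: claimed nodes are threaded into a singly linked chain through the union'd free pointer and drained outside the lock. A minimal sketch of that drain pattern, with hypothetical names:

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative names; the kernel node chains through its union'd
	 * "free" member in exactly this shape. */
	struct item {
		int id;
		struct item *free;
	};

	int main(void)
	{
		struct item *stale = NULL, *it;
		int i;

		/* Push three items, newest first, as the reaper does under the lock. */
		for (i = 0; i < 3; i++) {
			it = malloc(sizeof(*it));
			if (!it)
				return 1;
			it->id = i;
			it->free = stale;
			stale = it;
		}

		/* Drain outside the lock, same shape as the patch's while loop. */
		while ((it = stale)) {
			stale = stale->free;
			printf("freeing item %d\n", it->id);	/* node_free() here */
			free(it);
		}
		return 0;
	}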
@@ -108,9 +131,9 @@ static void pool_retire(struct i915_active *ref)
 	/* Return this object to the shrinker pool */
 	i915_gem_object_make_purgeable(node->obj);
 
+	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
 	spin_lock_irqsave(&pool->lock, flags);
-	node->age = jiffies;
-	list_add(&node->link, list);
+	list_add_rcu(&node->link, list);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	schedule_delayed_work(&pool->work,
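The age is published with WRITE_ONCE() before the node is re-added under the lock, and 0 is reserved to mark a claimed node, so the GNU ?: extension substitutes 1 on the rare tick where jiffies reads as 0. A tiny standalone demonstration (made-up variable name; requires GNU C mode, the gcc/clang default):

	#include <stdio.h>

	int main(void)
	{
		unsigned long jiffies_now = 0;	/* pretend the tick counter is at 0 */

		/* GNU C's "a ?: b" evaluates to a if a is nonzero, else b. */
		unsigned long age = jiffies_now ?: 1;

		printf("published age = %lu\n", age);	/* 1: never the reserved 0 */
		return 0;
	}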
@@ -151,20 +174,30 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
 	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
 	struct intel_gt_buffer_pool_node *node;
 	struct list_head *list;
-	unsigned long flags;
 	int ret;
 
 	size = PAGE_ALIGN(size);
 	list = bucket_for_size(pool, size);
 
-	spin_lock_irqsave(&pool->lock, flags);
-	list_for_each_entry(node, list, link) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(node, list, link) {
+		unsigned long age;
+
 		if (node->obj->base.size < size)
 			continue;
 
-		list_del(&node->link);
-		break;
+		age = READ_ONCE(node->age);
+		if (!age)
+			continue;
+
+		if (cmpxchg(&node->age, age, 0) == age) {
+			spin_lock_irq(&pool->lock);
+			list_del_rcu(&node->link);
+			spin_unlock_irq(&pool->lock);
+			break;
+		}
 	}
-	spin_unlock_irqrestore(&pool->lock, flags);
+	rcu_read_unlock();
 
 	if (&node->link == list) {
 		node = node_create(pool, size);
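The subsequent `if (&node->link == list)` test still works because a list_for_each_entry-style walk that finds nothing leaves the cursor pointing at the list head. A small self-contained sketch of that found/not-found idiom, using our own minimal list helpers rather than the kernel's:

	#include <stddef.h>
	#include <stdio.h>

	/* Just enough intrusive-list machinery to show the idiom. */
	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct node {
		size_t size;
		struct list_head link;
	};

	int main(void)
	{
		struct list_head head = { &head, &head };
		struct node small = { .size = 4096 };
		struct list_head *pos;
		struct node *node = NULL;

		/* list_add(&small.link, &head) by hand */
		small.link.next = head.next;
		small.link.prev = &head;
		head.next->prev = &small.link;
		head.next = &small.link;

		/* search for a node of at least 8 KiB; none exists */
		for (pos = head.next; pos != &head; pos = pos->next) {
			node = container_of(pos, struct node, link);
			if (node->size >= 8192)
				break;
		}

		/* mirrors the patch's "if (&node->link == list)" fallback */
		if (pos == &head)
			printf("no suitable node: fall back to node_create()\n");
		else
			printf("claimed node of %zu bytes\n", node->size);
		return 0;
	}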
@@ -192,28 +225,13 @@ void intel_gt_init_buffer_pool(struct intel_gt *gt)
 	INIT_DELAYED_WORK(&pool->work, pool_free_work);
 }
 
-static void pool_free_imm(struct intel_gt_buffer_pool *pool)
-{
-	int n;
-
-	spin_lock_irq(&pool->lock);
-	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
-		struct intel_gt_buffer_pool_node *node, *next;
-		struct list_head *list = &pool->cache_list[n];
-
-		list_for_each_entry_safe(node, next, list, link)
-			node_free(node);
-		INIT_LIST_HEAD(list);
-	}
-	spin_unlock_irq(&pool->lock);
-}
-
 void intel_gt_flush_buffer_pool(struct intel_gt *gt)
 {
 	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
 
 	do {
-		pool_free_imm(pool);
+		while (pool_free_older_than(pool, jiffies + 1))
+			;
 	} while (cancel_delayed_work_sync(&pool->work));
 }
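Passing jiffies + 1 as the cutoff makes every node, even one retired this very tick, "older than" the deadline, so the flush loop drains the pool completely. A standalone sketch of the wrap-safe comparison; the HZ value of 250 below is an assumption for illustration:

	#include <stdio.h>

	/* Same shape as the kernel's wrap-safe jiffies comparison. */
	#define time_before(a, b)	((long)((b) - (a)) > 0)

	int main(void)
	{
		unsigned long jiffies = 4294967000UL;	/* arbitrary tick value */
		unsigned long age = jiffies;		/* a node retired "just now" */

		/* flush path: cutoff jiffies + 1 makes even the newest node stale */
		printf("flush frees it:  %d\n", time_before(age, jiffies + 1));   /* 1 */

		/* worker path: cutoff jiffies - HZ keeps anything under 1s old */
		printf("worker keeps it: %d\n", time_before(age, jiffies - 250)); /* 0 */
		return 0;
	}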
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -25,7 +25,11 @@ struct intel_gt_buffer_pool_node {
 	struct i915_active active;
 	struct drm_i915_gem_object *obj;
 	struct list_head link;
-	struct intel_gt_buffer_pool *pool;
+	union {
+		struct intel_gt_buffer_pool *pool;
+		struct intel_gt_buffer_pool_node *free;
+		struct rcu_head rcu;
+	};
 	unsigned long age;
 };
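The three union members are only ever needed in disjoint phases of a node's life: pool while the node is cached, free while the reaper threads it for freeing, rcu once it is handed to kfree_rcu(). That is why they can share storage and the patch adds two fields without growing the struct. A userspace sketch of the layout; names are ours, and the kernel-only rcu_head member is omitted:

	#include <stdio.h>

	struct pool;	/* opaque stand-in for struct intel_gt_buffer_pool */

	/* Members live in disjoint phases, so a union lets them overlap. */
	struct node {
		union {
			struct pool *pool;	/* while cached: owner back-pointer */
			struct node *free;	/* while reaped: stale-chain link */
		};
		unsigned long age;
	};

	int main(void)
	{
		struct node n = { .age = 1 };

		n.pool = NULL;		/* phase 1: node belongs to a pool */
		n.free = &n;		/* phase 2: node threaded for freeing */
		printf("node is still %zu bytes\n", sizeof(n));
		return 0;
	}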