Commit fc0e1270 authored by Chris Wilson

drm/i915: Improve execute_cb struct packing

Reuse the irq_work llist for attaching the callbacks to the signal, for
both smaller structs (two fewer pointers!) and simpler [debug] code:

Function                                     old     new   delta
irq_execute_cb                                35      34      -1
__igt_breadcrumbs_smoketest                 1684    1682      -2
i915_request_retire                         2003    1996      -7
__i915_request_create                       1047    1040      -7
__notify_execute_cb                          135     126      -9
__i915_request_ctor                          188     178     -10
__await_execution.part.constprop             451     440     -11
igt_wait_request                             924     714    -210
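
For context, a minimal sketch of the packing change (the _before/_after names are
illustrative only; the fields are the ones visible in the diff below): dropping the
dedicated list_head and reusing the llist_node already embedded in irq_work saves two
pointers per callback on 64-bit builds.

/* Sketch only: before/after layout of the callback node. */
struct execute_cb_before {
	struct list_head link;	/* next + prev: the two extra pointers */
	struct irq_work work;	/* already embeds a struct llist_node */
	struct i915_sw_fence *fence;
	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
};

struct execute_cb_after {
	struct irq_work work;	/* work.llnode now doubles as the list linkage */
	struct i915_sw_fence *fence;
	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
};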

One minor artifact is that the order of cb execution is reversed. No
current use cases are affected by that change.
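
The reversal follows from the llist push semantics: the open-coded __llist_add() in the
diff below inserts each new callback at the head of a singly linked list, so the walk
visits callbacks newest-first, whereas list_add_tail() preserved insertion order. A
self-contained userspace sketch of that behaviour (types and names here are hypothetical
stand-ins, not driver code):

#include <stdio.h>

/* Hypothetical stand-ins for llist_node/llist_head: each add pushes
 * onto the head of a singly linked list, as __llist_add() does. */
struct node { int id; struct node *next; };
struct list { struct node *first; };

static void push_head(struct list *l, struct node *n)
{
	n->next = l->first;	/* newest element becomes the list head */
	l->first = n;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct list l = { NULL };

	push_head(&l, &a);
	push_head(&l, &b);
	push_head(&l, &c);

	/* Prints 3 2 1: traversal order is the reverse of insertion order. */
	for (struct node *n = l.first; n; n = n->next)
		printf("%d ", n->id);
	printf("\n");

	return 0;
}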
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200526112051.10229-1-chris@chris-wilson.co.uk
parent 9ae6c4ef
@@ -42,7 +42,6 @@
 #include "intel_pm.h"
 
 struct execute_cb {
-	struct list_head link;
 	struct irq_work work;
 	struct i915_sw_fence *fence;
 	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
@@ -189,14 +188,14 @@ static void irq_execute_cb_hook(struct irq_work *wrk)
 
 static void __notify_execute_cb(struct i915_request *rq)
 {
-	struct execute_cb *cb;
+	struct execute_cb *cb, *cn;
 
 	lockdep_assert_held(&rq->lock);
 
-	if (list_empty(&rq->execute_cb))
+	if (llist_empty(&rq->execute_cb))
 		return;
 
-	list_for_each_entry(cb, &rq->execute_cb, link)
+	llist_for_each_entry_safe(cb, cn, rq->execute_cb.first, work.llnode)
 		irq_work_queue(&cb->work);
 
 	/*
@@ -209,7 +208,7 @@ static void __notify_execute_cb(struct i915_request *rq)
 	 * preempt-to-idle cycle on the target engine, all the while the
 	 * master execute_cb may refire.
 	 */
-	INIT_LIST_HEAD(&rq->execute_cb);
+	init_llist_head(&rq->execute_cb);
 }
 
 static inline void
@@ -327,7 +326,7 @@ bool i915_request_retire(struct i915_request *rq)
 		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 		__notify_execute_cb(rq);
 	}
-	GEM_BUG_ON(!list_empty(&rq->execute_cb));
+	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
 	spin_unlock_irq(&rq->lock);
 
 	remove_from_client(rq);
@@ -357,6 +356,12 @@ void i915_request_retire_upto(struct i915_request *rq)
 	} while (i915_request_retire(tmp) && tmp != rq);
 }
 
+static void __llist_add(struct llist_node *node, struct llist_head *head)
+{
+	node->next = head->first;
+	head->first = node;
+}
+
 static int
 __await_execution(struct i915_request *rq,
 		  struct i915_request *signal,
@@ -395,7 +400,7 @@ __await_execution(struct i915_request *rq,
 		i915_sw_fence_complete(cb->fence);
 		kmem_cache_free(global.slab_execute_cbs, cb);
 	} else {
-		list_add_tail(&cb->link, &signal->execute_cb);
+		__llist_add(&cb->work.llnode, &signal->execute_cb);
 	}
 	spin_unlock_irq(&signal->lock);
 
@@ -704,7 +709,7 @@ static void __i915_request_ctor(void *arg)
 	rq->file_priv = NULL;
 	rq->capture_list = NULL;
 
-	INIT_LIST_HEAD(&rq->execute_cb);
+	init_llist_head(&rq->execute_cb);
 }
 
 struct i915_request *
@@ -794,7 +799,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->batch = NULL;
 	GEM_BUG_ON(rq->file_priv);
 	GEM_BUG_ON(rq->capture_list);
-	GEM_BUG_ON(!list_empty(&rq->execute_cb));
+	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
 
 	/*
 	 * Reserve space in the ring buffer for all the commands required to
@@ -214,7 +214,7 @@ struct i915_request {
 			ktime_t emitted;
 		} duration;
 	};
-	struct list_head execute_cb;
+	struct llist_head execute_cb;
 	struct i915_sw_fence semaphore;
 
 	/*