Commit e1c31fb5 authored by Chris Wilson

drm/i915: Merge i915_request.flags with i915_request.fence.flags

As we already have a flags field buried within i915_request, reuse it!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200106114234.2529613-3-chris@chris-wilson.co.uk
parent 6d728d92
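
For orientation before the diff: the change drops the private request->flags word (tested with BIT() masks) and instead stores those per-request properties as bit numbers in the dma_fence already embedded in every request, flipped and read with the kernel's set_bit()/__set_bit()/test_bit() helpers. The sketch below is an illustrative, userspace-only rendition of that pattern, not kernel code: the struct names and bit positions are simplified placeholders, and the bit helpers are minimal non-atomic stand-ins for the kernel's.

/* Illustrative userspace sketch of the pattern adopted by this commit:
 * per-request properties live as bit numbers in the embedded fence's
 * flags word instead of BIT() masks in a separate request->flags field.
 * Types and bit helpers are simplified, non-atomic stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	FENCE_FLAG_NOPREEMPT = 4,	/* placeholder bit numbers */
	FENCE_FLAG_SENTINEL,
	FENCE_FLAG_BOOST,
};

struct fence   { unsigned long flags; };
struct request { struct fence fence; };

static void sketch_set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static bool sketch_test_bit(int nr, const unsigned long *addr)
{
	return *addr & (1UL << nr);
}

int main(void)
{
	struct request rq = { .fence.flags = 0 };

	/* old style: rq.flags |= REQUEST_NOPREEMPT (a BIT() mask)  */
	/* new style: flip a bit directly in the fence's flags word */
	sketch_set_bit(FENCE_FLAG_NOPREEMPT, &rq.fence.flags);

	printf("nopreempt=%d boost=%d\n",
	       sketch_test_bit(FENCE_FLAG_NOPREEMPT, &rq.fence.flags),
	       sketch_test_bit(FENCE_FLAG_BOOST, &rq.fence.flags));
	return 0;
}

The saving is that struct i915_request loses one unsigned long, and the driver-private bits simply share the fence's existing flags word with the dma-fence core's own bits.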
@@ -2173,7 +2173,7 @@ static int eb_submit(struct i915_execbuffer *eb)
 	}
 
 	if (intel_context_nopreempt(eb->context))
-		eb->request->flags |= I915_REQUEST_NOPREEMPT;
+		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
 
 	return 0;
 }
@@ -199,7 +199,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 		goto out_unlock;
 	}
 
-	rq->flags |= I915_REQUEST_SENTINEL;
+	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
 	idle_pulse(engine, rq);
 
 	__i915_request_commit(rq);
@@ -1538,8 +1538,8 @@ static bool can_merge_rq(const struct i915_request *prev,
 	if (i915_request_completed(next))
 		return true;
 
-	if (unlikely((prev->flags ^ next->flags) &
-		     (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+	if (unlikely((prev->fence.flags ^ next->fence.flags) &
+		     (I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL)))
 		return false;
 
 	if (!can_merge_ctx(prev->context, next->context))
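
The can_merge_rq() hunk above keeps the same rule, only expressed against the fence's flags word: two requests may be coalesced into one submission only if they agree on both the no-preempt and sentinel bits. As a hedged illustration of the XOR idiom (plain C, placeholder bit positions, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_NOPREEMPT	(1UL << 4)	/* placeholder bit positions */
#define FLAG_SENTINEL	(1UL << 5)

/* A set bit in (prev ^ next) marks a flag the two requests disagree on;
 * masking with both bits rejects a mismatch on either flag. */
static bool flags_allow_merge(unsigned long prev, unsigned long next)
{
	return !((prev ^ next) & (FLAG_NOPREEMPT | FLAG_SENTINEL));
}

int main(void)
{
	printf("%d %d\n",
	       flags_allow_merge(FLAG_SENTINEL, FLAG_SENTINEL),	/* 1: both sentinel  */
	       flags_allow_merge(FLAG_NOPREEMPT, 0));		/* 0: they disagree  */
	return 0;
}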
@@ -777,7 +777,7 @@ void intel_rps_boost(struct i915_request *rq)
 	spin_lock_irqsave(&rq->lock, flags);
 	if (!i915_request_has_waitboost(rq) &&
 	    !dma_fence_is_signaled_locked(&rq->fence)) {
-		rq->flags |= I915_REQUEST_WAITBOOST;
+		set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
 
 		if (!atomic_fetch_inc(&rps->num_waiters) &&
 		    READ_ONCE(rps->cur_freq) < rps->boost_freq)
@@ -1153,7 +1153,7 @@ static int live_nopreempt(void *arg)
 		}
 
 		/* Low priority client, but unpreemptable! */
-		rq_a->flags |= I915_REQUEST_NOPREEMPT;
+		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
 
 		i915_request_add(rq_a);
 		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
@@ -658,7 +658,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
-	rq->flags = 0;
 
 	RCU_INIT_POINTER(rq->timeline, tl);
 	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
@@ -77,6 +77,38 @@ enum {
 	 * a request is on the various signal_list.
 	 */
 	I915_FENCE_FLAG_SIGNAL,
+
+	/*
+	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
+	 *
+	 * The execution of some requests should not be interrupted. This is
+	 * a sensitive operation as it makes the request super important,
+	 * blocking other higher priority work. Abuse of this flag will
+	 * lead to quality of service issues.
+	 */
+	I915_FENCE_FLAG_NOPREEMPT,
+
+	/*
+	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
+	 *
+	 * A high priority sentinel request may be submitted to clear the
+	 * submission queue. As it will be the only request in-flight, upon
+	 * execution all other active requests will have been preempted and
+	 * unsubmitted. This preemptive pulse is used to re-evaluate the
+	 * in-flight requests, particularly in cases where an active context
+	 * is banned and those active requests need to be cancelled.
+	 */
+	I915_FENCE_FLAG_SENTINEL,
+
+	/*
+	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
+	 *
+	 * Some requests are more important than others! In particular, a
+	 * request that the user is waiting on is typically required for
+	 * interactive latency, for which we want to minimise by upclocking
+	 * the GPU. Here we track such boost requests on a per-request basis.
+	 */
+	I915_FENCE_FLAG_BOOST,
 };
 
 /**
@@ -225,11 +257,6 @@ struct i915_request {
 
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	unsigned long flags;
-#define I915_REQUEST_WAITBOOST	BIT(0)
-#define I915_REQUEST_NOPREEMPT	BIT(1)
-#define I915_REQUEST_SENTINEL	BIT(2)
-
 	/** timeline->request entry for this request */
 	struct list_head link;
@@ -442,18 +469,18 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
 
 static inline bool i915_request_has_waitboost(const struct i915_request *rq)
 {
-	return rq->flags & I915_REQUEST_WAITBOOST;
+	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
 }
 
 static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
 {
 	/* Preemption should only be disabled very rarely */
-	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
+	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
 }
 
 static inline bool i915_request_has_sentinel(const struct i915_request *rq)
 {
-	return unlikely(rq->flags & I915_REQUEST_SENTINEL);
+	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
 }
 
 static inline struct intel_timeline *