Commit f24a44e5 authored by Chris Wilson

drm/i915/gt: Widen CSB pointer to u64 for the parsers

A CSB entry is 64b, and it is simpler for us to treat it as an array of
64b entries than as an array of pairs of 32b entries.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200915134923.30088-1-chris@chris-wilson.co.uk
parent 6cb304b3
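
As a quick illustration of what the commit message describes (this sketch is not part of the commit; dump_entry_u32() and dump_entry_u64() are hypothetical helpers, with local stand-ins for the kernel's lower_32_bits()/upper_32_bits()), the same CSB entry can be read either as a pair of u32 status dwords or as one u64 value:

/* Hypothetical sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's lower_32_bits()/upper_32_bits(). */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

/* Old view: the CSB is u32[], so entry 'head' is the pair
 * buf[2 * head + 0] (low dword) and buf[2 * head + 1] (high dword). */
static void dump_entry_u32(const uint32_t *buf, unsigned int head)
{
	printf("csb[%u]: status=0x%08x:0x%08x\n",
	       head, buf[2 * head + 0], buf[2 * head + 1]);
}

/* New view: the CSB is u64[], so entry 'head' is simply buf[head]
 * and the two status dwords are split out on demand. */
static void dump_entry_u64(const uint64_t *buf, unsigned int head)
{
	uint64_t entry = buf[head];

	printf("csb[%u]: status=0x%08x:0x%08x\n",
	       head, upper_32_bits(entry), lower_32_bits(entry));
}

int main(void)
{
	/* Made-up entry; on a little-endian CPU the u32 view below aliases
	 * the same bytes as the u64 view. */
	uint64_t csb[1] = { 0x0000800100000001ull };

	dump_entry_u32((const uint32_t *)csb, 0);
	dump_entry_u64(csb, 0);
	return 0;
}

The u64 view is what the diff below switches the parsers to: the entry index is used directly instead of being doubled, and the two dwords are recovered only where needed.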
@@ -278,7 +278,7 @@ struct intel_engine_execlists {
	 *
	 * Note these register may be either mmio or HWSP shadow.
	 */
-	u32 *csb_status;
+	u64 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
@@ -2464,7 +2464,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
 }

 static inline void
-invalidate_csb_entries(const u32 *first, const u32 *last)
+invalidate_csb_entries(const u64 *first, const u64 *last)
 {
	clflush((void *)first);
	clflush((void *)last);
@@ -2496,14 +2496,12 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
  * bits 47-57: sw context id of the lrc the GT switched away from
  * bits 58-63: sw counter of the lrc the GT switched away from
  */
-static inline bool
-gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+static inline bool gen12_csb_parse(const u64 *csb)
 {
-	u32 lower_dw = csb[0];
-	u32 upper_dw = csb[1];
-	bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw);
-	bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
-	bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+	u64 entry = READ_ONCE(*csb);
+	bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(entry));
+	bool new_queue =
+		lower_32_bits(entry) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;

	/*
	 * The context switch detail is not guaranteed to be 5 when a preemption
@@ -2513,7 +2511,7 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
	 * would require some extra handling, but we don't support that.
	 */
	if (!ctx_away_valid || new_queue) {
-		GEM_BUG_ON(!ctx_to_valid);
+		GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(entry)));
		return true;
	}
@@ -2522,12 +2520,11 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
	 * context switch on an unsuccessful wait instruction since we always
	 * use polling mode.
	 */
-	GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
+	GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(entry)));
	return false;
 }

-static inline bool
-gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+static inline bool gen8_csb_parse(const u64 *csb)
 {
	return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
 }
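
The bit positions quoted in the comment above pin down where the "switched away from" state lives in the 64b entry. Purely as an illustration (these helpers are hypothetical, not the kernel's GEN12_* macros), the fields can be pulled out like this:

/* Illustration only: decode the "switched away from" fields documented
 * above as bits 47-57 (sw context id) and bits 58-63 (sw counter) of the
 * 64b CSB entry. These are not the kernel's GEN12_* helpers. */
#include <stdint.h>

static inline uint32_t away_sw_ctx_id(uint64_t entry)
{
	return (uint32_t)((entry >> 47) & 0x7ff);	/* 11 bits: 47..57 */
}

static inline uint32_t away_sw_counter(uint64_t entry)
{
	return (uint32_t)(entry >> 58);			/* 6 bits: 58..63 */
}

Since bits 47-63 of the entry are bits 15-31 of its high dword, the away-context check in the new gen12_csb_parse() can work on upper_32_bits(entry) alone, while the switched-to-new-queue flag comes from lower_32_bits(entry).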
@@ -2535,7 +2532,7 @@ gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
 static void process_csb(struct intel_engine_cs *engine)
 {
	struct intel_engine_execlists * const execlists = &engine->execlists;
-	const u32 * const buf = execlists->csb_status;
+	const u64 * const buf = execlists->csb_status;
	const u8 num_entries = execlists->csb_size;
	u8 head, tail;
@@ -2616,12 +2613,14 @@ static void process_csb(struct intel_engine_cs *engine)
		 */
		ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
-			     head, buf[2 * head + 0], buf[2 * head + 1]);
+			     head,
+			     upper_32_bits(buf[head]),
+			     lower_32_bits(buf[head]));

		if (INTEL_GEN(engine->i915) >= 12)
-			promote = gen12_csb_parse(execlists, buf + 2 * head);
+			promote = gen12_csb_parse(buf + head);
		else
-			promote = gen8_csb_parse(execlists, buf + 2 * head);
+			promote = gen8_csb_parse(buf + head);
		if (promote) {
			struct i915_request * const *old = execlists->active;
@@ -5157,7 +5156,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
	}

	execlists->csb_status =
-		&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+		(u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];

	execlists->csb_write =
		&engine->status_page.addr[intel_hws_csb_write_index(i915)];
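
One note on the last hunk: the status page is addressed as an array of 32b dwords, and the cast reinterprets the CSB region inside it as 64b entries. A minimal sketch of that aliasing pattern, with made-up types, sizes, and a stand-in index (not the real I915_HWS_CSB_BUF0_INDEX value or struct layout):

/* Hypothetical sketch of the cast in intel_execlists_submission_setup():
 * the HWSP is dword-indexed, and the CSB region inside it is viewed as
 * an array of 64b entries. Types, sizes and the index are stand-ins. */
#include <stdint.h>

#define CSB_BUF0_INDEX 0x10	/* stand-in, not I915_HWS_CSB_BUF0_INDEX */

struct hwsp_sketch {
	uint32_t addr[1024];	/* dword-indexed view of the status page */
};

static const uint64_t *csb_base(const struct hwsp_sketch *hwsp)
{
	/* Reasonable only because the CSB starts at an even dword index,
	 * so the u64 view is naturally 8-byte aligned. */
	return (const uint64_t *)&hwsp->addr[CSB_BUF0_INDEX];
}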