Commit 9f379407 authored by Daniele Ceraolo Spurio, committed by Chris Wilson

drm/i915: drop lrc header page

Recent GuC binaries (including all the ones we're currently using)
don't require this shared area anymore, having moved the relevant
entries into the stage pool instead. i915 itself doesn't write
anything into it either, so we can safely drop it.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191031013040.25803-1-daniele.ceraolospurio@intel.com
parent dde01d94
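
Before the diff: prior to this patch the context object carried a one-page area shared with the GuC ahead of the per-process HWSP (PPHWSP) and the logical state; with that header dropped, the PPHWSP becomes page 0 of the object. Below is a minimal standalone sketch (ordinary userspace C, not kernel code) of the two layouts; the OLD_/NEW_ prefixes are mine, the macro values mirror the intel_lrc.h hunk further down, and the usual 4 KiB page size is assumed.

    /*
     * Standalone illustration of the old and new LRC page layouts.
     * Macro values mirror the intel_lrc.h hunk; a 4 KiB PAGE_SIZE is assumed.
     */
    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Old layout: [GuC shared page][PPHWSP][logical state] */
    #define OLD_LRC_GUCSHR_PN    0
    #define OLD_LRC_GUCSHR_SZ    1
    #define OLD_LRC_PPHWSP_PN    (OLD_LRC_GUCSHR_PN + OLD_LRC_GUCSHR_SZ)
    #define OLD_LRC_PPHWSP_SZ    1
    #define OLD_LRC_STATE_PN     (OLD_LRC_PPHWSP_PN + OLD_LRC_PPHWSP_SZ)
    #define OLD_LRC_HEADER_PAGES OLD_LRC_PPHWSP_PN

    /* New layout: [PPHWSP][logical state] */
    #define NEW_LRC_PPHWSP_PN    0
    #define NEW_LRC_PPHWSP_SZ    1
    #define NEW_LRC_STATE_PN     (NEW_LRC_PPHWSP_PN + NEW_LRC_PPHWSP_SZ)

    int main(void)
    {
            printf("old: %d header page(s), logical state at byte offset %d\n",
                   OLD_LRC_HEADER_PAGES, OLD_LRC_STATE_PN * PAGE_SIZE);
            printf("new: no header pages, logical state at byte offset %d\n",
                   NEW_LRC_STATE_PN * PAGE_SIZE);
            return 0;
    }

Compiled and run, this reports the logical state moving from byte offset 8192 to 4096, which is the shift the hunks below implement.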
@@ -471,8 +471,7 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 	if (IS_GEN(engine->i915, 8))
 		desc |= GEN8_CTX_L3LLC_COHERENT;
 
-	desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
-								/* bits 12-31 */
+	desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
 	/*
 	 * The following 32bits are copied into the OA reports (dword 2).
 	 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
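
The descriptor change is the most direct consumer of that layout shift: bits 12-31 used to be loaded with the object's GGTT offset advanced by LRC_HEADER_PAGES pages so they landed on the PPHWSP, whereas now the object's own offset is used because the PPHWSP is already page 0. A tiny standalone sketch of the arithmetic; the GGTT offset and the OLD_ macro name are invented purely for illustration.

    /* Standalone sketch only; the GGTT offset below is made up. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE            4096u
    #define OLD_LRC_HEADER_PAGES 1u    /* the dropped GuC shared page */

    int main(void)
    {
            uint64_t ggtt_offset = 0x00123000;  /* example, page aligned */
            uint64_t old_bits = ggtt_offset + OLD_LRC_HEADER_PAGES * PAGE_SIZE;
            uint64_t new_bits = ggtt_offset;    /* bits 12-31, as in the hunk */

            printf("old descriptor address bits: 0x%llx\n",
                   (unsigned long long)old_bits);
            printf("new descriptor address bits: 0x%llx\n",
                   (unsigned long long)new_bits);
            return 0;
    }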
@@ -2316,7 +2315,6 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine)
 	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 		return;
 
-	vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
 	vaddr += engine->context_size;
 
 	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
@@ -2328,7 +2326,6 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
 	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 		return;
 
-	vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
 	vaddr += engine->context_size;
 
 	if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
@@ -3995,12 +3992,6 @@ populate_lr_context(struct intel_context *ce,
 	set_redzone(vaddr, engine);
 
 	if (engine->default_state) {
-		/*
-		 * We only want to copy over the template context state;
-		 * skipping over the headers reserved for GuC communication,
-		 * leaving those as zero.
-		 */
-		const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
 		void *defaults;
 
 		defaults = i915_gem_object_pin_map(engine->default_state,
@@ -4010,7 +4001,7 @@ populate_lr_context(struct intel_context *ce,
 			goto err_unpin_ctx;
 		}
 
-		memcpy(vaddr + start, defaults + start, engine->context_size);
+		memcpy(vaddr, defaults, engine->context_size);
 		i915_gem_object_unpin_map(engine->default_state);
 		inhibit = false;
 	}
@@ -4025,9 +4016,7 @@ populate_lr_context(struct intel_context *ce,
 
 	ret = 0;
 err_unpin_ctx:
-	__i915_gem_object_flush_map(ctx_obj,
-				    LRC_HEADER_PAGES * PAGE_SIZE,
-				    engine->context_size);
+	__i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
 	i915_gem_object_unpin_map(ctx_obj);
 	return ret;
 }
@@ -4044,11 +4033,6 @@ static int __execlists_context_alloc(struct intel_context *ce,
 	GEM_BUG_ON(ce->state);
 	context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
 
-	/*
-	 * Before the actual start of the context image, we insert a few pages
-	 * for our own use and for sharing with the GuC.
-	 */
-	context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 		context_size += I915_GTT_PAGE_SIZE; /* for redzone */
 
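
With the header gone, __execlists_context_alloc sizes the backing object from the engine's context size alone, plus one extra page for the redzone when CONFIG_DRM_I915_DEBUG_GEM is enabled. A standalone sketch of that sizing; the 19-page context size and the debug_gem flag are invented for illustration.

    /* Standalone sketch only; the example context size is made up. */
    #include <stdio.h>

    #define I915_GTT_PAGE_SIZE 4096u
    #define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned int engine_context_size = 19 * I915_GTT_PAGE_SIZE + 128;
            unsigned int debug_gem = 1;  /* pretend CONFIG_DRM_I915_DEBUG_GEM=y */
            unsigned int context_size;

            context_size = ROUND_UP(engine_context_size, I915_GTT_PAGE_SIZE);
            if (debug_gem)
                    context_size += I915_GTT_PAGE_SIZE;  /* redzone */

            printf("context object size: %u bytes (%u pages)\n",
                   context_size, context_size / I915_GTT_PAGE_SIZE);
            return 0;
    }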
@@ -86,31 +86,12 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
 int intel_execlists_submission_init(struct intel_engine_cs *engine);
 
 /* Logical Ring Contexts */
-
-/*
- * We allocate a header at the start of the context image for our own
- * use, therefore the actual location of the logical state is offset
- * from the start of the VMA. The layout is
- *
- * | [guc]          | [hwsp] [logical state] |
- * |<- our header ->|<- context image      ->|
- *
- */
-/* The first page is used for sharing data with the GuC */
-#define LRC_GUCSHR_PN	(0)
-#define LRC_GUCSHR_SZ	(1)
 /* At the start of the context image is its per-process HWS page */
-#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
+#define LRC_PPHWSP_PN	(0)
 #define LRC_PPHWSP_SZ	(1)
-/* Finally we have the logical state for the context */
+/* After the PPHWSP we have the logical state for the context */
 #define LRC_STATE_PN	(LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
 
-/*
- * Currently we include the PPHWSP in __intel_engine_context_size() so
- * the size of the header is synonymous with the start of the PPHWSP.
- */
-#define LRC_HEADER_PAGES LRC_PPHWSP_PN
-
 /* Space within PPHWSP reserved to be used as scratch */
 #define LRC_PPHWSP_SCRATCH	0x34
 #define LRC_PPHWSP_SCRATCH_ADDR	(LRC_PPHWSP_SCRATCH * sizeof(u32))
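
The scratch macros kept at the bottom of this hunk are untouched, but with the PPHWSP now at page 0 the scratch dwords sit at a fixed offset from the very start of the context object. A standalone check of that offset; uint32_t stands in for the kernel's u32, everything else mirrors the macros above.

    /* Standalone sketch only; uint32_t stands in for the kernel's u32. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE               4096
    #define LRC_PPHWSP_PN           0
    #define LRC_PPHWSP_SCRATCH      0x34
    #define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(uint32_t))

    int main(void)
    {
            size_t addr = LRC_PPHWSP_PN * PAGE_SIZE + LRC_PPHWSP_SCRATCH_ADDR;

            printf("PPHWSP scratch at byte offset 0x%zx of the context object\n",
                   addr);
            return 0;
    }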
@@ -103,9 +103,6 @@ static int __live_context_size(struct intel_engine_cs *engine,
 	 *
 	 * TLDR; this overlaps with the execlists redzone.
 	 */
-	if (HAS_EXECLISTS(engine->i915))
-		vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
-
 	vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
 	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
 
@@ -195,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			return -EFAULT;
 		}
 
-		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+		page = i915_gem_object_get_page(ctx_obj, i);
 		dst = kmap(page);
 		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
 				I915_GTT_PAGE_SIZE);
@@ -835,7 +835,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			return;
 		}
 
-		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+		page = i915_gem_object_get_page(ctx_obj, i);
 		src = kmap(page);
 		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
 				I915_GTT_PAGE_SIZE);
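
For GVT the only visible effect is the page index used when shuttling context pages between the guest and the shadow context object: the old code skewed every index by LRC_HEADER_PAGES, the new code uses the guest page index directly. A trivial standalone sketch of the old versus new mapping; the page count and the OLD_ macro name are arbitrary examples.

    /* Standalone sketch only; the page count is an arbitrary example. */
    #include <stdio.h>

    #define OLD_LRC_HEADER_PAGES 1	/* the dropped GuC shared page */

    int main(void)
    {
            int context_pages = 4;	/* example */
            int i;

            for (i = 0; i < context_pages; i++)
                    printf("guest page %d -> ctx_obj page: old %d, new %d\n",
                           i, OLD_LRC_HEADER_PAGES + i, i);
            return 0;
    }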