Commit 16f11f46 authored by Michal Wajdeczko, committed by Chris Wilson

drm/i915/guc: Use formalized struct definition for ads object

Manual pointer manipulation is error prone. Let the compiler calculate
the right offsets for us, in case we need to change the ads layout.

v2: don't call it object (Chris)
v3: restyle offset assignments (Chris)
v4: stylistic reductions
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170314133309.126432-1-michal.wajdeczko@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent b3420dde
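
The idea in this change, as a minimal standalone sketch (the demo_* structs, field names and sizes below are illustrative stand-ins, not the driver's real guc_* definitions): describe the whole allocation as one struct so the compiler derives every size and offset, instead of summing sizeof()s and stepping a void pointer by hand.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, not the driver's real guc_* definitions. */
struct demo_ads        { uint32_t scheduler_policies; uint32_t reg_state_addr; };
struct demo_policies   { uint32_t dpc_promote_time; };
struct demo_reg_state  { uint32_t white_list_count; };

/* One struct describes the whole allocation; the compiler owns the layout. */
struct demo_blob {
	struct demo_ads       ads;
	struct demo_policies  policies;
	struct demo_reg_state reg_state;
	uint8_t               reg_state_buffer[4096];
};

int main(void)
{
	struct demo_blob blob = {0};
	uint32_t ggtt_base = 0x100000;	/* pretend GGTT address of the blob */

	/* Sub-block addresses fall out of offsetof(); no manual arithmetic. */
	blob.ads.scheduler_policies = ggtt_base + offsetof(struct demo_blob, policies);
	blob.ads.reg_state_addr     = ggtt_base + offsetof(struct demo_blob, reg_state);

	printf("allocation size %zu, policies at 0x%x, reg_state at 0x%x\n",
	       sizeof(blob), blob.ads.scheduler_policies, blob.ads.reg_state_addr);
	return 0;
}
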
@@ -810,22 +810,21 @@ static void guc_addon_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct i915_vma *vma;
-	struct guc_ads *ads;
-	struct guc_policies *policies;
-	struct guc_mmio_reg_state *reg_state;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	struct page *page;
-	u32 size;
-
 	/* The ads obj includes the struct itself and buffers passed to GuC */
-	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
-			sizeof(struct guc_mmio_reg_state) +
-			GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+	struct {
+		struct guc_ads ads;
+		struct guc_policies policies;
+		struct guc_mmio_reg_state reg_state;
+		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+	} __packed *blob;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	u32 base;
 
 	vma = guc->ads_vma;
 	if (!vma) {
-		vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
+		vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
 		if (IS_ERR(vma))
 			return;
 
@@ -833,44 +832,38 @@ static void guc_addon_create(struct intel_guc *guc)
 	}
 
 	page = i915_vma_first_page(vma);
-	ads = kmap(page);
-
-	/*
-	 * The GuC requires a "Golden Context" when it reinitialises
-	 * engines after a reset. Here we use the Render ring default
-	 * context, which must already exist and be pinned in the GGTT,
-	 * so its address won't change after we've told the GuC where
-	 * to find it.
-	 */
-	engine = dev_priv->engine[RCS];
-	ads->golden_context_lrca = engine->status_page.ggtt_offset;
-
-	for_each_engine(engine, dev_priv, id)
-		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
+	blob = kmap(page);
 
 	/* GuC scheduling policies */
-	policies = (void *)ads + sizeof(struct guc_ads);
-	guc_policies_init(policies);
-
-	ads->scheduler_policies =
-		guc_ggtt_offset(vma) + sizeof(struct guc_ads);
+	guc_policies_init(&blob->policies);
 
 	/* MMIO reg state */
-	reg_state = (void *)policies + sizeof(struct guc_policies);
-
 	for_each_engine(engine, dev_priv, id) {
-		reg_state->mmio_white_list[engine->guc_id].mmio_start =
+		blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
 			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
 		/* Nothing to be saved or restored for now. */
-		reg_state->mmio_white_list[engine->guc_id].count = 0;
+		blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
 	}
 
-	ads->reg_state_addr = ads->scheduler_policies +
-			sizeof(struct guc_policies);
+	/*
+	 * The GuC requires a "Golden Context" when it reinitialises
+	 * engines after a reset. Here we use the Render ring default
+	 * context, which must already exist and be pinned in the GGTT,
+	 * so its address won't change after we've told the GuC where
+	 * to find it.
+	 */
+	blob->ads.golden_context_lrca =
+		dev_priv->engine[RCS]->status_page.ggtt_offset;
+
+	for_each_engine(engine, dev_priv, id)
+		blob->ads.eng_state_size[engine->guc_id] =
+			intel_lr_context_size(engine);
 
-	ads->reg_state_buffer = ads->reg_state_addr +
-			sizeof(struct guc_mmio_reg_state);
+	base = guc_ggtt_offset(vma);
+	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
+	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
+	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
 
 	kunmap(page);
 }
@@ -66,6 +66,8 @@
 #define ptr_pack_bits(ptr, bits) \
 	((typeof(ptr))((unsigned long)(ptr) | (bits)))
 
+#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
+
 #define fetch_and_zero(ptr) ({ \
 	typeof(*ptr) __T = *(ptr); \
 	*(ptr) = (typeof(*ptr))0; \
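
The ptr_offset() helper added above is offsetof() applied through a pointer, so a member's offset always tracks the current struct layout. A small userspace sketch of how it behaves (the blob_layout struct and its sizes are made up for illustration; typeof is a GCC/Clang extension, as used throughout the kernel):

#include <stddef.h>
#include <stdio.h>

/* Same definition as the one added in the hunk above. */
#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)

/* Made-up layout, roughly shaped like the ads blob. */
struct blob_layout {
	char ads[64];
	char policies[32];
	char reg_state[128];
	char reg_state_buffer[4096];
};

int main(void)
{
	struct blob_layout *blob = NULL;	/* only the pointee type is used */

	/* typeof(*(ptr)) never dereferences the pointer, so NULL is fine here. */
	printf("policies +%zu, reg_state +%zu, reg_state_buffer +%zu\n",
	       ptr_offset(blob, policies),
	       ptr_offset(blob, reg_state),
	       ptr_offset(blob, reg_state_buffer));
	return 0;
}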