Commit fbffc5a3 authored by Chris Wilson

drm/i915/guc: Propagate the fw xfer timeout

Propagate the timeout on transferring the fw back to the caller, where it
may act upon it, usually by restarting the xfer before failing.

v2: Simplify the wait to only wait upon the guc signaling completion,
with an assertion that the fw xfer must have completed for it to be
ready!

Testcase: igt/drv_selftest/live_hangcheck
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181018195536.11522-1-chris@chris-wilson.co.uk
parent 7b554301
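The value of propagating the timeout is that the caller can now tell a stalled transfer apart from a verification failure and choose to retry. Below is a minimal sketch of such a caller-side retry; guc_fw_xfer_with_retries() and the retry count are hypothetical illustrations, not part of this patch, assuming the wait surfaces -ETIMEDOUT:

/*
 * Hypothetical caller-side retry loop, illustrating why the timeout is
 * worth propagating; this wrapper is not introduced by this patch.
 */
static int guc_fw_xfer_with_retries(struct intel_uc_fw *guc_fw,
				    struct i915_vma *vma)
{
	int retries = 3;
	int err;

	do {
		err = guc_fw_xfer(guc_fw, vma);
		if (err != -ETIMEDOUT)
			return err; /* success, or a non-recoverable error */
	} while (--retries);

	return err; /* still timing out: give up and report it */
}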
@@ -125,66 +125,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
 }
 
 /* Copy RSA signature from the fw image to HW for verification */
-static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_uc_fw *guc_fw = &guc->fw;
-	struct sg_table *sg = vma->pages;
 	u32 rsa[UOS_RSA_SCRATCH_COUNT];
 	int i;
 
-	if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
-			       guc_fw->rsa_offset) != sizeof(rsa))
-		return -EINVAL;
+	sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
+			   rsa, sizeof(rsa), guc->fw.rsa_offset);
 
 	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
 		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-
-	return 0;
 }
 
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_uc_fw *guc_fw = &guc->fw;
-	unsigned long offset;
-	u32 status;
-	int ret;
 
-	/*
-	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
-	 * other components
-	 */
-	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
-	/* Set the source address for the new blob */
-	offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
-	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
-	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
-
-	/*
-	 * Set the DMA destination. Current uCode expects the code to be
-	 * loaded at 8k; locations below this are used for the stack.
-	 */
-	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
-	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
-	/* Finally start the DMA */
-	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
-	/* Wait for DMA to finish */
-	ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
-					   2, 100, &status);
-	DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
-
-	return ret;
+	/* Did we complete the xfer? */
+	*status = I915_READ(DMA_CTRL);
+	return !(*status & START_DMA);
 }
 
 /*
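Two hardware details make the new guc_xfer_completed() a single read: DMA_CTRL is a masked register, so the earlier _MASKED_BIT_ENABLE() write sets START_DMA without a read-modify-write, and the engine self-clears START_DMA once the copy is done. A standalone sketch of those two idioms follows; the bit position is illustrative only, the real definitions live in i915_reg.h:

#include <stdbool.h>
#include <stdint.h>

#define START_DMA (1u << 0)	/* illustrative; see i915_reg.h for the real bit */

/* Masked registers take a write-enable mask in the upper 16 bits, so
 * enabling a bit is a single write -- the shape of _MASKED_BIT_ENABLE(). */
static uint32_t masked_bit_enable(uint32_t bits)
{
	return (bits << 16) | bits;
}

/* The xfer is complete once the hardware has self-cleared START_DMA. */
static bool xfer_completed(uint32_t dma_ctrl)
{
	return !(dma_ctrl & START_DMA);
}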
@@ -228,9 +188,51 @@ static int guc_wait_ucode(struct intel_guc *guc)
 		ret = -ENOEXEC;
 	}
 
+	if (ret == 0 && !guc_xfer_completed(guc, &status)) {
+		DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
+			  status);
+		ret = -ENXIO;
+	}
+
 	return ret;
 }
 
+/*
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Architecturally, the DMA engine is bidirectional, and can potentially even
+ * transfer between GTT locations. This functionality is left out of the API
+ * for now as there is no need for it.
+ */
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_uc_fw *guc_fw = &guc->fw;
+	unsigned long offset;
+
+	/*
+	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
+	 * other components
+	 */
+	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
+
+	/* Set the source address for the new blob */
+	offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+	/*
+	 * Set the DMA destination. Current uCode expects the code to be
+	 * loaded at 8k; locations below this are used for the stack.
+	 */
+	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
+	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+	/* Finally start the DMA */
+	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+
+	return guc_wait_ucode(guc);
+}
+
 /*
  * Load the GuC firmware blob into the MinuteIA.
  */
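Note what the relocated guc_xfer_ucode() no longer does: it kicks the DMA and falls straight into guc_wait_ucode(), with no dedicated poll on DMA_CTRL. Waiting for GuC readiness subsumes the DMA wait, since the GuC cannot come up before its image has landed in WOPCM; the guc_xfer_completed() check above merely asserts that invariant, returning -ENXIO if the hardware claims readiness with the DMA still in flight. Condensed, the post-patch flow looks roughly like this sketch, where start_ucode_dma() and wait_for_guc_ready() are stand-ins for the code in the diff, not the literal kernel functions:

/* Sketch of the post-patch load flow; helper names are stand-ins. */
static int load_and_wait(struct intel_guc *guc, struct i915_vma *vma)
{
	u32 status;

	start_ucode_dma(guc, vma);		/* the register writes above */

	if (wait_for_guc_ready(guc, &status))	/* the only wait that remains */
		return -ETIMEDOUT;		/* propagated to the caller */

	if (!guc_xfer_completed(guc, &status))	/* ready implies DMA drained */
		return -ENXIO;

	return 0;
}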
@@ -251,17 +253,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
 	 * by the DMA engine in one operation, whereas the RSA signature is
 	 * loaded via MMIO.
 	 */
-	ret = guc_xfer_rsa(guc, vma);
-	if (ret)
-		DRM_WARN("GuC firmware signature xfer error %d\n", ret);
+	guc_xfer_rsa(guc, vma);
 
 	ret = guc_xfer_ucode(guc, vma);
-	if (ret)
-		DRM_WARN("GuC firmware code xfer error %d\n", ret);
-
-	ret = guc_wait_ucode(guc);
-	if (ret)
-		DRM_ERROR("GuC firmware xfer error %d\n", ret);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);