Commit 100d46bd authored by Linus Torvalds

Merge Intel Gen8/Gen9 graphics fixes from Jon Bloomfield.

This fixes two different classes of bugs in the Intel graphics hardware:

MMIO register read hang:
 "On Intels Gen8 and Gen9 Graphics hardware, a read of specific graphics
  MMIO registers when the product is in certain low power states causes
  a system hang.

  There are two potential triggers for DoS:
    a) H/W corruption of the RC6 save/restore vector
    b) Hard hang within the MIPI hardware

  This prevents the DoS in two areas of the hardware:
    1) Detect corruption of RC6 address on exit from low-power state,
       and if we find it corrupted, disable RC6 and RPM
    2) Permanently lower the MIPI MMIO timeout"
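
Condensed to code, the detection side of (1) is a single register probe:
GEN8_RC6_CTX_INFO reads back as zero once the hardware has corrupted its
RC6 save/restore vector. A sketch of the check, mirroring
i915_rc6_ctx_wa_check() in the intel_pm.c hunks below (suspend/resume
plumbing and locking elided):

    bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
    {
        if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))  /* BDW and Gen9 only */
            return false;
        if (i915->gt_pm.rc6.ctx_corrupted)       /* WA already engaged */
            return false;
        if (I915_READ(GEN8_RC6_CTX_INFO))        /* vector still intact */
            return false;

        /* Corruption detected: disable RC6 and pin the device awake */
        intel_disable_rc6(i915);
        i915->gt_pm.rc6.ctx_corrupted = true;
        i915->gt_pm.rc6.ctx_corrupted_wakeref =
            intel_runtime_pm_get_noresume(&i915->runtime_pm);
        return true;
    }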

Blitter command streamer unrestricted memory accesses:
 "On Intels Gen9 Graphics hardware the Blitter Command Streamer (BCS)
  allows writing to Memory Mapped Input Output (MMIO) that should be
  blocked. With modifications of page tables, this can lead to privilege
  escalation. This exposure is limited to the Guest Physical Address
  space and does not allow for access outside of the graphics virtual
  machine.

  This series establishes a software parser into the Blitter command
  stream to scan for, and prevent, reads or writes to MMIOs that should
  not be accessible to non-privileged contexts.

  Much of the command parser infrastructure has existed for some time,
  and is used on Ivybridge/Haswell/Valleyview-derived products to allow
  the use of features normally blocked by hardware. In this legacy
  context, the command parser is employed to allow normally unprivileged
  submissions to be run with elevated privileges in order to grant
  access to a limited set of extra capabilities. In this mode the parser
  is optional; in the event that the parser finds any construct that it
  cannot properly validate (e.g. nested command buffers), it simply
  aborts the scan and submits the buffer in non-privileged mode.

  For Gen9 Graphics, this series makes the parser mandatory for all
  Blitter submissions. The incoming user buffer is first copied to a
  kernel-owned buffer and parsed. If all checks are successful, the
  kernel-owned buffer is mapped READ-ONLY and submitted on behalf of the
  user. If any checks fail, or the parser is unable to complete the scan
  (nested buffers), the submission is forcibly rejected. The successfully scanned
  buffer is executed with NORMAL user privileges (key difference from
  legacy usage).

  Modern usermode does not use the Blitter on later hardware, having
  switched over to using the 3D engine instead for performance reasons.
  There are however some legacy usermode apps that rely on Blitter,
  notably the SNA X-Server. There are no known usermode applications
  that require nested command buffers on the Blitter, so the forcible
  rejection of such buffers in this patch series is considered an
  acceptable limitation"
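
In code terms, the mandatory Gen9 blitter path condenses to the sketch
below, distilled from shadow_batch_pin() and eb_parse() in the execbuffer
hunks that follow (pool recycling and error unwinding elided):

    static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
    {
        struct intel_engine_pool_node *pool;
        struct i915_vma *vma;
        u64 batch_start, shadow_batch_start;
        int err;

        /* 1: allocate a kernel-owned shadow buffer for the user batch */
        pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);

        /* 2: pin the shadow read-only in the ppgtt, so the scanned copy
         *    cannot be tampered with between scan and execution */
        vma = shadow_batch_pin(eb, pool->obj);

        batch_start = gen8_canonical_addr(eb->batch->node.start) +
                      eb->batch_start_offset;
        shadow_batch_start = gen8_canonical_addr(vma->node.start);

        /* 3: copy the user batch into the shadow and scan it; unknown
         *    commands, non-whitelisted registers and unverifiable jumps
         *    fail the whole submission (no non-privileged fallback) */
        err = intel_engine_cmd_parser(eb->gem_context, eb->engine,
                                      eb->batch->obj, batch_start,
                                      eb->batch_start_offset, eb->batch_len,
                                      pool->obj, shadow_batch_start);
        if (err)
            return ERR_PTR(err);

        /* 4: queue the vetted shadow; it runs with NORMAL user privilege,
         *    unlike the legacy elevated-privilege parser mode */
        eb->batch_start_offset = 0;
        eb->batch = vma;
        return vma;
    }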

* Intel graphics fixes in emailed bundle from Jon Bloomfield <jon.bloomfield@intel.com>:
  drm/i915/cmdparser: Fix jump whitelist clearing
  drm/i915/gen8+: Add RC6 CTX corruption WA
  drm/i915: Lower RM timeout to avoid DSI hard hangs
  drm/i915/cmdparser: Ignore Length operands during command matching
  drm/i915/cmdparser: Add support for backward jumps
  drm/i915/cmdparser: Use explicit goto for error paths
  drm/i915: Add gen9 BCS cmdparsing
  drm/i915: Allow parsing of unsized batches
  drm/i915: Support ro ppgtt mapped cmdparser shadow buffers
  drm/i915: Add support for mandatory cmdparsing
  drm/i915: Remove Master tables from cmdparser
  drm/i915: Disable Secure Batches for gen6+
  drm/i915: Rename gen7 cmdparser tables
parents de620fb9 ea0b163b
@@ -319,6 +319,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	free_engines(rcu_access_pointer(ctx->engines));
 	mutex_destroy(&ctx->engines_mutex);

+	kfree(ctx->jump_whitelist);
+
 	if (ctx->timeline)
 		intel_timeline_put(ctx->timeline);
@@ -441,6 +443,9 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

+	ctx->jump_whitelist = NULL;
+	ctx->jump_whitelist_cmds = 0;
+
 	return ctx;

 err_free:
...
@@ -192,6 +192,13 @@ struct i915_gem_context {
 	 * per vm, which may be one per context or shared with the global GTT)
 	 */
 	struct radix_tree_root handles_vma;
+
+	/** jump_whitelist: Bit array for tracking cmds during cmdparsing
+	 *  Guarded by struct_mutex
+	 */
+	unsigned long *jump_whitelist;
+
+	/** jump_whitelist_cmds: No of cmd slots available */
+	u32 jump_whitelist_cmds;
 };

 #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
...
@@ -296,7 +296,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
-	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+	return intel_engine_requires_cmd_parser(eb->engine) ||
+		(intel_engine_using_cmd_parser(eb->engine) &&
+		 eb->args->batch_len);
 }

 static int eb_create(struct i915_execbuffer *eb)
@@ -1955,40 +1957,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	return 0;
 }

-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = eb->i915;
+	struct i915_vma * const vma = *eb->vma;
+	struct i915_address_space *vm;
+	u64 flags;
+
+	/*
+	 * PPGTT backed shadow buffers must be mapped RO, to prevent
+	 * post-scan tampering
+	 */
+	if (CMDPARSER_USES_GGTT(dev_priv)) {
+		flags = PIN_GLOBAL;
+		vm = &dev_priv->ggtt.vm;
+	} else if (vma->vm->has_read_only) {
+		flags = PIN_USER;
+		vm = vma->vm;
+		i915_gem_object_set_readonly(obj);
+	} else {
+		DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 {
 	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
+	u64 batch_start;
+	u64 shadow_batch_start;
 	int err;

 	pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
 	if (IS_ERR(pool))
 		return ERR_CAST(pool);

-	err = intel_engine_cmd_parser(eb->engine,
+	vma = shadow_batch_pin(eb, pool->obj);
+	if (IS_ERR(vma))
+		goto err;
+
+	batch_start = gen8_canonical_addr(eb->batch->node.start) +
+		      eb->batch_start_offset;
+
+	shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+	err = intel_engine_cmd_parser(eb->gem_context,
+				      eb->engine,
 				      eb->batch->obj,
-				      pool->obj,
+				      batch_start,
 				      eb->batch_start_offset,
 				      eb->batch_len,
-				      is_master);
+				      pool->obj,
+				      shadow_batch_start);
+
 	if (err) {
-		if (err == -EACCES) /* unhandled chained batch */
+		i915_vma_unpin(vma);
+
+		/*
+		 * Unsafe GGTT-backed buffers can still be submitted safely
+		 * as non-secure.
+		 * For PPGTT backing however, we have no choice but to forcibly
+		 * reject unsafe buffers
+		 */
+		if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+			/* Execute original buffer non-secure */
 			vma = NULL;
 		else
 			vma = ERR_PTR(err);
 		goto err;
 	}

-	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
-	if (IS_ERR(vma))
-		goto err;
-
 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
 	eb->flags[eb->buffer_count] =
 		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
 	vma->exec_flags = &eb->flags[eb->buffer_count];
 	eb->buffer_count++;
+
+	eb->batch_start_offset = 0;
+	eb->batch = vma;
+
+	if (CMDPARSER_USES_GGTT(eb->i915))
+		eb->batch_flags |= I915_DISPATCH_SECURE;
+
+	/* eb->batch_len unchanged */

 	vma->private = pool;
 	return vma;
@@ -2421,6 +2477,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		       struct drm_i915_gem_exec_object2 *exec,
 		       struct drm_syncobj **fences)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
 	struct dma_fence *exec_fence = NULL;
@@ -2432,7 +2489,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
 		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

-	eb.i915 = to_i915(dev);
+	eb.i915 = i915;
 	eb.file = file;
 	eb.args = args;
 	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2452,6 +2509,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	eb.batch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
+		if (INTEL_GEN(i915) >= 11)
+			return -ENODEV;
+
+		/* Return -EPERM to trigger fallback code on old binaries. */
+		if (!HAS_SECURE_BATCHES(i915))
+			return -EPERM;
+
 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -2530,34 +2594,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}

+	if (eb.batch_len == 0)
+		eb.batch_len = eb.batch->size - eb.batch_start_offset;
+
 	if (eb_use_cmdparser(&eb)) {
 		struct i915_vma *vma;

-		vma = eb_parse(&eb, drm_is_current_master(file));
+		vma = eb_parse(&eb);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto err_vma;
 		}
-
-		if (vma) {
-			/*
-			 * Batch parsed and accepted:
-			 *
-			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-			 * bit from MI_BATCH_BUFFER_START commands issued in
-			 * the dispatch_execbuffer implementations. We
-			 * specifically don't want that set on batches the
-			 * command parser has accepted.
-			 */
-			eb.batch_flags |= I915_DISPATCH_SECURE;
-			eb.batch_start_offset = 0;
-			eb.batch = vma;
-		}
 	}

-	if (eb.batch_len == 0)
-		eb.batch_len = eb.batch->size - eb.batch_start_offset;
-
 	/*
 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
...
@@ -475,12 +475,13 @@ struct intel_engine_cs {
 	struct intel_engine_hangcheck hangcheck;

-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
 #define I915_ENGINE_SUPPORTS_STATS BIT(1)
 #define I915_ENGINE_HAS_PREEMPTION BIT(2)
 #define I915_ENGINE_HAS_SEMAPHORES BIT(3)
 #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
 #define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
 	unsigned int flags;

 	/*
@@ -541,9 +542,15 @@ struct intel_engine_cs {
 };

 static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
+{
+	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
 {
-	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
 }

 static inline bool
...
@@ -38,6 +38,9 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
 	GEM_BUG_ON(!gt->awake);

+	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+		intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+
 	intel_enable_gt_powersave(i915);
 	i915_update_gfx_val(i915);
@@ -67,6 +70,11 @@ static int __gt_park(struct intel_wakeref *wf)
 	if (INTEL_GEN(i915) >= 6)
 		gen6_rps_idle(i915);

+	if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
+		i915_rc6_ctx_wa_check(i915);
+		intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	}
+
 	/* Everything switched off, flush any residual interrupt just in case */
 	intel_synchronize_irq(i915);
...
@@ -53,13 +53,11 @@
  * granting userspace undue privileges. There are three categories of privilege.
  *
  * First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands
  *
  * Second, commands which access registers. To support correct/enhanced
  * userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access
  *
  * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
  * The parser always rejects such commands.
@@ -84,9 +82,9 @@
  * in the per-engine command tables.
  *
  * Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
  */

 /*
@@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor {
  * CMD_DESC_REJECT: The command is never allowed
  * CMD_DESC_REGISTER: The command should be checked against the
  *                    register whitelist for the appropriate ring
- * CMD_DESC_MASTER: The command is allowed if the submitting process
- *                  is the DRM master
  */
 	u32 flags;
 #define CMD_DESC_FIXED (1<<0)
@@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor {
 #define CMD_DESC_REJECT (1<<2)
 #define CMD_DESC_REGISTER (1<<3)
 #define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)

 /*
  * The command's unique identification bits and the bitmask to get them.
@@ -194,7 +189,7 @@ struct drm_i915_cmd_table {
 #define CMD(op, opm, f, lm, fl, ...) \
 	{ \
 		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
-		.cmd = { (op), ~0u << (opm) }, \
+		.cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \
 		.length = { (lm) }, \
 		__VA_ARGS__ \
 	}
@@ -209,14 +204,13 @@ struct drm_i915_cmd_table {
 #define R CMD_DESC_REJECT
 #define W CMD_DESC_REGISTER
 #define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER

 /*            Command                  Mask  Fixed Len  Action
	       ---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
 	CMD( MI_NOOP, SMI, F, 1, S ),
 	CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
-	CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+	CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
 	CMD( MI_ARB_CHECK, SMI, F, 1, S ),
 	CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
 	CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
@@ -246,7 +240,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
 	CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
 };

-static const struct drm_i915_cmd_descriptor render_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
 	CMD( MI_FLUSH, SMI, F, 1, S ),
 	CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
 	CMD( MI_PREDICATE, SMI, F, 1, S ),
@@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 	CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
 	CMD( MI_SET_APPID, SMI, F, 1, S ),
 	CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
-	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
 	CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
 	CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
	     .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
@@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
 };

-static const struct drm_i915_cmd_descriptor video_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
 	CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
 	CMD( MI_SET_APPID, SMI, F, 1, S ),
 	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
 	CMD( MFX_WAIT, SMFX, F, 1, S ),
 };

-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
 	CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
 	CMD( MI_SET_APPID, SMI, F, 1, S ),
 	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -412,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
	     }}, ),
 };

-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
 	CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
 	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
	     .bits = {{
@@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
 };

 static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
-	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
 	CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
 };

+/*
+ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
+ * need to re-enforce the register access checks. We therefore only need to
+ * teach the cmdparser how to find the end of each command, and identify
+ * register accesses. The table doesn't need to reject any commands, and so
+ * the only commands listed here are:
+ *   1) Those that touch registers
+ *   2) Those that do not have the default 8-bit length
+ *
+ * Note that the default MI length mask chosen for this table is 0xFF, not
+ * the 0x3F used on older devices. This is because the vast majority of MI
+ * cmds on Gen9 use a standard 8-bit Length field.
+ * All the Gen9 blitter instructions are standard 0xFF length mask, and
+ * none allow access to non-general registers, so in fact no BLT cmds are
+ * included in the table at all.
+ */
+static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
+	CMD( MI_NOOP, SMI, F, 1, S ),
+	CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
+	CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
+	CMD( MI_FLUSH, SMI, F, 1, S ),
+	CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+	CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+	CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
+	CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
+	CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
+	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
+	CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
+	CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
+	CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+	CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
+	CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+	CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+	     .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+	/*
+	 * We allow BB_START but apply further checks. We just sanitize the
+	 * basic fields here.
+	 */
+#define MI_BB_START_OPERAND_MASK   GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+	CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
+	     .bits = {{
+			.offset = 0,
+			.mask = MI_BB_START_OPERAND_MASK,
+			.expected = MI_BB_START_OPERAND_EXPECT,
+	     }}, ),
+};
+
 static const struct drm_i915_cmd_descriptor noop_desc =
 	CMD(MI_NOOP, SMI, F, 1, S);
@@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
 #undef R
 #undef W
 #undef B
-#undef M

-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
 };

-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
 	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
 };

-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ video_cmds, ARRAY_SIZE(video_cmds) },
+static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
 };

-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
 };

-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
 };

-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
 	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
 };

+static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
+	{ gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
+};
+
 /*
  * Register whitelists, sorted by increasing register offset.
  */
@@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
 	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
 };

-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
-	REG32(FORCEWAKE_MT),
-	REG32(DERRMR),
-	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
-	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
-	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
-};
-
-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
-	REG32(FORCEWAKE_MT),
-	REG32(DERRMR),
+static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
+	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
+	REG32(BCS_SWCTRL),
+	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+	REG64_IDX(BCS_GPR, 0),
+	REG64_IDX(BCS_GPR, 1),
+	REG64_IDX(BCS_GPR, 2),
+	REG64_IDX(BCS_GPR, 3),
+	REG64_IDX(BCS_GPR, 4),
+	REG64_IDX(BCS_GPR, 5),
+	REG64_IDX(BCS_GPR, 6),
+	REG64_IDX(BCS_GPR, 7),
+	REG64_IDX(BCS_GPR, 8),
+	REG64_IDX(BCS_GPR, 9),
+	REG64_IDX(BCS_GPR, 10),
+	REG64_IDX(BCS_GPR, 11),
+	REG64_IDX(BCS_GPR, 12),
+	REG64_IDX(BCS_GPR, 13),
+	REG64_IDX(BCS_GPR, 14),
+	REG64_IDX(BCS_GPR, 15),
 };

 #undef REG64
@@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
 struct drm_i915_reg_table {
 	const struct drm_i915_reg_descriptor *regs;
 	int num_regs;
-	bool master;
 };

 static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
-	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
-	{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
 };

 static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
-	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
-	{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
 };

 static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
-	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
-	{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
-	{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+	{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
 };

 static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
-	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
-	{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+};
+
+static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
+	{ gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
 };

 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
@@ -710,6 +771,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	return 0;
 }

+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+	if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+		return 0xFF;
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
 static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
 				 const struct drm_i915_cmd_table *cmd_tables,
 				 int cmd_table_count)
@@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;

-	if (!IS_GEN(engine->i915, 7))
+	if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
+					  engine->class == COPY_ENGINE_CLASS))
 		return;

 	switch (engine->class) {
 	case RENDER_CLASS:
 		if (IS_HASWELL(engine->i915)) {
-			cmd_tables = hsw_render_ring_cmds;
+			cmd_tables = hsw_render_ring_cmd_table;
 			cmd_table_count =
-				ARRAY_SIZE(hsw_render_ring_cmds);
+				ARRAY_SIZE(hsw_render_ring_cmd_table);
 		} else {
-			cmd_tables = gen7_render_cmds;
-			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+			cmd_tables = gen7_render_cmd_table;
+			cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
 		}

 		if (IS_HASWELL(engine->i915)) {
@@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 			engine->reg_tables = ivb_render_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
 		}

 		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
 	case VIDEO_DECODE_CLASS:
-		cmd_tables = gen7_video_cmds;
-		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+		cmd_tables = gen7_video_cmd_table;
+		cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case COPY_ENGINE_CLASS:
-		if (IS_HASWELL(engine->i915)) {
-			cmd_tables = hsw_blt_ring_cmds;
-			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+		if (IS_GEN(engine->i915, 9)) {
+			cmd_tables = gen9_blt_cmd_table;
+			cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
+			engine->get_cmd_length_mask =
+				gen9_blt_get_cmd_length_mask;
+
+			/* BCS Engine unsafe without parser */
+			engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
+		} else if (IS_HASWELL(engine->i915)) {
+			cmd_tables = hsw_blt_ring_cmd_table;
+			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
 		} else {
-			cmd_tables = gen7_blt_cmds;
-			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+			cmd_tables = gen7_blt_cmd_table;
+			cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
 		}

-		if (IS_HASWELL(engine->i915)) {
+		if (IS_GEN(engine->i915, 9)) {
+			engine->reg_tables = gen9_blt_reg_tables;
+			engine->reg_table_count =
+				ARRAY_SIZE(gen9_blt_reg_tables);
+		} else if (IS_HASWELL(engine->i915)) {
 			engine->reg_tables = hsw_blt_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
 		} else {
 			engine->reg_tables = ivb_blt_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
 		}
-
-		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
 	case VIDEO_ENHANCEMENT_CLASS:
-		cmd_tables = hsw_vebox_cmds;
-		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+		cmd_tables = hsw_vebox_cmd_table;
+		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
 		/* VECS can use the same length_mask function as VCS */
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
@@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 		return;
 	}

-	engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
+	engine->flags |= I915_ENGINE_USING_CMD_PARSER;
 }

 /**
@@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
  */
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
 {
-	if (!intel_engine_needs_cmd_parser(engine))
+	if (!intel_engine_using_cmd_parser(engine))
 		return;

 	fini_hash_table(engine);
@@ -1029,22 +1112,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
 }

 static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
 {
 	const struct drm_i915_reg_table *table = engine->reg_tables;
+	const struct drm_i915_reg_descriptor *reg = NULL;
 	int count = engine->reg_table_count;

-	for (; count > 0; ++table, --count) {
-		if (!table->master || is_master) {
-			const struct drm_i915_reg_descriptor *reg;
+	for (; !reg && (count > 0); ++table, --count)
+		reg = __find_reg(table->regs, table->num_regs, addr);

-			reg = __find_reg(table->regs, table->num_regs, addr);
-			if (reg != NULL)
-				return reg;
-		}
-	}
-
-	return NULL;
+	return reg;
 }

 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
@@ -1128,8 +1205,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 static bool check_cmd(const struct intel_engine_cs *engine,
 		      const struct drm_i915_cmd_descriptor *desc,
-		      const u32 *cmd, u32 length,
-		      const bool is_master)
+		      const u32 *cmd, u32 length)
 {
 	if (desc->flags & CMD_DESC_SKIP)
 		return true;
@@ -1139,12 +1215,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 		return false;
 	}

-	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
-		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
-				 *cmd);
-		return false;
-	}
-
 	if (desc->flags & CMD_DESC_REGISTER) {
 		/*
 		 * Get the distance between individual register offset
@@ -1158,7 +1228,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 		     offset += step) {
 			const u32 reg_addr = cmd[offset] & desc->reg.mask;
 			const struct drm_i915_reg_descriptor *reg =
-				find_reg(engine, is_master, reg_addr);
+				find_reg(engine, reg_addr);

 			if (!reg) {
 				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
@@ -1236,16 +1306,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 	return true;
 }
+static int check_bbstart(const struct i915_gem_context *ctx,
+			 u32 *cmd, u32 offset, u32 length,
+			 u32 batch_len,
+			 u64 batch_start,
+			 u64 shadow_batch_start)
+{
+	u64 jump_offset, jump_target;
+	u32 target_cmd_offset, target_cmd_index;
+
+	/* For igt compatibility on older platforms */
+	if (CMDPARSER_USES_GGTT(ctx->i915)) {
+		DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+		return -EACCES;
+	}
+
+	if (length != 3) {
+		DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+			  length);
+		return -EINVAL;
+	}
+
+	jump_target = *(u64*)(cmd+1);
+	jump_offset = jump_target - batch_start;
+
+	/*
+	 * Any underflow of jump_target is guaranteed to be outside the range
+	 * of a u32, so >= test catches both too large and too small
+	 */
+	if (jump_offset >= batch_len) {
+		DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+			  jump_target);
+		return -EINVAL;
+	}
+
+	/*
+	 * This cannot overflow a u32 because we already checked jump_offset
+	 * is within the BB, and the batch_len is a u32
+	 */
+	target_cmd_offset = lower_32_bits(jump_offset);
+	target_cmd_index = target_cmd_offset / sizeof(u32);
+
+	*(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+	if (target_cmd_index == offset)
+		return 0;
+
+	if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+		DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+		return -EINVAL;
+	} else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+		DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+			  jump_target);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+	const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+	const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+	u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+	unsigned long *next_whitelist;
+
+	if (CMDPARSER_USES_GGTT(ctx->i915))
+		return;
+
+	if (batch_cmds <= ctx->jump_whitelist_cmds) {
+		bitmap_zero(ctx->jump_whitelist, batch_cmds);
+		return;
+	}
+
+again:
+	next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+	if (next_whitelist) {
+		kfree(ctx->jump_whitelist);
+		ctx->jump_whitelist = next_whitelist;
+		ctx->jump_whitelist_cmds =
+			next_size * BITS_PER_BYTE * sizeof(long);
+		return;
+	}
+
+	if (next_size > exact_size) {
+		next_size = exact_size;
+		goto again;
+	}
+
+	DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+	bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+
+	return;
+}
 #define LENGTH_BIAS 2

 /**
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
  * @engine: the engine on which the batch is to execute
  * @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
  * @batch_start_offset: byte offset in the batch at which execution starts
  * @batch_len: length of the commands in batch_obj
- * @is_master: is the submitting process the drm master?
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
  *
  * Parses the specified batch buffer looking for privilege violations as
  * described in the overview.
@@ -1253,14 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+			    struct intel_engine_cs *engine,
 			    struct drm_i915_gem_object *batch_obj,
-			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 batch_start,
 			    u32 batch_start_offset,
 			    u32 batch_len,
-			    bool is_master)
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 shadow_batch_start)
 {
-	u32 *cmd, *batch_end;
+	u32 *cmd, *batch_end, offset = 0;
 	struct drm_i915_cmd_descriptor default_desc = noop_desc;
 	const struct drm_i915_cmd_descriptor *desc = &default_desc;
 	bool needs_clflush_after = false;
@@ -1274,6 +1443,8 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx,
 		return PTR_ERR(cmd);
 	}

+	init_whitelist(ctx, batch_len);
+
 	/*
 	 * We use the batch length as size because the shadow object is as
 	 * large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1283,31 +1454,15 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx,
 	do {
 		u32 length;

-		if (*cmd == MI_BATCH_BUFFER_END) {
-			if (needs_clflush_after) {
-				void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
-				drm_clflush_virt_range(ptr,
-						       (void *)(cmd + 1) - ptr);
-			}
+		if (*cmd == MI_BATCH_BUFFER_END)
 			break;
-		}

 		desc = find_cmd(engine, *cmd, desc, &default_desc);
 		if (!desc) {
 			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
 					 *cmd);
 			ret = -EINVAL;
-			break;
-		}
-
-		/*
-		 * If the batch buffer contains a chained batch, return an
-		 * error that tells the caller to abort and dispatch the
-		 * workload as a non-secure batch.
-		 */
-		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
-			ret = -EACCES;
-			break;
+			goto err;
 		}

 		if (desc->flags & CMD_DESC_FIXED)
@@ -1321,22 +1476,43 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx,
 				  length,
 				  batch_end - cmd);
 			ret = -EINVAL;
-			break;
+			goto err;
 		}

-		if (!check_cmd(engine, desc, cmd, length, is_master)) {
+		if (!check_cmd(engine, desc, cmd, length)) {
 			ret = -EACCES;
+			goto err;
+		}
+
+		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+			ret = check_bbstart(ctx, cmd, offset, length,
+					    batch_len, batch_start,
+					    shadow_batch_start);
+
+			if (ret)
+				goto err;
 			break;
 		}

+		if (ctx->jump_whitelist_cmds > offset)
+			set_bit(offset, ctx->jump_whitelist);
+
 		cmd += length;
+		offset += length;
 		if (cmd >= batch_end) {
 			DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 			ret = -EINVAL;
-			break;
+			goto err;
 		}
 	} while (1);

+	if (needs_clflush_after) {
+		void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+
+		drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
+	}
+
+err:
 	i915_gem_object_unpin_map(shadow_batch_obj);
 	return ret;
 }
@@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 	/* If the command parser is not enabled, report 0 - unsupported */
 	for_each_uabi_engine(engine, dev_priv) {
-		if (intel_engine_needs_cmd_parser(engine)) {
+		if (intel_engine_using_cmd_parser(engine)) {
 			active = true;
 			break;
 		}
@@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 	 *    the parser enabled.
 	 * 9. Don't whitelist or handle oacontrol specially, as ownership
 	 *    for oacontrol state is moving to i915-perf.
+	 * 10. Support for Gen9 BCS Parsing
 	 */
-	return 9;
+	return 10;
 }
...
@@ -1850,6 +1850,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	i915_gem_suspend_late(dev_priv);

+	i915_rc6_ctx_wa_suspend(dev_priv);
+
 	intel_uncore_suspend(&dev_priv->uncore);

 	intel_power_domains_suspend(dev_priv,
@@ -2053,6 +2055,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	intel_power_domains_resume(dev_priv);

+	i915_rc6_ctx_wa_resume(dev_priv);
+
 	intel_gt_sanitize(&dev_priv->gt, true);

 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
...
@@ -593,6 +593,8 @@ struct intel_rps {
 struct intel_rc6 {
 	bool enabled;
+	bool ctx_corrupted;
+	intel_wakeref_t ctx_corrupted_wakeref;
 	u64 prev_hw_residency[4];
 	u64 cur_residency[4];
 };
@@ -2075,9 +2077,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define VEBOX_MASK(dev_priv) \
 	ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)

+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
+ * All later gens can run the final buffer from the ppgtt
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+
 #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
 #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
 #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
 			   IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -2110,10 +2119,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))

+#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
+	(IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
+
 /* WaRsDisableCoarsePowerGating:skl,cnl */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
-	(IS_CANNONLAKE(dev_priv) || \
-	 IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+	(IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))

 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
@@ -2284,6 +2295,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			   unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)

+struct i915_vma * __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    const struct i915_ggtt_view *view,
+		    u64 size,
+		    u64 alignment,
+		    u64 flags);
+
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

 static inline int __must_check
@@ -2393,12 +2412,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *cxt,
+			    struct intel_engine_cs *engine,
 			    struct drm_i915_gem_object *batch_obj,
-			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 user_batch_start,
 			    u32 batch_start_offset,
 			    u32 batch_len,
-			    bool is_master);
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 shadow_batch_start);

 /* intel_device_info.c */
 static inline struct intel_device_info *
...
@@ -964,6 +964,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_address_space *vm = &dev_priv->ggtt.vm;
+
+	return i915_gem_object_pin(obj, vm, view, size, alignment,
+				   flags | PIN_GLOBAL);
+}
+
+struct i915_vma *
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    const struct i915_ggtt_view *view,
+		    u64 size,
+		    u64 alignment,
+		    u64 flags)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 	int ret;
@@ -1038,7 +1052,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 		return ERR_PTR(ret);
 	}

-	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+	ret = i915_vma_pin(vma, size, alignment, flags);
 	if (ret)
 		return ERR_PTR(ret);
...
@@ -62,7 +62,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
-		value = capable(CAP_SYS_ADMIN);
+		value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN);
 		break;
 	case I915_PARAM_CMD_PARSER_VERSION:
 		value = i915_cmd_parser_get_version(i915);
...
@@ -471,6 +471,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define ECOCHK_PPGTT_WT_HSW (0x2 << 3)
 #define ECOCHK_PPGTT_WB_HSW (0x3 << 3)

+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
 #define GAC_ECO_BITS _MMIO(0x14090)
 #define ECOBITS_SNB_BIT (1 << 13)
 #define ECOBITS_PPGTT_CACHE64B (3 << 8)
@@ -555,6 +557,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  */
 #define BCS_SWCTRL _MMIO(0x22200)

+/* There are 16 GPR registers */
+#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
+
 #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
 #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
 #define HS_INVOCATION_COUNT _MMIO(0x2300)
@@ -7211,6 +7217,10 @@ enum {
 #define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
 #define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)

+/* Display Internal Timeout Register */
+#define RM_TIMEOUT _MMIO(0x42060)
+#define MMIO_TIMEOUT_US(us) ((us) << 0)
+
 /* interrupts */
 #define DE_MASTER_IRQ_CONTROL (1 << 31)
 #define DE_SPRITEB_FLIP_DONE (1 << 29)
...
@@ -126,6 +126,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
          */
         I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
                    PWM1_GATING_DIS | PWM2_GATING_DIS);
+
+        /*
+         * Lower the display internal timeout.
+         * This is needed to avoid any hard hangs when the DSI port PLL
+         * is off and an MMIO access is attempted by an application at
+         * any privilege level, using batch buffers or any other means.
+         */
+        I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
 }
 
 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -8544,6 +8552,100 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
         dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
+{
+        return !I915_READ(GEN8_RC6_CTX_INFO);
+}
+
+static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
+{
+        if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+                return;
+
+        if (i915_rc6_ctx_corrupted(i915)) {
+                DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+                i915->gt_pm.rc6.ctx_corrupted = true;
+                i915->gt_pm.rc6.ctx_corrupted_wakeref =
+                        intel_runtime_pm_get(&i915->runtime_pm);
+        }
+}
+
+static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
+{
+        if (i915->gt_pm.rc6.ctx_corrupted) {
+                intel_runtime_pm_put(&i915->runtime_pm,
+                                     i915->gt_pm.rc6.ctx_corrupted_wakeref);
+                i915->gt_pm.rc6.ctx_corrupted = false;
+        }
+}
+
+/**
+ * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
+ */
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
+{
+        if (i915->gt_pm.rc6.ctx_corrupted)
+                intel_runtime_pm_put(&i915->runtime_pm,
+                                     i915->gt_pm.rc6.ctx_corrupted_wakeref);
+}
+
+/**
+ * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
+{
+        if (!i915->gt_pm.rc6.ctx_corrupted)
+                return;
+
+        if (i915_rc6_ctx_corrupted(i915)) {
+                i915->gt_pm.rc6.ctx_corrupted_wakeref =
+                        intel_runtime_pm_get(&i915->runtime_pm);
+                return;
+        }
+
+        DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+        i915->gt_pm.rc6.ctx_corrupted = false;
+}
+
+static void intel_disable_rc6(struct drm_i915_private *dev_priv);
+
+/**
+ * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @i915: i915 device
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ *
+ * Return false if no context corruption has happened since the last call of
+ * this function, true otherwise.
+ */
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
+{
+        if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+                return false;
+
+        if (i915->gt_pm.rc6.ctx_corrupted)
+                return false;
+
+        if (!i915_rc6_ctx_corrupted(i915))
+                return false;
+
+        DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+
+        intel_disable_rc6(i915);
+        i915->gt_pm.rc6.ctx_corrupted = true;
+        i915->gt_pm.rc6.ctx_corrupted_wakeref =
+                intel_runtime_pm_get_noresume(&i915->runtime_pm);
+
+        return true;
+}
+
 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
         struct intel_rps *rps = &dev_priv->gt_pm.rps;
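Taken together, the helpers above hold a runtime-pm wakeref for as long as the RC6 context is corrupted and release it across system suspend, since a full power cycle restores the context. A hedged sketch of how the suspend/resume hooks would plug into the driver's system PM flow; the real call sites live in i915_drv.c and are not shown in this excerpt, so the function names and placement below are assumptions:

        /* Sketch only: abbreviated system PM callbacks (hypothetical names). */
        static int i915_drm_suspend_sketch(struct drm_i915_private *i915)
        {
                /* ... existing suspend steps ... */
                i915_rc6_ctx_wa_suspend(i915);
                return 0;
        }

        static int i915_drm_resume_sketch(struct drm_i915_private *i915)
        {
                /* ... existing resume steps ... */
                i915_rc6_ctx_wa_resume(i915);
                return 0;
        }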
@@ -8557,6 +8659,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
                 pm_runtime_get(&dev_priv->drm.pdev->dev);
         }
 
+        i915_rc6_ctx_wa_init(dev_priv);
+
         /* Initialize RPS limits (for userspace) */
         if (IS_CHERRYVIEW(dev_priv))
                 cherryview_init_gt_powersave(dev_priv);
@@ -8595,6 +8699,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
         if (IS_VALLEYVIEW(dev_priv))
                 valleyview_cleanup_gt_powersave(dev_priv);
 
+        i915_rc6_ctx_wa_cleanup(dev_priv);
+
         if (!HAS_RC6(dev_priv))
                 pm_runtime_put(&dev_priv->drm.pdev->dev);
 }
@@ -8623,7 +8729,7 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
         i915->gt_pm.llc_pstate.enabled = false;
 }
 
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
 {
         lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
@@ -8642,6 +8748,15 @@ static void intel_disable_rc6(struct drm_i915_private *dev_priv)
         dev_priv->gt_pm.rc6.enabled = false;
 }
 
+static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+{
+        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+        mutex_lock(&rps->lock);
+        __intel_disable_rc6(dev_priv);
+        mutex_unlock(&rps->lock);
+}
+
 static void intel_disable_rps(struct drm_i915_private *dev_priv)
 {
         lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
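The rename above follows the usual kernel convention for locked/unlocked variants: the double-underscore function asserts the lock is already held, while the plain-named wrapper acquires it. That lets intel_disable_gt_powersave(), which already holds rps->lock, call __intel_disable_rc6() directly, while the RC6 CTX WA path can call intel_disable_rc6() from outside the lock. A generic sketch of the pattern (illustrative only, not driver code):

        /* __do_thing(): caller must hold the lock; do_thing(): takes it. */
        static void __do_thing(struct thing *t)
        {
                lockdep_assert_held(&t->lock);
                /* ... the actual work ... */
        }

        static void do_thing(struct thing *t)
        {
                mutex_lock(&t->lock);
                __do_thing(t);
                mutex_unlock(&t->lock);
        }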
@@ -8667,7 +8782,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 {
         mutex_lock(&dev_priv->gt_pm.rps.lock);
 
-        intel_disable_rc6(dev_priv);
+        __intel_disable_rc6(dev_priv);
         intel_disable_rps(dev_priv);
         if (HAS_LLC(dev_priv))
                 intel_disable_llc_pstate(dev_priv);
@@ -8694,6 +8809,9 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv)
         if (dev_priv->gt_pm.rc6.enabled)
                 return;
 
+        if (dev_priv->gt_pm.rc6.ctx_corrupted)
+                return;
+
         if (IS_CHERRYVIEW(dev_priv))
                 cherryview_enable_rc6(dev_priv);
         else if (IS_VALLEYVIEW(dev_priv))
...
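Because i915_rc6_ctx_corrupted() is a single MMIO read, i915_rc6_ctx_wa_check() is cheap enough to run on every exit from a low-power state. A hedged sketch of a caller; the actual call sites are added elsewhere in the series, so both the function name and the placement below are assumptions:

        /* Sketch only: re-check for RC6 context corruption on wakeup. */
        static void check_rc6_ctx_on_wakeup(struct drm_i915_private *i915)
        {
                if (i915_rc6_ctx_wa_check(i915))
                        DRM_DEBUG_DRIVER("RC6 context lost; RC6 and RPM disabled\n");
        }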
@@ -36,6 +36,9 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct i915_request *rq);
...