Commit dfce9025 authored by Dave Airlie

Backmerge i915 security patches from commit 'ea0b163b' into drm-next

This backmerges the branch that ended up in Linus' tree. It removes
all the changes for the rc6 patches from Linus' tree in favour of
a patch that is based on a large refactor that occurred.

Otherwise it all looks good.
Signed-off-by: Dave Airlie <airlied@redhat.com>
parents 2248a283 ea0b163b
@@ -236,6 +236,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	free_engines(rcu_access_pointer(ctx->engines));
 	mutex_destroy(&ctx->engines_mutex);
 
+	kfree(ctx->jump_whitelist);
+
 	if (ctx->timeline)
 		intel_timeline_put(ctx->timeline);
@@ -527,6 +529,9 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
+	ctx->jump_whitelist = NULL;
+	ctx->jump_whitelist_cmds = 0;
+
 	spin_lock(&i915->gem.contexts.lock);
 	list_add_tail(&ctx->link, &i915->gem.contexts.list);
 	spin_unlock(&i915->gem.contexts.lock);
...
@@ -176,6 +176,13 @@ struct i915_gem_context {
 	 * per vm, which may be one per context or shared with the global GTT)
 	 */
 	struct radix_tree_root handles_vma;
+
+	/** jump_whitelist: Bit array for tracking cmds during cmdparsing
+	 * Guarded by struct_mutex
+	 */
+	unsigned long *jump_whitelist;
+	/** jump_whitelist_cmds: No of cmd slots available */
+	u32 jump_whitelist_cmds;
 };
 
 #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
@@ -298,7 +298,9 @@ static inline u64 gen8_noncanonical_addr(u64 address)
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
-	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
+	return intel_engine_requires_cmd_parser(eb->engine) ||
+	       (intel_engine_using_cmd_parser(eb->engine) &&
+		eb->args->batch_len);
 }
 
 static int eb_create(struct i915_execbuffer *eb)
@@ -1990,40 +1992,94 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	return 0;
 }
 
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
+static struct i915_vma *
+shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = eb->i915;
+	struct i915_vma * const vma = *eb->vma;
+	struct i915_address_space *vm;
+	u64 flags;
+
+	/*
+	 * PPGTT backed shadow buffers must be mapped RO, to prevent
+	 * post-scan tampering
+	 */
+	if (CMDPARSER_USES_GGTT(dev_priv)) {
+		flags = PIN_GLOBAL;
+		vm = &dev_priv->ggtt.vm;
+	} else if (vma->vm->has_read_only) {
+		flags = PIN_USER;
+		vm = vma->vm;
+		i915_gem_object_set_readonly(obj);
+	} else {
+		DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 {
 	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
+	u64 batch_start;
+	u64 shadow_batch_start;
 	int err;
 
 	pool = intel_engine_get_pool(eb->engine, eb->batch_len);
 	if (IS_ERR(pool))
 		return ERR_CAST(pool);
 
-	err = intel_engine_cmd_parser(eb->engine,
+	vma = shadow_batch_pin(eb, pool->obj);
+	if (IS_ERR(vma))
+		goto err;
+
+	batch_start = gen8_canonical_addr(eb->batch->node.start) +
+		      eb->batch_start_offset;
+
+	shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+	err = intel_engine_cmd_parser(eb->gem_context,
+				      eb->engine,
 				      eb->batch->obj,
-				      pool->obj,
+				      batch_start,
 				      eb->batch_start_offset,
 				      eb->batch_len,
-				      is_master);
+				      pool->obj,
+				      shadow_batch_start);
+
 	if (err) {
-		if (err == -EACCES) /* unhandled chained batch */
+		i915_vma_unpin(vma);
+
+		/*
+		 * Unsafe GGTT-backed buffers can still be submitted safely
+		 * as non-secure.
+		 * For PPGTT backing however, we have no choice but to forcibly
+		 * reject unsafe buffers
+		 */
+		if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
+			/* Execute original buffer non-secure */
 			vma = NULL;
 		else
 			vma = ERR_PTR(err);
 		goto err;
 	}
 
-	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
-	if (IS_ERR(vma))
-		goto err;
-
 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
 	eb->flags[eb->buffer_count] =
 		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
 	vma->exec_flags = &eb->flags[eb->buffer_count];
 	eb->buffer_count++;
 
+	eb->batch_start_offset = 0;
+	eb->batch = vma;
+
+	if (CMDPARSER_USES_GGTT(eb->i915))
+		eb->batch_flags |= I915_DISPATCH_SECURE;
+
+	/* eb->batch_len unchanged */
+
 	vma->private = pool;
 	return vma;
@@ -2430,6 +2486,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		       struct drm_i915_gem_exec_object2 *exec,
 		       struct drm_syncobj **fences)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
 	struct dma_fence *exec_fence = NULL;
@@ -2441,7 +2498,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
 		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
 
-	eb.i915 = to_i915(dev);
+	eb.i915 = i915;
 	eb.file = file;
 	eb.args = args;
 	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
@@ -2461,6 +2518,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	eb.batch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
+		if (INTEL_GEN(i915) >= 11)
+			return -ENODEV;
+
+		/* Return -EPERM to trigger fallback code on old binaries. */
+		if (!HAS_SECURE_BATCHES(i915))
+			return -EPERM;
+
 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -2539,34 +2603,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}
 
+	if (eb.batch_len == 0)
+		eb.batch_len = eb.batch->size - eb.batch_start_offset;
+
 	if (eb_use_cmdparser(&eb)) {
 		struct i915_vma *vma;
 
-		vma = eb_parse(&eb, drm_is_current_master(file));
+		vma = eb_parse(&eb);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto err_vma;
 		}
-
-		if (vma) {
-			/*
-			 * Batch parsed and accepted:
-			 *
-			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-			 * bit from MI_BATCH_BUFFER_START commands issued in
-			 * the dispatch_execbuffer implementations. We
-			 * specifically don't want that set on batches the
-			 * command parser has accepted.
-			 */
-			eb.batch_flags |= I915_DISPATCH_SECURE;
-			eb.batch_start_offset = 0;
-			eb.batch = vma;
-		}
 	}
 
-	if (eb.batch_len == 0)
-		eb.batch_len = eb.batch->size - eb.batch_start_offset;
-
 	/*
 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
...
@@ -454,13 +454,14 @@ struct intel_engine_cs {
 	/* status_notifier: list of callbacks for context-switch changes */
 	struct atomic_notifier_head context_status_notifier;
 
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
 #define I915_ENGINE_SUPPORTS_STATS   BIT(1)
 #define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 #define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
 #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
 #define I915_ENGINE_IS_VIRTUAL       BIT(5)
 #define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
 	unsigned int flags;
 
 	/*
@@ -528,9 +529,15 @@ struct intel_engine_cs {
 };
 
 static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
+{
+	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
 {
-	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
 }
 
 static inline bool
...
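As an aside, the split introduced by these two flags is easy to restate outside the driver. The following standalone C sketch models the flags and the eb_use_cmdparser() decision above; the names and helper are local to the sketch, not the i915 API:

#include <stdbool.h>
#include <stdio.h>

/* Local model of the two engine flags (values mirror the BIT() positions). */
#define USING_CMD_PARSER    (1u << 0)
#define REQUIRES_CMD_PARSER (1u << 7)

/* An engine that REQUIRES the parser (Gen9 BCS) is always scanned; an
 * engine merely USING it (Gen7) is scanned only when there is a
 * non-empty batch to scan. */
static bool use_cmdparser(unsigned int flags, unsigned int batch_len)
{
	return (flags & REQUIRES_CMD_PARSER) ||
	       ((flags & USING_CMD_PARSER) && batch_len != 0);
}

int main(void)
{
	printf("%d\n", use_cmdparser(REQUIRES_CMD_PARSER, 0)); /* 1: always */
	printf("%d\n", use_cmdparser(USING_CMD_PARSER, 0));    /* 0: nothing to scan */
	printf("%d\n", use_cmdparser(USING_CMD_PARSER, 4096)); /* 1 */
	return 0;
}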
@@ -53,13 +53,11 @@
  * granting userspace undue privileges. There are three categories of privilege.
  *
  * First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands.
  *
  * Second, commands which access registers. To support correct/enhanced
  * userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access.
  *
  * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
  * The parser always rejects such commands.
@@ -84,9 +82,9 @@
  * in the per-engine command tables.
  *
  * Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
  */
 
 /*
@@ -104,8 +102,6 @@ struct drm_i915_cmd_descriptor {
  *               CMD_DESC_REJECT: The command is never allowed
  *               CMD_DESC_REGISTER: The command should be checked against the
  *                                  register whitelist for the appropriate ring
- *               CMD_DESC_MASTER: The command is allowed if the submitting process
- *                                is the DRM master
  */
 	u32 flags;
 #define CMD_DESC_FIXED    (1<<0)
@@ -113,7 +109,6 @@ struct drm_i915_cmd_descriptor {
 #define CMD_DESC_REJECT   (1<<2)
 #define CMD_DESC_REGISTER (1<<3)
 #define CMD_DESC_BITMASK  (1<<4)
-#define CMD_DESC_MASTER   (1<<5)
 
 /*
  * The command's unique identification bits and the bitmask to get them.
@@ -194,7 +189,7 @@ struct drm_i915_cmd_table {
 #define CMD(op, opm, f, lm, fl, ...)				\
 	{							\
 		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
-		.cmd = { (op), ~0u << (opm) },			\
+		.cmd = { (op & ~0u << (opm)), ~0u << (opm) },	\
 		.length = { (lm) },				\
 		__VA_ARGS__					\
 	}
@@ -209,14 +204,13 @@ struct drm_i915_cmd_table {
 #define R CMD_DESC_REJECT
 #define W CMD_DESC_REGISTER
 #define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER
 
 /*            Command                     Mask  Fixed Len  Action
	      ---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
 	CMD( MI_NOOP,                     SMI,   F, 1,     S ),
 	CMD( MI_USER_INTERRUPT,           SMI,   F, 1,     R ),
-	CMD( MI_WAIT_FOR_EVENT,           SMI,   F, 1,     M ),
+	CMD( MI_WAIT_FOR_EVENT,           SMI,   F, 1,     R ),
 	CMD( MI_ARB_CHECK,                SMI,   F, 1,     S ),
 	CMD( MI_REPORT_HEAD,              SMI,   F, 1,     S ),
 	CMD( MI_SUSPEND_FLUSH,            SMI,   F, 1,     S ),
@@ -246,7 +240,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
 	CMD( MI_BATCH_BUFFER_START,       SMI,  !F, 0xFF,  S ),
 };
 
-static const struct drm_i915_cmd_descriptor render_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
 	CMD( MI_FLUSH,                    SMI,   F, 1,     S ),
 	CMD( MI_ARB_ON_OFF,               SMI,   F, 1,     R ),
 	CMD( MI_PREDICATE,                SMI,   F, 1,     S ),
@@ -313,7 +307,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 	CMD( MI_URB_ATOMIC_ALLOC,         SMI,   F, 1,     S ),
 	CMD( MI_SET_APPID,                SMI,   F, 1,     S ),
 	CMD( MI_RS_CONTEXT,               SMI,   F, 1,     S ),
-	CMD( MI_LOAD_SCAN_LINES_INCL,     SMI,  !F, 0x3F,  M ),
+	CMD( MI_LOAD_SCAN_LINES_INCL,     SMI,  !F, 0x3F,  R ),
 	CMD( MI_LOAD_SCAN_LINES_EXCL,     SMI,  !F, 0x3F,  R ),
 	CMD( MI_LOAD_REGISTER_REG,        SMI,  !F, 0xFF,  W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
@@ -330,7 +324,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 	CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
 };
 
-static const struct drm_i915_cmd_descriptor video_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
 	CMD( MI_ARB_ON_OFF,               SMI,   F, 1,     R ),
 	CMD( MI_SET_APPID,                SMI,   F, 1,     S ),
 	CMD( MI_STORE_DWORD_IMM,          SMI,  !F, 0xFF,  B,
@@ -374,7 +368,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
 	CMD( MFX_WAIT,                    SMFX,  F, 1,     S ),
 };
 
-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
 	CMD( MI_ARB_ON_OFF,               SMI,   F, 1,     R ),
 	CMD( MI_SET_APPID,                SMI,   F, 1,     S ),
 	CMD( MI_STORE_DWORD_IMM,          SMI,  !F, 0xFF,  B,
@@ -412,7 +406,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
 	}}, ),
 };
 
-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
 	CMD( MI_DISPLAY_FLIP,             SMI,  !F, 0xFF,  R ),
 	CMD( MI_STORE_DWORD_IMM,          SMI,  !F, 0x3FF, B,
	      .bits = {{
@@ -446,10 +440,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
 };
 
 static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
-	CMD( MI_LOAD_SCAN_LINES_INCL,     SMI,  !F, 0x3F,  M ),
+	CMD( MI_LOAD_SCAN_LINES_INCL,     SMI,  !F, 0x3F,  R ),
 	CMD( MI_LOAD_SCAN_LINES_EXCL,     SMI,  !F, 0x3F,  R ),
 };
+/*
+ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
+ * need to re-enforce the register access checks. We therefore only need to
+ * teach the cmdparser how to find the end of each command, and identify
+ * register accesses. The table doesn't need to reject any commands, and so
+ * the only commands listed here are:
+ *   1) Those that touch registers
+ *   2) Those that do not have the default 8-bit length
+ *
+ * Note that the default MI length mask chosen for this table is 0xFF, not
+ * the 0x3F used on older devices. This is because the vast majority of MI
+ * cmds on Gen9 use a standard 8-bit Length field.
+ * All the Gen9 blitter instructions are standard 0xFF length mask, and
+ * none allow access to non-general registers, so in fact no BLT cmds are
+ * included in the table at all.
+ *
+ */
+static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
+	CMD( MI_NOOP,                     SMI,   F, 1,     S ),
+	CMD( MI_USER_INTERRUPT,           SMI,   F, 1,     S ),
+	CMD( MI_WAIT_FOR_EVENT,           SMI,   F, 1,     S ),
+	CMD( MI_FLUSH,                    SMI,   F, 1,     S ),
+	CMD( MI_ARB_CHECK,                SMI,   F, 1,     S ),
+	CMD( MI_REPORT_HEAD,              SMI,   F, 1,     S ),
+	CMD( MI_ARB_ON_OFF,               SMI,   F, 1,     S ),
+	CMD( MI_SUSPEND_FLUSH,            SMI,   F, 1,     S ),
+	CMD( MI_LOAD_SCAN_LINES_INCL,     SMI,  !F, 0x3F,  S ),
+	CMD( MI_LOAD_SCAN_LINES_EXCL,     SMI,  !F, 0x3F,  S ),
+	CMD( MI_STORE_DWORD_IMM,          SMI,  !F, 0x3FF, S ),
+	CMD( MI_LOAD_REGISTER_IMM(1),     SMI,  !F, 0xFF,  W,
+	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
+	CMD( MI_UPDATE_GTT,               SMI,  !F, 0x3FF, S ),
+	CMD( MI_STORE_REGISTER_MEM_GEN8,  SMI,   F, 4,     W,
+	      .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+	CMD( MI_FLUSH_DW,                 SMI,  !F, 0x3F,  S ),
+	CMD( MI_LOAD_REGISTER_MEM_GEN8,   SMI,   F, 4,     W,
+	      .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+	CMD( MI_LOAD_REGISTER_REG,        SMI,  !F, 0xFF,  W,
+	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+	/*
+	 * We allow BB_START but apply further checks. We just sanitize the
+	 * basic fields here.
+	 */
+#define MI_BB_START_OPERAND_MASK   GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+	CMD( MI_BATCH_BUFFER_START_GEN8,  SMI,  !F, 0xFF,  B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_BB_START_OPERAND_MASK,
+			.expected = MI_BB_START_OPERAND_EXPECT,
+	      }}, ),
+};
+
 static const struct drm_i915_cmd_descriptor noop_desc =
 	CMD(MI_NOOP, SMI, F, 1, S);
@@ -463,40 +511,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
 #undef R
 #undef W
 #undef B
-#undef M
 
-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
 };
 
-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
 	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
 };
 
-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ video_cmds, ARRAY_SIZE(video_cmds) },
+static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
 };
 
-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
 };
 
-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
 };
 
-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
-	{ common_cmds, ARRAY_SIZE(common_cmds) },
-	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
+	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+	{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
 	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
 };
 
+static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
+	{ gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
+};
+
 /*
  * Register whitelists, sorted by increasing register offset.
  */
@@ -612,17 +664,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
 	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
 };
 
-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
-	REG32(FORCEWAKE_MT),
-	REG32(DERRMR),
-	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
-	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
-	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
-};
-
-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
-	REG32(FORCEWAKE_MT),
-	REG32(DERRMR),
+static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
+	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
+	REG32(BCS_SWCTRL),
+	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+	REG64_IDX(BCS_GPR, 0),
+	REG64_IDX(BCS_GPR, 1),
+	REG64_IDX(BCS_GPR, 2),
+	REG64_IDX(BCS_GPR, 3),
+	REG64_IDX(BCS_GPR, 4),
+	REG64_IDX(BCS_GPR, 5),
+	REG64_IDX(BCS_GPR, 6),
+	REG64_IDX(BCS_GPR, 7),
+	REG64_IDX(BCS_GPR, 8),
+	REG64_IDX(BCS_GPR, 9),
+	REG64_IDX(BCS_GPR, 10),
+	REG64_IDX(BCS_GPR, 11),
+	REG64_IDX(BCS_GPR, 12),
+	REG64_IDX(BCS_GPR, 13),
+	REG64_IDX(BCS_GPR, 14),
+	REG64_IDX(BCS_GPR, 15),
 };
 
 #undef REG64
@@ -631,28 +693,27 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
 struct drm_i915_reg_table {
 	const struct drm_i915_reg_descriptor *regs;
 	int num_regs;
-	bool master;
 };
 
 static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
-	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
-	{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
 };
 
 static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
-	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
-	{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
 };
 
 static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
-	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
-	{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
-	{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+	{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
 };
 
 static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
-	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
-	{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+};
+
+static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
+	{ gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
 };
 
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
@@ -710,6 +771,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 	return 0;
 }
 
+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+	if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+		return 0xFF;
+
+	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+	return 0;
+}
+
 static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
 				 const struct drm_i915_cmd_table *cmd_tables,
 				 int cmd_table_count)
@@ -867,18 +939,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN(engine->i915, 7))
+	if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
+					  engine->class == COPY_ENGINE_CLASS))
 		return;
 
 	switch (engine->class) {
 	case RENDER_CLASS:
 		if (IS_HASWELL(engine->i915)) {
-			cmd_tables = hsw_render_ring_cmds;
+			cmd_tables = hsw_render_ring_cmd_table;
 			cmd_table_count =
-				ARRAY_SIZE(hsw_render_ring_cmds);
+				ARRAY_SIZE(hsw_render_ring_cmd_table);
 		} else {
-			cmd_tables = gen7_render_cmds;
-			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+			cmd_tables = gen7_render_cmd_table;
+			cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
 		}
 
 		if (IS_HASWELL(engine->i915)) {
@@ -888,36 +961,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 			engine->reg_tables = ivb_render_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
 		}
 
 		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
 		break;
 	case VIDEO_DECODE_CLASS:
-		cmd_tables = gen7_video_cmds;
-		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+		cmd_tables = gen7_video_cmd_table;
+		cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case COPY_ENGINE_CLASS:
-		if (IS_HASWELL(engine->i915)) {
-			cmd_tables = hsw_blt_ring_cmds;
-			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+		if (IS_GEN(engine->i915, 9)) {
+			cmd_tables = gen9_blt_cmd_table;
+			cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
+			engine->get_cmd_length_mask =
+				gen9_blt_get_cmd_length_mask;
+
+			/* BCS Engine unsafe without parser */
+			engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
+		} else if (IS_HASWELL(engine->i915)) {
+			cmd_tables = hsw_blt_ring_cmd_table;
+			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
 		} else {
-			cmd_tables = gen7_blt_cmds;
-			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+			cmd_tables = gen7_blt_cmd_table;
+			cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
 		}
 
-		if (IS_HASWELL(engine->i915)) {
+		if (IS_GEN(engine->i915, 9)) {
+			engine->reg_tables = gen9_blt_reg_tables;
+			engine->reg_table_count =
+				ARRAY_SIZE(gen9_blt_reg_tables);
+		} else if (IS_HASWELL(engine->i915)) {
 			engine->reg_tables = hsw_blt_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
 		} else {
 			engine->reg_tables = ivb_blt_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
 		}
-
-		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
 		break;
 	case VIDEO_ENHANCEMENT_CLASS:
-		cmd_tables = hsw_vebox_cmds;
-		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+		cmd_tables = hsw_vebox_cmd_table;
+		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
 		/* VECS can use the same length_mask function as VCS */
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
@@ -943,7 +1026,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 		return;
 	}
 
-	engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
+	engine->flags |= I915_ENGINE_USING_CMD_PARSER;
 }
 
 /**
@@ -955,7 +1038,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
  */
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
 {
-	if (!intel_engine_needs_cmd_parser(engine))
+	if (!intel_engine_using_cmd_parser(engine))
 		return;
 
 	fini_hash_table(engine);
@@ -1029,22 +1112,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
 }
 
 static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
 {
 	const struct drm_i915_reg_table *table = engine->reg_tables;
+	const struct drm_i915_reg_descriptor *reg = NULL;
 	int count = engine->reg_table_count;
 
-	for (; count > 0; ++table, --count) {
-		if (!table->master || is_master) {
-			const struct drm_i915_reg_descriptor *reg;
-
-			reg = __find_reg(table->regs, table->num_regs, addr);
-			if (reg != NULL)
-				return reg;
-		}
-	}
-
-	return NULL;
+	for (; !reg && (count > 0); ++table, --count)
+		reg = __find_reg(table->regs, table->num_regs, addr);
+
+	return reg;
 }
 
 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
@@ -1128,8 +1205,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 static bool check_cmd(const struct intel_engine_cs *engine,
 		      const struct drm_i915_cmd_descriptor *desc,
-		      const u32 *cmd, u32 length,
-		      const bool is_master)
+		      const u32 *cmd, u32 length)
 {
 	if (desc->flags & CMD_DESC_SKIP)
 		return true;
@@ -1139,12 +1215,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 		return false;
 	}
 
-	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
-		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
-				 *cmd);
-		return false;
-	}
-
 	if (desc->flags & CMD_DESC_REGISTER) {
 		/*
 		 * Get the distance between individual register offset
@@ -1158,7 +1228,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 		     offset += step) {
 			const u32 reg_addr = cmd[offset] & desc->reg.mask;
 			const struct drm_i915_reg_descriptor *reg =
-				find_reg(engine, is_master, reg_addr);
+				find_reg(engine, reg_addr);
 
 			if (!reg) {
 				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
@@ -1236,16 +1306,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 	return true;
 }
 
+static int check_bbstart(const struct i915_gem_context *ctx,
+			 u32 *cmd, u32 offset, u32 length,
+			 u32 batch_len,
+			 u64 batch_start,
+			 u64 shadow_batch_start)
+{
+	u64 jump_offset, jump_target;
+	u32 target_cmd_offset, target_cmd_index;
+
+	/* For igt compatibility on older platforms */
+	if (CMDPARSER_USES_GGTT(ctx->i915)) {
+		DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+		return -EACCES;
+	}
+
+	if (length != 3) {
+		DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+			  length);
+		return -EINVAL;
+	}
+
+	jump_target = *(u64*)(cmd+1);
+	jump_offset = jump_target - batch_start;
+
+	/*
+	 * Any underflow of jump_target is guaranteed to be outside the range
+	 * of a u32, so >= test catches both too large and too small
+	 */
+	if (jump_offset >= batch_len) {
+		DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+			  jump_target);
+		return -EINVAL;
+	}
+
+	/*
+	 * This cannot overflow a u32 because we already checked jump_offset
+	 * is within the BB, and the batch_len is a u32
+	 */
+	target_cmd_offset = lower_32_bits(jump_offset);
+	target_cmd_index = target_cmd_offset / sizeof(u32);
+
+	*(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+	if (target_cmd_index == offset)
+		return 0;
+
+	if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+		DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+		return -EINVAL;
+	} else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+		DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+			  jump_target);
+		return -EINVAL;
+	}
+
+	return 0;
+}
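The out-of-bounds test above leans on unsigned wraparound: a jump target below batch_start underflows to a huge u64, so a single >= comparison rejects jumps in both directions. A standalone sketch with illustrative numbers (not taken from the commit) demonstrates this:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the jump validation in check_bbstart(); all
 * addresses and lengths here are made up for illustration. */
int main(void)
{
	uint64_t batch_start = 0x100000; /* canonical address of the user batch */
	uint32_t batch_len   = 0x1000;   /* bytes covered by the scan */

	uint64_t targets[] = { 0x100200,   /* inside: offset 0x200, cmd index 128 */
			       0x101000,   /* one past the end: rejected */
			       0x0ff000 }; /* before the batch: wraps, rejected */

	for (int i = 0; i < 3; i++) {
		uint64_t jump_offset = targets[i] - batch_start;

		if (jump_offset >= batch_len)
			printf("0x%llx rejected\n", (unsigned long long)targets[i]);
		else
			printf("0x%llx ok, cmd index %llu\n",
			       (unsigned long long)targets[i],
			       (unsigned long long)(jump_offset / sizeof(uint32_t)));
	}
	return 0;
}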
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+	const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+	const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+	u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+	unsigned long *next_whitelist;
+
+	if (CMDPARSER_USES_GGTT(ctx->i915))
+		return;
+
+	if (batch_cmds <= ctx->jump_whitelist_cmds) {
+		bitmap_zero(ctx->jump_whitelist, batch_cmds);
+		return;
+	}
+
+again:
+	next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+	if (next_whitelist) {
+		kfree(ctx->jump_whitelist);
+		ctx->jump_whitelist = next_whitelist;
+		ctx->jump_whitelist_cmds =
+			next_size * BITS_PER_BYTE * sizeof(long);
+		return;
+	}
+
+	if (next_size > exact_size) {
+		next_size = exact_size;
+		goto again;
+	}
+
+	DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+	bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+
+	return;
+}
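The whitelist allocates one bit per potential command dword, preferring a power-of-two sized allocation and falling back to the exact size under memory pressure. The sizing arithmetic can be checked in isolation; a minimal sketch assuming a 64-bit long, with the kernel helpers re-derived locally:

#include <stdio.h>

#define BITS_PER_LONG	 (8 * (unsigned int)sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int batch_len = 8192;                 /* bytes, illustrative */
	unsigned int batch_cmds = (batch_len + 3) / 4; /* DIV_ROUND_UP: 2048 dwords */
	unsigned int exact_size = BITS_TO_LONGS(batch_cmds); /* 32 longs on 64-bit */

	/* jump_whitelist_cmds records how many command slots the bitmap tracks. */
	printf("%u dwords need %u longs -> %u trackable cmds\n",
	       batch_cmds, exact_size,
	       exact_size * 8 * (unsigned int)sizeof(long));
	return 0;
}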
 #define LENGTH_BIAS 2
 
 /**
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
  * @engine: the engine on which the batch is to execute
  * @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
  * @batch_start_offset: byte offset in the batch at which execution starts
  * @batch_len: length of the commands in batch_obj
- * @is_master: is the submitting process the drm master?
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
  *
  * Parses the specified batch buffer looking for privilege violations as
  * described in the overview.
@@ -1253,14 +1419,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+			    struct intel_engine_cs *engine,
 			    struct drm_i915_gem_object *batch_obj,
-			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 batch_start,
 			    u32 batch_start_offset,
 			    u32 batch_len,
-			    bool is_master)
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 shadow_batch_start)
 {
-	u32 *cmd, *batch_end;
+	u32 *cmd, *batch_end, offset = 0;
 	struct drm_i915_cmd_descriptor default_desc = noop_desc;
 	const struct drm_i915_cmd_descriptor *desc = &default_desc;
 	bool needs_clflush_after = false;
@@ -1274,6 +1443,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 		return PTR_ERR(cmd);
 	}
 
+	init_whitelist(ctx, batch_len);
+
 	/*
 	 * We use the batch length as size because the shadow object is as
 	 * large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1283,31 +1454,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 	do {
 		u32 length;
 
-		if (*cmd == MI_BATCH_BUFFER_END) {
-			if (needs_clflush_after) {
-				void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
-				drm_clflush_virt_range(ptr,
-						       (void *)(cmd + 1) - ptr);
-			}
+		if (*cmd == MI_BATCH_BUFFER_END)
 			break;
-		}
 
 		desc = find_cmd(engine, *cmd, desc, &default_desc);
 		if (!desc) {
 			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
 					 *cmd);
 			ret = -EINVAL;
-			break;
-		}
-
-		/*
-		 * If the batch buffer contains a chained batch, return an
-		 * error that tells the caller to abort and dispatch the
-		 * workload as a non-secure batch.
-		 */
-		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
-			ret = -EACCES;
-			break;
+			goto err;
 		}
 
 		if (desc->flags & CMD_DESC_FIXED)
@@ -1321,22 +1476,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 				 length,
 				 batch_end - cmd);
 			ret = -EINVAL;
-			break;
+			goto err;
 		}
 
-		if (!check_cmd(engine, desc, cmd, length, is_master)) {
+		if (!check_cmd(engine, desc, cmd, length)) {
 			ret = -EACCES;
+			goto err;
+		}
+
+		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+			ret = check_bbstart(ctx, cmd, offset, length,
+					    batch_len, batch_start,
+					    shadow_batch_start);
+
+			if (ret)
+				goto err;
 			break;
 		}
 
+		if (ctx->jump_whitelist_cmds > offset)
+			set_bit(offset, ctx->jump_whitelist);
+
 		cmd += length;
+		offset += length;
 		if (cmd >= batch_end) {
 			DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 			ret = -EINVAL;
-			break;
+			goto err;
 		}
 	} while (1);
 
+	if (needs_clflush_after) {
+		void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+
+		drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
+	}
+
+err:
 	i915_gem_object_unpin_map(shadow_batch_obj);
 	return ret;
 }
@@ -1357,7 +1533,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 	/* If the command parser is not enabled, report 0 - unsupported */
 	for_each_uabi_engine(engine, dev_priv) {
-		if (intel_engine_needs_cmd_parser(engine)) {
+		if (intel_engine_using_cmd_parser(engine)) {
 			active = true;
 			break;
 		}
@@ -1382,6 +1558,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 	 *    the parser enabled.
 	 * 9. Don't whitelist or handle oacontrol specially, as ownership
 	 *    for oacontrol state is moving to i915-perf.
+	 * 10. Support for Gen9 BCS Parsing
 	 */
-	return 9;
+	return 10;
 }
@@ -1614,9 +1614,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define VEBOX_MASK(dev_priv) \
 	ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
 
+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
+ * All later gens can run the final buffer from the ppgtt
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
+
 #define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
 #define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
 #define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
 #define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
 				  IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -1836,6 +1843,14 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			   unsigned long flags);
 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
 
+struct i915_vma * __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    const struct i915_ggtt_view *view,
+		    u64 size,
+		    u64 alignment,
+		    u64 flags);
+
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
 static inline int __must_check
@@ -1941,12 +1956,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *cxt,
+			    struct intel_engine_cs *engine,
 			    struct drm_i915_gem_object *batch_obj,
-			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 user_batch_start,
 			    u32 batch_start_offset,
 			    u32 batch_len,
-			    bool is_master);
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 shadow_batch_start);
 
 /* intel_device_info.c */
 static inline struct intel_device_info *
...
@@ -893,6 +893,20 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_address_space *vm = &dev_priv->ggtt.vm;
+
+	return i915_gem_object_pin(obj, vm, view, size, alignment,
+				   flags | PIN_GLOBAL);
+}
+
+struct i915_vma *
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    struct i915_address_space *vm,
+		    const struct i915_ggtt_view *view,
+		    u64 size,
+		    u64 alignment,
+		    u64 flags)
+{
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 	int ret;
@@ -958,7 +972,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 		return ERR_PTR(ret);
 	}
 
-	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+	ret = i915_vma_pin(vma, size, alignment, flags);
 	if (ret)
 		return ERR_PTR(ret);
...
@@ -63,7 +63,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
-		value = capable(CAP_SYS_ADMIN);
+		value = HAS_SECURE_BATCHES(i915) && capable(CAP_SYS_ADMIN);
 		break;
 	case I915_PARAM_CMD_PARSER_VERSION:
 		value = i915_cmd_parser_get_version(i915);
...
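Both uapi-visible changes in this hunk are discoverable through the existing getparam ioctl. A minimal userspace sketch, built against libdrm; the device path and error handling are illustrative, not part of this commit:

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	int version = 0, secure = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (fd < 0)
		return 1;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("cmd parser version: %d\n", version); /* 10 after this merge */

	gp.param = I915_PARAM_HAS_SECURE_BATCHES;
	gp.value = &secure;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("secure batches: %d\n", secure); /* now also requires gen < 6 */

	return 0;
}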
@@ -562,6 +562,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  */
 #define BCS_SWCTRL _MMIO(0x22200)
 
+/* There are 16 GPR registers */
+#define BCS_GPR(n)	_MMIO(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n)	_MMIO(0x22600 + (n) * 8 + 4)
+
 #define GPGPU_THREADS_DISPATCHED	_MMIO(0x2290)
 #define GPGPU_THREADS_DISPATCHED_UDW	_MMIO(0x2290 + 4)
 #define HS_INVOCATION_COUNT		_MMIO(0x2300)
@@ -7355,6 +7359,10 @@ enum {
 #define DMC_DEBUG3		_MMIO(0x101090)
 
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT	_MMIO(0x42060)
+#define  MMIO_TIMEOUT_US(us)	((us) << 0)
+
 /* interrupts */
 #define DE_MASTER_IRQ_CONTROL	(1 << 31)
 #define DE_SPRITEB_FLIP_DONE	(1 << 29)
...
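The GPR block whitelisted above is a plain stride-8 array of 64-bit registers, so the offsets follow directly from the BCS_GPR formula. A quick arithmetic check, with the offsets recomputed locally rather than read from the header:

#include <stdio.h>

int main(void)
{
	/* BCS_GPR(n) = 0x22600 + n * 8; the UDW half sits 4 bytes above. */
	for (int n = 0; n < 16; n++)
		printf("BCS_GPR(%2d) = 0x%05x, BCS_GPR_UDW(%2d) = 0x%05x\n",
		       n, 0x22600 + n * 8, n, 0x22600 + n * 8 + 4);
	return 0;
}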
@@ -107,6 +107,14 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
 	 */
 	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
 		   PWM1_GATING_DIS | PWM2_GATING_DIS);
+
+	/*
+	 * Lower the display internal timeout.
+	 * This is needed to avoid any hard hangs when DSI port PLL
+	 * is off and an MMIO access is attempted by any privileged
+	 * application, using batch buffers or any other means.
+	 */
+	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
 }
 
 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
...