Commit b7eeb2b4 authored by Chris Wilson

drm/i915: Avoid mixing integer types during batch copies

Be consistent and use unsigned long throughout the chunk copies to
avoid the inherent clumsiness of mixing integer types of different
widths and signs. Failing to take account of a wider unsigned type when
using min_t can lead to it being treated as a negative value, only for
it to flip back to a large unsigned value after passing a boundary check.
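
To illustrate the failure mode, here is a minimal userspace sketch (not
kernel code), assuming the kernel's min_t() semantics of casting both
operands to the named type before comparing:

#include <stdio.h>

/* simplified model of the kernel's min_t(): cast both sides to 'type' */
#define min_t(type, a, b) \
	((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned long length = 0xffffffffUL;	/* large batch length */
	unsigned long room = 4096;		/* e.g. PAGE_SIZE - x */

	/*
	 * Cast to int, 0xffffffff becomes -1 (typical two's complement),
	 * so it compares as "smaller" than 4096.
	 */
	int len = min_t(int, length, room);
	printf("len = %d\n", len);		/* prints -1 */

	/* converted back to a wide unsigned type, -1 becomes huge again */
	printf("len = %lu\n", (unsigned long)len);
	return 0;
}

With both operands kept as unsigned long, plain min() suffices and no
narrowing occurs, which is what the patch does in copy_batch().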

Fixes: ed13033f ("drm/i915/cmdparser: Only cache the dst vmap")
Testcase: igt/gen9_exec_parse/bb-large
Reported-by: "Candelaria, Jared" <jared.candelaria@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: "Candelaria, Jared" <jared.candelaria@intel.com>
Cc: "Bloomfield, Jon" <jon.bloomfield@intel.com>
Cc: <stable@vger.kernel.org> # v4.9+
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200928215942.31917-1-chris@chris-wilson.co.uk
parent d3bb2f9b
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2267,8 +2267,8 @@ struct eb_parse_work {
 	struct i915_vma *batch;
 	struct i915_vma *shadow;
 	struct i915_vma *trampoline;
-	unsigned int batch_offset;
-	unsigned int batch_length;
+	unsigned long batch_offset;
+	unsigned long batch_length;
 };
 
 static int __eb_parse(struct dma_fence_work *work)
@@ -2338,6 +2338,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	struct eb_parse_work *pw;
 	int err;
 
+	GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
+	GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
+
 	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
 	if (!pw)
 		return -ENOMEM;
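
The two assertions above use the driver's overflows_type() helper to
catch values that would be silently truncated when stored in the
destination fields. A sketch of an equivalent check, assuming a
simplified definition (the real macro is defined elsewhere in the
driver):

#include <assert.h>

/*
 * Simplified stand-in for i915's overflows_type(x, T): nonzero when the
 * value of x cannot be represented in a variable of type T.  The shift
 * is only evaluated when sizeof(x) > sizeof(T), so it never shifts by
 * the operand's full width.
 */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && \
	 (unsigned long long)(x) >> (8 * sizeof(T)))

int main(void)
{
	unsigned long long big = 1ULL << 40;

	assert(overflows_type(big, int));	/* 2^40 cannot fit in int */
	assert(!overflows_type(4096ULL, int));	/* 4096 fits fine */
	return 0;
}
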
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1136,7 +1136,7 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
 static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		       struct drm_i915_gem_object *src_obj,
-		       u32 offset, u32 length)
+		       unsigned long offset, unsigned long length)
 {
 	bool needs_clflush;
 	void *dst, *src;
@@ -1166,8 +1166,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		}
 	}
 	if (IS_ERR(src)) {
+		unsigned long x, n;
 		void *ptr;
-		int x, n;
 
 		/*
 		 * We can avoid clflushing partial cachelines before the write
@@ -1184,7 +1184,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		ptr = dst;
 		x = offset_in_page(offset);
 		for (n = offset >> PAGE_SHIFT; length; n++) {
-			int len = min_t(int, length, PAGE_SIZE - x);
+			int len = min(length, PAGE_SIZE - x);
 
 			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
 			if (needs_clflush)
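
For context, this loop walks the source object a page at a time. A
simplified userspace analogue of the chunking arithmetic, keeping every
size and index as unsigned long as the patch does; PAGE_SHIFT, PAGE_SIZE
and offset_in_page() are stand-ins for the kernel definitions, and
chunked_copy() is a hypothetical helper for illustration:

#include <string.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))

/* Copy length bytes, starting at byte offset, out of an array of pages. */
static void chunked_copy(void *dst, char *const src_pages[],
			 unsigned long offset, unsigned long length)
{
	unsigned long x = offset_in_page(offset);	/* offset in first page */
	unsigned long n;
	char *ptr = dst;

	for (n = offset >> PAGE_SHIFT; length; n++) {
		/* both operands are unsigned long, so no min_t() narrowing */
		unsigned long len = length < PAGE_SIZE - x ?
				    length : PAGE_SIZE - x;

		memcpy(ptr, src_pages[n] + x, len);

		ptr += len;
		length -= len;
		x = 0;	/* later pages are copied from their start */
	}
}
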
@@ -1414,8 +1414,8 @@ static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
  */
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 			    struct i915_vma *batch,
-			    u32 batch_offset,
-			    u32 batch_length,
+			    unsigned long batch_offset,
+			    unsigned long batch_length,
 			    struct i915_vma *shadow,
 			    bool trampoline)
 {
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1931,8 +1931,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 			    struct i915_vma *batch,
-			    u32 batch_offset,
-			    u32 batch_length,
+			    unsigned long batch_offset,
+			    unsigned long batch_length,
 			    struct i915_vma *shadow,
 			    bool trampoline);
 #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8