Commit 6aacb5a3 authored by Chris Wilson's avatar Chris Wilson

drm/i915: Align start for memcpy_from_wc

The movntqda requires 16-byte alignment for the source pointer. Avoid
falling back to clflush if the source pointer is misaligned by doing
a small uncached memcpy to fix up the alignment.

v2: Turn the unaligned copy into a genuine helper
Signed-off-by: default avatarChris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: default avatarJoonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191211110437.4082687-5-chris@chris-wilson.co.uk
parent 51696691
...@@ -1151,13 +1151,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, ...@@ -1151,13 +1151,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
} }
src = ERR_PTR(-ENODEV); src = ERR_PTR(-ENODEV);
if (src_needs_clflush && if (src_needs_clflush && i915_has_memcpy_from_wc()) {
i915_can_memcpy_from_wc(NULL, offset, 0)) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC); src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
if (!IS_ERR(src)) { if (!IS_ERR(src)) {
i915_memcpy_from_wc(dst, i915_unaligned_memcpy_from_wc(dst,
src + offset, src + offset,
ALIGN(length, 16)); length);
i915_gem_object_unpin_map(src_obj); i915_gem_object_unpin_map(src_obj);
} }
} }
......
...@@ -27,6 +27,12 @@ ...@@ -27,6 +27,12 @@
#include "i915_memcpy.h" #include "i915_memcpy.h"
/*
 * CI_BUG_ON() behaves like BUG_ON() in debug/CI builds, but in production
 * builds compiles the expression away (BUILD_BUG_ON_INVALID still
 * syntax-checks it without emitting code), so release kernels pay no cost.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define CI_BUG_ON(expr) BUG_ON(expr)
#else
#define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
static DEFINE_STATIC_KEY_FALSE(has_movntdqa); static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
#ifdef CONFIG_AS_MOVNTDQA #ifdef CONFIG_AS_MOVNTDQA
...@@ -34,7 +40,6 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) ...@@ -34,7 +40,6 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{ {
kernel_fpu_begin(); kernel_fpu_begin();
len >>= 4;
while (len >= 4) { while (len >= 4) {
asm("movntdqa (%0), %%xmm0\n" asm("movntdqa (%0), %%xmm0\n"
"movntdqa 16(%0), %%xmm1\n" "movntdqa 16(%0), %%xmm1\n"
...@@ -59,6 +64,38 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) ...@@ -59,6 +64,38 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
kernel_fpu_end(); kernel_fpu_end();
} }
/*
 * __memcpy_ntdqu - copy @len 16-byte blocks from WC memory to an
 * unaligned destination.
 *
 * @dst: destination pointer; may be unaligned (stored with movups)
 * @src: source pointer; must be 16-byte aligned (movntdqa requires it)
 * @len: number of 16-byte blocks to copy (NOT bytes -- the caller in
 *       i915_unaligned_memcpy_from_wc() passes DIV_ROUND_UP(bytes, 16),
 *       so up to 15 bytes past the nominal end of @src may be read)
 *
 * Uses non-temporal streaming loads to pull from write-combined memory
 * without polluting the cache. SSE register use in the kernel must be
 * bracketed by kernel_fpu_begin()/kernel_fpu_end().
 */
static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
{
kernel_fpu_begin();
/* Bulk loop: 4 blocks (64 bytes) per iteration through xmm0-xmm3 */
while (len >= 4) {
asm("movntdqa (%0), %%xmm0\n"
"movntdqa 16(%0), %%xmm1\n"
"movntdqa 32(%0), %%xmm2\n"
"movntdqa 48(%0), %%xmm3\n"
"movups %%xmm0, (%1)\n"
"movups %%xmm1, 16(%1)\n"
"movups %%xmm2, 32(%1)\n"
"movups %%xmm3, 48(%1)\n"
:: "r" (src), "r" (dst) : "memory");
src += 64;
dst += 64;
len -= 4;
}
/* Tail: remaining blocks one at a time */
while (len--) {
asm("movntdqa (%0), %%xmm0\n"
"movups %%xmm0, (%1)\n"
:: "r" (src), "r" (dst) : "memory");
src += 16;
dst += 16;
}
kernel_fpu_end();
}
#else
/*
 * No MOVNTDQA assembler support: empty stubs so callers still link.
 * The aligned path is guarded by the has_movntdqa static branch; the
 * unaligned path relies on the caller honouring the
 * i915_has_memcpy_from_wc() contract (enforced by CI_BUG_ON in debug
 * builds), so these should not be reached with real work to do.
 */
static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
#endif #endif
/** /**
...@@ -83,17 +120,47 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len) ...@@ -83,17 +120,47 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15)) if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
return false; return false;
#ifdef CONFIG_AS_MOVNTDQA
if (static_branch_likely(&has_movntdqa)) { if (static_branch_likely(&has_movntdqa)) {
if (likely(len)) if (likely(len))
__memcpy_ntdqa(dst, src, len); __memcpy_ntdqa(dst, src, len >> 4);
return true; return true;
} }
#endif
return false; return false;
} }
/**
 * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC
 * @dst: destination pointer
 * @src: source pointer
 * @len: how many bytes to copy
 *
 * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
 * @src to @dst using non-temporal instructions where available, but
 * accepts that its arguments may not be aligned, but are valid for the
 * potential 16-byte read past the end.
 */
void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len)
{
unsigned long addr;

/* Caller contract: only valid when movntdqa support was detected */
CI_BUG_ON(!i915_has_memcpy_from_wc());

addr = (unsigned long)src;
if (!IS_ALIGNED(addr, 16)) {
/*
 * movntdqa needs a 16-byte-aligned source: copy the misaligned
 * head (at most 15 bytes, clamped to @len) with a plain memcpy
 * so the streaming copy below starts on an aligned boundary.
 */
unsigned long x = min(ALIGN(addr, 16) - addr, len);

memcpy(dst, src, x);

len -= x;
dst += x;
src += x;
}

if (likely(len))
/*
 * Rounding up to whole 16-byte blocks may read up to 15 bytes
 * past @src + @len; the kerneldoc above makes that part of the
 * caller's contract.
 */
__memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
}
void i915_memcpy_init_early(struct drm_i915_private *dev_priv) void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{ {
/* /*
......
...@@ -11,7 +11,9 @@ ...@@ -11,7 +11,9 @@
struct drm_i915_private; struct drm_i915_private;
void i915_memcpy_init_early(struct drm_i915_private *i915); void i915_memcpy_init_early(struct drm_i915_private *i915);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len); bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len);
/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment, /* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
* as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment