Commit 542c6f6d authored by Thomas Hellstrom, committed by Dave Airlie

drm/ttm: Fix ttm in-kernel copying of pages with non-standard caching attributes.

For x86 this affected highmem pages only, since they were always kmapped
cache-coherent, and this is fixed using kmap_atomic_prot().

For other architectures that may not modify the linear kernel map, we
resort to vmap() for now, since kmap_atomic_prot() generally uses the
linear kernel map for lowmem pages. This of course comes with a
performance impact and should be optimized when possible.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 4677f15c
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 }
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-				unsigned long page)
+				unsigned long page,
+				pgprot_t prot)
 {
 	struct page *d = ttm_tt_get_page(ttm, page);
 	void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 		return -ENOMEM;
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = kmap(d);
+
+#ifdef CONFIG_X86
+	dst = kmap_atomic_prot(d, KM_USER0, prot);
+#else
+	if (prot != PAGE_KERNEL)
+		dst = vmap(&d, 1, 0, prot);
+	else
+		dst = kmap(d);
+#endif
 	if (!dst)
 		return -ENOMEM;
 
 	memcpy_fromio(dst, src, PAGE_SIZE);
-	kunmap(d);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(dst, KM_USER0);
+#else
+	if (prot != PAGE_KERNEL)
+		vunmap(dst);
+	else
+		kunmap(d);
+#endif
+
 	return 0;
 }
 
 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-				unsigned long page)
+				unsigned long page,
+				pgprot_t prot)
 {
 	struct page *s = ttm_tt_get_page(ttm, page);
 	void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 		return -ENOMEM;
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = kmap(s);
+#ifdef CONFIG_X86
+	src = kmap_atomic_prot(s, KM_USER0, prot);
+#else
+	if (prot != PAGE_KERNEL)
+		src = vmap(&s, 1, 0, prot);
+	else
+		src = kmap(s);
+#endif
	if (!src)
 		return -ENOMEM;
 
 	memcpy_toio(dst, src, PAGE_SIZE);
-	kunmap(s);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(src, KM_USER0);
+#else
+	if (prot != PAGE_KERNEL)
+		vunmap(src);
+	else
+		kunmap(s);
+#endif
+
 	return 0;
 }
 
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
 	for (i = 0; i < new_mem->num_pages; ++i) {
 		page = i * dir + add;
-		if (old_iomap == NULL)
-			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
-		else if (new_iomap == NULL)
-			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
-		else
+		if (old_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(old_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+						   prot);
+		} else if (new_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(new_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+						   prot);
+		} else
 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
 		if (ret)
 			goto out1;
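For readers following the logic rather than the diff, here is a standalone sketch of the per-page mapping strategy the patch applies. The helper names ttm_kmap_prot() and ttm_kunmap_prot() are hypothetical and introduced only for illustration; the patch itself open-codes this logic inside ttm_copy_io_ttm_page() and ttm_copy_ttm_io_page(), and the KM_USER0 argument reflects the kmap_atomic API of this kernel generation.

/*
 * Sketch only: map a single page with the requested caching attributes.
 * On x86, kmap_atomic_prot() installs a temporary mapping with the given
 * pgprot, so highmem pages are no longer kmapped cache-coherent behind
 * the caller's back.  On other architectures, where the linear kernel map
 * must not be modified, fall back to vmap() whenever a non-default
 * protection is requested.
 */
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *ttm_kmap_prot(struct page *p, pgprot_t prot)
{
#ifdef CONFIG_X86
	return kmap_atomic_prot(p, KM_USER0, prot);
#else
	/*
	 * pgprot_val() is used for the comparison so the sketch also builds
	 * where pgprot_t is a structure; the patch compares the values
	 * directly with "prot != PAGE_KERNEL".
	 */
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		return vmap(&p, 1, 0, prot);
	return kmap(p);
#endif
}

static void ttm_kunmap_prot(struct page *p, pgprot_t prot, void *virtual)
{
#ifdef CONFIG_X86
	kunmap_atomic(virtual, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(virtual);
	else
		kunmap(p);
#endif
}

The caller obtains prot by translating the memory region's placement flags, as the ttm_bo_move_memcpy() hunk above does with ttm_io_prot(old_mem->placement, PAGE_KERNEL) and ttm_io_prot(new_mem->placement, PAGE_KERNEL).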