Commit 4af9027d authored by Guo Ren

csky/dma: Fixup cache_op failed when cross memory ZONEs

If a (paddr, size) range crosses the boundary between the NORMAL_ZONE
and HIGHMEM_ZONE memory ranges, cache_op will panic in do_page_fault
with bad_area.

Optimize the code to support ranges that cross memory ZONEs.
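
The key to the fix is visible in the new cache_op() in the diff below:
each chunk is clamped to the current page boundary before fn() runs, so
a request that starts in NORMAL_ZONE and ends in HIGHMEM_ZONE is
processed one page at a time, each page mapped by whichever method is
valid for it. The chunking can be exercised on its own; the following
is a minimal user-space sketch assuming 4 KiB pages, where
cache_op_model() and flush_range() are illustrative stand-ins, not the
kernel symbols:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Stand-in for the per-chunk cache callback (dma_wbinv_range() etc.). */
static void flush_range(unsigned long start, unsigned long end)
{
        printf("flush [%#lx, %#lx) len=%lu\n", start, end, end - start);
}

/*
 * Model of the fixed walk: clamp each chunk at the next page boundary
 * so every callback invocation stays inside one page, letting each
 * page be mapped (linear map or kmap_atomic()) independently.
 */
static void cache_op_model(unsigned long paddr, size_t size,
                           void (*fn)(unsigned long, unsigned long))
{
        unsigned long offset = paddr & (PAGE_SIZE - 1);
        size_t left = size;

        do {
                size_t len = left;

                if (offset + len > PAGE_SIZE)
                        len = PAGE_SIZE - offset;

                fn(paddr, paddr + len);

                paddr += len;
                offset = 0;
                left -= len;
        } while (left);
}

int main(void)
{
        /* A range starting mid-page and spanning four pages. */
        cache_op_model(0x1f80, 0x2100, flush_range);
        return 0;
}

For the example range the model emits a 128-byte head chunk
[0x1f80, 0x2000), two full pages, and a 128-byte tail [0x4000, 0x4080).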

Changes for V2:
 - Revert back to postcore_initcall
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
parent 7f80fe20
@@ -20,69 +20,50 @@ static int __init atomic_pool_init(void)
 }
 postcore_initcall(atomic_pool_init);
 
-void arch_dma_prep_coherent(struct page *page, size_t size)
-{
-        if (PageHighMem(page)) {
-                unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-                do {
-                        void *ptr = kmap_atomic(page);
-                        size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;
-
-                        memset(ptr, 0, _size);
-                        dma_wbinv_range((unsigned long)ptr,
-                                        (unsigned long)ptr + _size);
-
-                        kunmap_atomic(ptr);
-
-                        page++;
-                        size -= PAGE_SIZE;
-                        count--;
-                } while (count);
-        } else {
-                void *ptr = page_address(page);
-
-                memset(ptr, 0, size);
-                dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
-        }
-}
-
 static inline void cache_op(phys_addr_t paddr, size_t size,
                             void (*fn)(unsigned long start, unsigned long end))
 {
-        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
-        unsigned int offset = paddr & ~PAGE_MASK;
+        struct page *page = phys_to_page(paddr);
+        void *start = __va(page_to_phys(page));
+        unsigned long offset = offset_in_page(paddr);
         size_t left = size;
-        unsigned long start;
 
         do {
                 size_t len = left;
 
+                if (offset + len > PAGE_SIZE)
+                        len = PAGE_SIZE - offset;
+
                 if (PageHighMem(page)) {
-                        void *addr;
+                        start = kmap_atomic(page);
 
-                        if (offset + len > PAGE_SIZE) {
-                                if (offset >= PAGE_SIZE) {
-                                        page += offset >> PAGE_SHIFT;
-                                        offset &= ~PAGE_MASK;
-                                }
-                                len = PAGE_SIZE - offset;
-                        }
+                        fn((unsigned long)start + offset,
+                           (unsigned long)start + offset + len);
 
-                        addr = kmap_atomic(page);
-                        start = (unsigned long)(addr + offset);
-
-                        fn(start, start + len);
-
-                        kunmap_atomic(addr);
+                        kunmap_atomic(start);
                 } else {
-                        start = (unsigned long)phys_to_virt(paddr);
-                        fn(start, start + size);
+                        fn((unsigned long)start + offset,
+                           (unsigned long)start + offset + len);
                 }
                 offset = 0;
+
                 page++;
+                start += PAGE_SIZE;
                 left -= len;
         } while (left);
 }
 
+static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
+{
+        memset((void *)start, 0, end - start);
+        dma_wbinv_range(start, end);
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+        cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
+}
+
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                               size_t size, enum dma_data_direction dir)
 {
...
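
A note on the shape of the refactor: arch_dma_prep_coherent() used to
carry its own highmem loop; after the change, zero-then-writeback is
expressed as the dma_wbinv_set_zero_range() callback and routed through
the same zone-safe cache_op() walker as the sync paths. A rough
user-space sketch of that composition, with printf standing in for
dma_wbinv_range(), which only exists in the kernel:

#include <stdio.h>
#include <string.h>

/* Stand-in for dma_wbinv_set_zero_range(): zero the chunk, then write
 * back and invalidate it. The walker guarantees each call covers at
 * most one page. */
static void set_zero_range_model(unsigned long start, unsigned long end)
{
        memset((void *)start, 0, end - start);
        printf("wbinv [%#lx, %#lx)\n", start, end);  /* dma_wbinv_range() here */
}

int main(void)
{
        static char buf[2 * 4096];

        /* Plugged into a cache_op()-style walk, this callback would be
         * invoked once per page-sized chunk of buf. */
        set_zero_range_model((unsigned long)buf,
                             (unsigned long)buf + sizeof(buf));
        return 0;
}

Routing everything through one walker means any future fix to the zone
handling lands in a single place.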