Commit be819aa6 authored by Guo Ren

csky: Fixup arch_get_unmapped_area() implementation

The current arch_get_unmapped_area() in abiv1 doesn't use the standard
kernel API. Following the arch/arm implementation, reimplement it with
vm_unmapped_area() from linux/mm.h.
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
parent 5336c179
arch/csky/abiv1/inc/abi/page.h

 /* SPDX-License-Identifier: GPL-2.0 */
 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

-extern unsigned long shm_align_mask;
+#include <asm/shmparam.h>

 extern void flush_dcache_page(struct page *page);

 static inline unsigned long pages_do_alias(unsigned long addr1,
 					   unsigned long addr2)
 {
-	return (addr1 ^ addr2) & shm_align_mask;
+	return (addr1 ^ addr2) & (SHMLBA-1);
 }

 static inline void clear_user_page(void *addr, unsigned long vaddr,
......
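For context: pages_do_alias() reports whether two virtual addresses could land on different colours of an aliasing VIPT cache. Below is a standalone sketch of the arithmetic; it is not part of the commit, and it assumes SHMLBA is 4 * PAGE_SIZE = 0x4000, which appears to be the csky abiv1 value with 4 KiB pages. Note that the old hand-rolled mask, (0x4000 >> 1) - 1 = 0x1fff, covered only half of that window; masking with (SHMLBA - 1) keeps the check in sync with the SHMLBA definition in asm/shmparam.h.

#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define SHMLBA    (4 * PAGE_SIZE)	/* assumed csky abiv1 value */

/* Nonzero result: the two addresses sit at different offsets within an
 * SHMLBA-sized window, i.e. different cache colours, so they may alias. */
static unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & (SHMLBA - 1);
}

int main(void)
{
	/* Same colour: both are 0 mod SHMLBA -> prints 0 (no alias). */
	printf("%#lx\n", pages_do_alias(0x20000, 0x38000));
	/* Different colour: offsets differ by one page -> prints 0x1000. */
	printf("%#lx\n", pages_do_alias(0x20000, 0x21000));
	return 0;
}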
arch/csky/abiv1/mmap.c

@@ -9,58 +9,63 @@
 #include <linux/random.h>
 #include <linux/io.h>

-unsigned long shm_align_mask = (0x4000 >> 1) - 1;	/* Sane caches */
-
-#define COLOUR_ALIGN(addr, pgoff) \
-	((((addr) + shm_align_mask) & ~shm_align_mask) + \
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+#define COLOUR_ALIGN(addr,pgoff)		\
+	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
+	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+/*
+ * We need to ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches. We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ *
+ * We unconditionally provide this function for all cases.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	struct vm_area_struct *vmm;
-	int do_color_align;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int do_align = 0;
+	struct vm_unmapped_area_info info;

+	/*
+	 * We only need to do colour alignment if either the I or D
+	 * caches alias.
+	 */
+	do_align = filp || (flags & MAP_SHARED);
+
+	/*
+	 * We enforce the MAP_FIXED case.
+	 */
 	if (flags & MAP_FIXED) {
-		/*
-		 * We do not accept a shared mapping if it would violate
-		 * cache aliasing constraints.
-		 */
-		if ((flags & MAP_SHARED) &&
-			((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+		if (flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
 			return -EINVAL;
 		return addr;
 	}

 	if (len > TASK_SIZE)
 		return -ENOMEM;

-	do_color_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_color_align = 1;
 	if (addr) {
-		if (do_color_align)
+		if (do_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vmm = find_vma(current->mm, addr);
+
+		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vmm || addr + len <= vmm->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
-	addr = TASK_UNMAPPED_BASE;
-	if (do_color_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);

-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point: (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
-			return addr;
-		addr = vmm->vm_end;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
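To see how the vm_unmapped_area() parameters replace COLOUR_ALIGN: the allocator bumps a candidate gap so that (addr & align_mask) == (align_offset & align_mask), i.e. the mapping keeps the same offset within an SHMLBA window as its file offset. The sketch below is illustrative only; it mimics the alignment step vm_unmapped_area() applies in mm/mmap.c, under the same SHMLBA = 0x4000 assumption as above. The two helpers may pick different addresses for the same gap, but both land on the same cache colour.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define SHMLBA     (4 * PAGE_SIZE)	/* assumed csky abiv1 value */

/* The old hand-rolled alignment: round up to an SHMLBA boundary, then
 * add the file offset's colour bits. */
#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

/* Mimics the step vm_unmapped_area() applies to a candidate gap start:
 * advance addr to the next address whose colour bits match align_offset. */
static unsigned long vm_unmapped_area_align(unsigned long addr,
					    unsigned long pgoff)
{
	unsigned long align_mask = PAGE_MASK & (SHMLBA - 1);
	unsigned long align_offset = pgoff << PAGE_SHIFT;

	return addr + ((align_offset - addr) & align_mask);
}

int main(void)
{
	unsigned long pgoff = 3;	/* mapping starts at page 3 of the file */
	unsigned long gap = 0x2f000;	/* hypothetical free gap start */
	unsigned long a = COLOUR_ALIGN(gap, pgoff);
	unsigned long b = vm_unmapped_area_align(gap, pgoff);

	/* Both results have colour 0x3000, matching (pgoff << PAGE_SHIFT)
	 * mod SHMLBA, but the vm_unmapped_area() style can reuse the gap
	 * more tightly (0x2f000 vs 0x33000 here). */
	printf("COLOUR_ALIGN:           %#lx (colour %#lx)\n", a, a & (SHMLBA - 1));
	printf("vm_unmapped_area style: %#lx (colour %#lx)\n", b, b & (SHMLBA - 1));
	return 0;
}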