Commit 3b539082 authored by Zou Nanhai, committed by Linus Torvalds

[PATCH] ia64/x86_64/s390 overlapping vma fix

IA64 is also vulnerable to the huge-vma-in-executable bug in the 64-bit ELF
support: it inserts a vma for the zero page without checking for overlap, so a
user can construct an ELF binary with a section starting at 0x0 to trigger this BUG().

However, I think it's safe to check for overlap before we actually insert a vma
into the vma list.  I also feel that checking for vma overlap at every call site
is unnecessary, because insert_vm_struct will check it again, so the check would
be duplicated.  It's better to have insert_vm_struct return a value and let the
caller check whether it succeeded.  Here is a patch against 2.6.10-rc2-mm3;
I have tested it on i386, x86_64 and ia64 machines.
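
For reference, every converted call site below follows the same pattern (this is
just the pattern condensed from the hunks in this patch, not additional code):
take mmap_sem, attempt the insert, and on failure drop the lock and free the
still-unlinked vma before bailing out.

	down_write(&current->mm->mmap_sem);
	if (insert_vm_struct(current->mm, vma)) {
		/* overlap (or other failure): unlock, free the unlinked vma, bail out */
		up_write(&current->mm->mmap_sem);
		kmem_cache_free(vm_area_cachep, vma);
		return;		/* or "return ret;" where the caller reports an error */
	}
	up_write(&current->mm->mmap_sem);
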
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Zou Nan hai <Nanhai.zou@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3b720a8b
@@ -100,7 +100,11 @@ ia64_elf32_init (struct pt_regs *regs)
 		vma->vm_ops = &ia32_shared_page_vm_ops;
 		down_write(&current->mm->mmap_sem);
 		{
-			insert_vm_struct(current->mm, vma);
+			if (insert_vm_struct(current->mm, vma)) {
+				kmem_cache_free(vm_area_cachep, vma);
+				up_write(&current->mm->mmap_sem);
+				return;
+			}
 		}
 		up_write(&current->mm->mmap_sem);
 	}
@@ -123,7 +127,11 @@ ia64_elf32_init (struct pt_regs *regs)
 		vma->vm_ops = &ia32_gate_page_vm_ops;
 		down_write(&current->mm->mmap_sem);
 		{
-			insert_vm_struct(current->mm, vma);
+			if (insert_vm_struct(current->mm, vma)) {
+				kmem_cache_free(vm_area_cachep, vma);
+				up_write(&current->mm->mmap_sem);
+				return;
+			}
 		}
 		up_write(&current->mm->mmap_sem);
 	}
@@ -142,7 +150,11 @@ ia64_elf32_init (struct pt_regs *regs)
 		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
 		down_write(&current->mm->mmap_sem);
 		{
-			insert_vm_struct(current->mm, vma);
+			if (insert_vm_struct(current->mm, vma)) {
+				kmem_cache_free(vm_area_cachep, vma);
+				up_write(&current->mm->mmap_sem);
+				return;
+			}
 		}
 		up_write(&current->mm->mmap_sem);
 	}
@@ -190,7 +202,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	unsigned long stack_base;
 	struct vm_area_struct *mpnt;
 	struct mm_struct *mm = current->mm;
-	int i;
+	int i, ret;

 	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
 	mm->arg_start = bprm->p + stack_base;
@@ -225,7 +237,11 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 		mpnt->vm_flags = VM_STACK_FLAGS;
 		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
 					PAGE_COPY_EXEC: PAGE_COPY;
-		insert_vm_struct(current->mm, mpnt);
+		if ((ret = insert_vm_struct(current->mm, mpnt))) {
+			up_write(&current->mm->mmap_sem);
+			kmem_cache_free(vm_area_cachep, mpnt);
+			return ret;
+		}
 		current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt);
 	}
@@ -131,7 +131,13 @@ ia64_init_addr_space (void)
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
-		insert_vm_struct(current->mm, vma);
+		down_write(&current->mm->mmap_sem);
+		if (insert_vm_struct(current->mm, vma)) {
+			up_write(&current->mm->mmap_sem);
+			kmem_cache_free(vm_area_cachep, vma);
+			return;
+		}
+		up_write(&current->mm->mmap_sem);
 	}

 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -143,7 +149,13 @@ ia64_init_addr_space (void)
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
 			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
-			insert_vm_struct(current->mm, vma);
+			down_write(&current->mm->mmap_sem);
+			if (insert_vm_struct(current->mm, vma)) {
+				up_write(&current->mm->mmap_sem);
+				kmem_cache_free(vm_area_cachep, vma);
+				return;
+			}
+			up_write(&current->mm->mmap_sem);
 		}
 	}
 }
@@ -39,7 +39,7 @@ int setup_arg_pages32(struct linux_binprm *bprm, int executable_stack)
 	unsigned long stack_base;
 	struct vm_area_struct *mpnt;
 	struct mm_struct *mm = current->mm;
-	int i;
+	int i, ret;

 	stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
 	mm->arg_start = bprm->p + stack_base;
@@ -68,7 +68,11 @@ int setup_arg_pages32(struct linux_binprm *bprm, int executable_stack)
 		/* executable stack setting would be applied here */
 		mpnt->vm_page_prot = PAGE_COPY;
 		mpnt->vm_flags = VM_STACK_FLAGS;
-		insert_vm_struct(mm, mpnt);
+		if ((ret = insert_vm_struct(mm, mpnt))) {
+			up_write(&mm->mmap_sem);
+			kmem_cache_free(vm_area_cachep, mpnt);
+			return ret;
+		}
 		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
 	}
@@ -334,7 +334,7 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
 	unsigned long stack_base;
 	struct vm_area_struct *mpnt;
 	struct mm_struct *mm = current->mm;
-	int i;
+	int i, ret;

 	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE;
 	mm->arg_start = bprm->p + stack_base;
@@ -368,7 +368,11 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
 		mpnt->vm_flags = VM_STACK_FLAGS;
 		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ?
 			PAGE_COPY_EXEC : PAGE_COPY;
-		insert_vm_struct(mm, mpnt);
+		if ((ret = insert_vm_struct(mm, mpnt))) {
+			up_write(&mm->mmap_sem);
+			kmem_cache_free(vm_area_cachep, mpnt);
+			return ret;
+		}
 		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
 	}
@@ -342,7 +342,7 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
 	unsigned long stack_base;
 	struct vm_area_struct *mpnt;
 	struct mm_struct *mm = current->mm;
-	int i;
+	int i, ret;
 	long arg_size;

 #ifdef CONFIG_STACK_GROWSUP
@@ -413,7 +413,6 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
 	down_write(&mm->mmap_sem);
 	{
-		struct vm_area_struct *vma;
 		mpnt->vm_mm = mm;
 #ifdef CONFIG_STACK_GROWSUP
 		mpnt->vm_start = stack_base;
@@ -434,13 +433,11 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
 		mpnt->vm_flags = VM_STACK_FLAGS;
 		mpnt->vm_flags |= mm->def_flags;
 		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
-		vma = find_vma(mm, mpnt->vm_start);
-		if (vma) {
+		if ((ret = insert_vm_struct(mm, mpnt))) {
 			up_write(&mm->mmap_sem);
 			kmem_cache_free(vm_area_cachep, mpnt);
-			return -ENOMEM;
+			return ret;
 		}
-		insert_vm_struct(mm, mpnt);
 		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
 	}
@@ -675,7 +675,7 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *,
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
 	struct vm_area_struct *, unsigned long addr, int new_below);
-extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
 	struct rb_node **, struct rb_node *);
 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
@@ -1871,7 +1871,7 @@ void exit_mmap(struct mm_struct *mm)
  * and into the inode's i_mmap tree. If vm_file is non-NULL
  * then i_mmap_lock is taken here.
  */
-void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
 	struct vm_area_struct * __vma, * prev;
 	struct rb_node ** rb_link, * rb_parent;
@@ -1894,8 +1894,9 @@ void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	}
 	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
 	if (__vma && __vma->vm_start < vma->vm_end)
-		BUG();
+		return -ENOMEM;
 	vma_link(mm, vma, prev, rb_link, rb_parent);
+	return 0;
 }

 /*