Commit 6865a163 authored by Matthew Wilcox, committed by Kai Germaschewski

[PATCH] fix expand_stack for upward-growing stacks

 - trivial: cache file->f_dentry->d_inode; saves a few bytes of compiled
   size.
 - move expand_stack inside ARCH_STACK_GROWSUP, add an alternate
   implementation for PA-RISC (see the page-rounding sketch below).
 - partially fix the comment (mmap_sem is held for READ, not for WRITE).
   The comment still doesn't quite make sense: it claims we don't need to
   take the spinlock until just before the point where we actually take it.
   I expect one of the VM hackers will know what the right thing is.
parent 3dab2bd8
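Not part of the patch: a minimal, standalone sketch of the page-rounding arithmetic that distinguishes the two expand_stack() variants in the diff below. The PAGE_SIZE value and the faulting address are made-up assumptions; the point is only that a downward-growing stack rounds the fault address down and lowers vm_start, while an upward-growing stack (ARCH_STACK_GROWSUP, as on PA-RISC) rounds up past the faulting word and raises vm_end.

/* Userspace demo of the rounding done by the two expand_stack() variants.
 * Assumptions: PAGE_SIZE is 4096 and the fault address is arbitrary. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long address = 0xbfff8123UL;   /* hypothetical faulting address */

        /* Stack grows down (default case): round down, lower vm_start. */
        unsigned long new_start = address & PAGE_MASK;

        /* Stack grows up (ARCH_STACK_GROWSUP, e.g. PA-RISC): cover the
         * faulting word by adding 4 + PAGE_SIZE - 1, round down, raise vm_end. */
        unsigned long new_end = (address + 4 + PAGE_SIZE - 1) & PAGE_MASK;

        printf("fault address             = %#lx\n", address);
        printf("grows-down: new vm_start  = %#lx\n", new_start);
        printf("grows-up:   new vm_end    = %#lx\n", new_end);
        return 0;
}

Compiled and run, this prints 0xbfff8000 for the grows-down vm_start and 0xbfff9000 for the grows-up vm_end against the example address.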
mm/mmap.c

@@ -422,6 +422,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 {
         struct mm_struct * mm = current->mm;
         struct vm_area_struct * vma, * prev;
+        struct inode *inode = NULL;
         unsigned int vm_flags;
         int correct_wcount = 0;
         int error;
@@ -469,17 +470,18 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
         }
 
         if (file) {
+                inode = file->f_dentry->d_inode;
                 switch (flags & MAP_TYPE) {
                 case MAP_SHARED:
                         if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
                                 return -EACCES;
 
                         /* Make sure we don't allow writing to an append-only file.. */
-                        if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
+                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                 return -EACCES;
 
                         /* make sure there are no mandatory locks on the file. */
-                        if (locks_verify_locked(file->f_dentry->d_inode))
+                        if (locks_verify_locked(inode))
                                 return -EAGAIN;
 
                         vm_flags |= VM_SHARED | VM_MAYSHARE;
@@ -603,7 +605,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
         vma_link(mm, vma, prev, rb_link, rb_parent);
         if (correct_wcount)
-                atomic_inc(&file->f_dentry->d_inode->i_writecount);
+                atomic_inc(&inode->i_writecount);
 
 out:
         mm->total_vm += len >> PAGE_SHIFT;
@@ -615,7 +617,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 unmap_and_free_vma:
         if (correct_wcount)
-                atomic_inc(&file->f_dentry->d_inode->i_writecount);
+                atomic_inc(&inode->i_writecount);
         vma->vm_file = NULL;
         fput(file);
@@ -755,38 +757,41 @@ struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
         return prev ? prev->vm_next : vma;
 }
 
+#ifdef ARCH_STACK_GROWSUP
 /*
- * vma is the first one with address < vma->vm_end,
- * and even address < vma->vm_start. Have to extend vma.
+ * vma is the first one with address > vma->vm_end.  Have to extend vma.
  */
 int expand_stack(struct vm_area_struct * vma, unsigned long address)
 {
         unsigned long grow;
 
+        if (!(vma->vm_flags & VM_GROWSUP))
+                return -EFAULT;
+
         /*
          * vma->vm_start/vm_end cannot change under us because the caller
-         * is required to hold the mmap_sem in write mode. We need to get
+         * is required to hold the mmap_sem in read mode. We need to get
          * the spinlock only before relocating the vma range ourself.
          */
+        address += 4 + PAGE_SIZE - 1;
         address &= PAGE_MASK;
         spin_lock(&vma->vm_mm->page_table_lock);
-        grow = (vma->vm_start - address) >> PAGE_SHIFT;
+        grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
         /* Overcommit.. */
-        if(!vm_enough_memory(grow)) {
+        if (!vm_enough_memory(grow)) {
                 spin_unlock(&vma->vm_mm->page_table_lock);
                 return -ENOMEM;
         }
 
-        if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
+        if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
                         ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
                         current->rlim[RLIMIT_AS].rlim_cur) {
                 spin_unlock(&vma->vm_mm->page_table_lock);
                 vm_unacct_memory(grow);
                 return -ENOMEM;
         }
-        vma->vm_start = address;
-        vma->vm_pgoff -= grow;
+        vma->vm_end = address;
         vma->vm_mm->total_vm += grow;
         if (vma->vm_flags & VM_LOCKED)
                 vma->vm_mm->locked_vm += grow;
@@ -794,7 +799,6 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
         return 0;
 }
 
-#ifdef ARCH_STACK_GROWSUP
 struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
 {
         struct vm_area_struct *vma, *prev;
@@ -811,6 +815,44 @@ struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
         return prev;
 }
 #else
+/*
+ * vma is the first one with address < vma->vm_start.  Have to extend vma.
+ */
+int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+        unsigned long grow;
+
+        /*
+         * vma->vm_start/vm_end cannot change under us because the caller
+         * is required to hold the mmap_sem in read mode. We need to get
+         * the spinlock only before relocating the vma range ourself.
+         */
+        address &= PAGE_MASK;
+        spin_lock(&vma->vm_mm->page_table_lock);
+        grow = (vma->vm_start - address) >> PAGE_SHIFT;
+
+        /* Overcommit.. */
+        if (!vm_enough_memory(grow)) {
+                spin_unlock(&vma->vm_mm->page_table_lock);
+                return -ENOMEM;
+        }
+
+        if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
+                        ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+                        current->rlim[RLIMIT_AS].rlim_cur) {
+                spin_unlock(&vma->vm_mm->page_table_lock);
+                vm_unacct_memory(grow);
+                return -ENOMEM;
+        }
+        vma->vm_start = address;
+        vma->vm_pgoff -= grow;
+        vma->vm_mm->total_vm += grow;
+        if (vma->vm_flags & VM_LOCKED)
+                vma->vm_mm->locked_vm += grow;
+        spin_unlock(&vma->vm_mm->page_table_lock);
+        return 0;
+}
+
 struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
 {
         struct vm_area_struct * vma;
...