Commit d11da519 authored by Stefan Bader

Revert "mm: enlarge stack guard gap"

This reverts commit b9f2a4fb to be
replaced by the upstream patch set.

CVE-2017-1000364
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent 17fe844b
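
For context, CVE-2017-1000364 ("Stack Clash") is about how much unmapped space must separate a growing stack from the nearest other mapping. The patch being reverted enforced a large, tunable stack_guard_gap (256 pages by default) at stack-expansion time; this revert restores mainline's original single guard page, checked at fault time, until the upstream replacement set lands. As a rough illustration only — gap_ok() and the constants below are made up for this sketch, not kernel code — the two policies differ as follows:

/*
 * Illustrative only: compares the reverted 1 MiB guard-gap policy with
 * the restored single guard page. gap_ok() is a made-up helper, not a
 * kernel function.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Can a downward stack expansion to fault_addr leave 'required_gap'
 * bytes of free space above the mapping that ends at prev_end? */
static int gap_ok(unsigned long prev_end, unsigned long fault_addr,
		  unsigned long required_gap)
{
	return fault_addr - prev_end >= required_gap;
}

int main(void)
{
	unsigned long prev_end = 0x7f0000000000UL;          /* end of some mapping */
	unsigned long fault    = prev_end + 16 * PAGE_SIZE; /* 64 KiB above it */

	/* single guard page: behaviour restored by this revert */
	printf("guard page (4 KiB): %s\n",
	       gap_ok(prev_end, fault, PAGE_SIZE) ? "expand" : "SIGSEGV");
	/* stack_guard_gap = 256UL << PAGE_SHIFT: behaviour being reverted */
	printf("guard gap  (1 MiB): %s\n",
	       gap_ok(prev_end, fault, 256UL << PAGE_SHIFT) ? "expand" : "SIGSEGV");
	return 0;
}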
@@ -224,7 +224,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		 */
 		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
 			goto bad_area;
-		if (expand_upwards(vma, address, 0))
+		if (expand_upwards(vma, address))
 			goto bad_area;
 	}
 	goto good_area;
...
@@ -204,7 +204,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 #ifdef CONFIG_STACK_GROWSUP
 	if (write) {
-		ret = expand_downwards(bprm->vma, pos, 0);
+		ret = expand_downwards(bprm->vma, pos);
 		if (ret < 0)
 			return NULL;
 	}
@@ -218,12 +218,6 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
 		struct rlimit *rlim;
 
-		/*
-		 * GRWOSUP doesn't really have any gap at this stage because we grow
-		 * the stack down now. See the expand_downwards above.
-		 */
-		if (!IS_ENABLED(CONFIG_STACK_GROWSUP))
-			size -= stack_guard_gap;
 		acct_arg_size(bprm, size / PAGE_SIZE);
 
 		/*
...
@@ -298,14 +298,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
+	if (stack_guard_page_start(vma, start))
+		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (stack_guard_area(vma, start))
-			start += stack_guard_gap;
-	} else if (vma->vm_flags & VM_GROWSUP) {
-		if (stack_guard_area(vma, end))
-			end -= stack_guard_gap;
-	}
+	if (stack_guard_page_end(vma, end))
+		end -= PAGE_SIZE;
 
 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
...
@@ -1300,11 +1300,39 @@ int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 {
 	return !vma->vm_ops;
 }
 
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+					     unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+					   unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSUP) &&
+		(vma->vm_end == addr) &&
+		!vma_growsup(vma->vm_next, addr);
+}
+
 int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2007,22 +2035,16 @@ void page_cache_async_readahead(struct address_space *mapping,
 			    pgoff_t offset,
 			    unsigned long size);
 
-extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-extern int stack_guard_area(struct vm_area_struct *vma, unsigned long address);
 
 /* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
 extern int expand_downwards(struct vm_area_struct *vma,
-		unsigned long address, unsigned long gap);
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap);
+		unsigned long address);
 #if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma,
-		unsigned long address, unsigned long gap);
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-#define expand_upwards(vma, address, gap) (0)
+#define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
...
@@ -313,7 +313,9 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 		return -ENOENT;
 	/* For mm_populate(), just skip the stack guard page. */
-	if ((*flags & FOLL_POPULATE) && stack_guard_area(vma, address))
+	if ((*flags & FOLL_POPULATE) &&
+			(stack_guard_page_start(vma, address) ||
+			 stack_guard_page_end(vma, address + PAGE_SIZE)))
 		return -ENOENT;
 	if (*flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
...
@@ -2661,7 +2661,39 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return ret;
 }
 
+/*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+	address &= PAGE_MASK;
+	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+		struct vm_area_struct *prev = vma->vm_prev;
+
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+		return expand_downwards(vma, address - PAGE_SIZE);
+	}
+	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+		struct vm_area_struct *next = vma->vm_next;
+
+		/* As VM_GROWSDOWN but s/below/above/ */
+		if (next && next->vm_start == address + PAGE_SIZE)
+			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+		return expand_upwards(vma, address + PAGE_SIZE);
+	}
+	return 0;
+}
+
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
@@ -2684,8 +2716,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_SIGBUS;
 
 	/* Check if we need to add a guard page to the stack */
-	if ((vma->vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) &&
-	    expand_stack(vma, address) < 0)
+	if (check_stack_guard_page(vma, address) < 0)
 		return VM_FAULT_SIGSEGV;
 
 	/* Use the zero-page for reads */
...
@@ -2099,8 +2099,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow,
-	unsigned long gap)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
@@ -2113,7 +2112,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 	/* Stack limit test */
 	actual_size = size;
 	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= gap;
+		actual_size -= PAGE_SIZE;
 	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
@@ -2149,7 +2148,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end. Have to extend vma.
  */
-int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned long gap)
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int error = 0;
@@ -2157,6 +2156,12 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
+	/* Guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+4))
+		address = PAGE_ALIGN(address+4);
+	else
+		return -ENOMEM;
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
@@ -2177,7 +2182,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
 		error = -ENOMEM;
 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
-			error = acct_stack_growth(vma, size, grow, gap);
+			error = acct_stack_growth(vma, size, grow);
 			if (!error) {
 				/*
 				 * vma_gap_update() doesn't support concurrent
@@ -2219,7 +2224,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, unsigned l
  * vma is the first one with address < vma->vm_start. Have to extend vma.
  */
 int expand_downwards(struct vm_area_struct *vma,
-				   unsigned long address, unsigned long gap)
+				   unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int error;
@@ -2249,7 +2254,7 @@ int expand_downwards(struct vm_area_struct *vma,
 		error = -ENOMEM;
 		if (grow <= vma->vm_pgoff) {
-			error = acct_stack_growth(vma, size, grow, gap);
+			error = acct_stack_growth(vma, size, grow);
 			if (!error) {
 				/*
 				 * vma_gap_update() doesn't support concurrent
@@ -2284,72 +2289,29 @@ int expand_downwards(struct vm_area_struct *vma,
 	return error;
 }
 
-/* enforced gap between the expanding stack and other mappings. */
-unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
-
 /*
  * Note how expand_stack() refuses to expand the stack all the way to
  * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard area, after all
+ * a stack mapping. We want to leave room for a guard page, after all
  * (the guard page itself is not added here, that is done by the
  * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
  */
 #ifdef CONFIG_STACK_GROWSUP
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap)
-{
-	struct vm_area_struct *next = vma->vm_next;
-	unsigned long guard_gap = stack_guard_gap;
-	unsigned long guard_addr;
-
-	address = ALIGN(address, PAGE_SIZE);;
-	if (!next)
-		goto out;
-
-	if (next->vm_flags & VM_GROWSUP) {
-		guard_gap = min(guard_gap, next->vm_start - address);
-		goto out;
-	}
-
-	if (next->vm_start - address < guard_gap)
-		return -ENOMEM;
-out:
-	if (TASK_SIZE - address < guard_gap)
-		guard_gap = TASK_SIZE - address;
-	guard_addr = address + guard_gap;
-	*gap = guard_gap;
-	return guard_addr;
-}
-
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	unsigned long gap;
-	address = expandable_stack_area(vma, address, &gap);
-	if (IS_ERR_VALUE(address))
-		return -ENOMEM;
-	return expand_upwards(vma, address, gap);
-}
-
-int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
-{
 	struct vm_area_struct *next;
 
-	if (!(vma->vm_flags & VM_GROWSUP))
-		return 0;
-
-	/*
-	 * strictly speaking there is a guard gap between disjoint stacks
-	 * but the gap is not canonical (it might be smaller) and it is
-	 * reasonably safe to assume that we can ignore that gap for stack
-	 * POPULATE or /proc/<pid>[s]maps purposes
-	 */
+	address &= PAGE_MASK;
 	next = vma->vm_next;
-	if (next && next->vm_flags & VM_GROWSUP)
-		return 0;
-
-	return vma->vm_end - address <= stack_guard_gap;
+	if (next && next->vm_start == address + PAGE_SIZE) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+	}
+	return expand_upwards(vma, address);
 }
 
 struct vm_area_struct *
@@ -2368,73 +2330,17 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	return prev;
 }
 #else
-unsigned long expandable_stack_area(struct vm_area_struct *vma,
-		unsigned long address, unsigned long *gap)
-{
-	struct vm_area_struct *prev = vma->vm_prev;
-	unsigned long guard_gap = stack_guard_gap;
-	unsigned long guard_addr;
-
-	address &= PAGE_MASK;
-	if (!prev)
-		goto out;
-
-	/*
-	 * Is there a mapping abutting this one below?
-	 *
-	 * That's only ok if it's the same stack mapping
-	 * that has gotten split or there is sufficient gap
-	 * between mappings
-	 */
-	if (prev->vm_flags & VM_GROWSDOWN) {
-		guard_gap = min(guard_gap, address - prev->vm_end);
-		goto out;
-	}
-
-	if (address - prev->vm_end < guard_gap)
-		return -ENOMEM;
-out:
-	/* make sure we won't underflow */
-	if (address < mmap_min_addr)
-		return -ENOMEM;
-	if (address - mmap_min_addr < guard_gap)
-		guard_gap = address - mmap_min_addr;
-
-	guard_addr = address - guard_gap;
-	*gap = guard_gap;
-	return guard_addr;
-}
-
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	unsigned long gap;
-	address = expandable_stack_area(vma, address, &gap);
-	if (IS_ERR_VALUE(address))
-		return -ENOMEM;
-	return expand_downwards(vma, address, gap);
-}
-
-int stack_guard_area(struct vm_area_struct *vma, unsigned long address)
-{
 	struct vm_area_struct *prev;
 
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		return 0;
-
-	/*
-	 * strictly speaking there is a guard gap between disjoint stacks
-	 * but the gap is not canonical (it might be smaller) and it is
-	 * reasonably safe to assume that we can ignore that gap for stack
-	 * POPULATE or /proc/<pid>[s]maps purposes
-	 */
+	address &= PAGE_MASK;
 	prev = vma->vm_prev;
-	if (prev && prev->vm_flags & VM_GROWSDOWN)
-		return 0;
-
-	return address - vma->vm_start < stack_guard_gap;
+	if (prev && prev->vm_end == address) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+	}
+	return expand_downwards(vma, address);
 }
 
 struct vm_area_struct *
...
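
For a compact view of the rule that the restored check_stack_guard_page() and expand_stack() share — growth may abut a neighbouring mapping only when that neighbour is the same kind of stack, for example one that was split — here is a hedged userspace simulation; struct toy_vma and toy_check_guard() are invented stand-ins, not kernel structures:

/* Toy model (not kernel code) of the restored guard-page rule for a
 * downward-growing stack. */
#include <errno.h>
#include <stdio.h>

#define TOY_VM_GROWSDOWN 0x1UL	/* simplified flag; real value differs */

struct toy_vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct toy_vma *vm_prev;
};

/* Mirrors only the VM_GROWSDOWN branch of check_stack_guard_page():
 * an abutting mapping below the stack is acceptable only if it is
 * itself a (split) grows-down stack. */
static int toy_check_guard(const struct toy_vma *vma, unsigned long address)
{
	if ((vma->vm_flags & TOY_VM_GROWSDOWN) && address == vma->vm_start) {
		const struct toy_vma *prev = vma->vm_prev;

		if (prev && prev->vm_end == address)
			return (prev->vm_flags & TOY_VM_GROWSDOWN) ? 0 : -ENOMEM;
	}
	return 0;	/* no abutting neighbour: expansion would proceed */
}

int main(void)
{
	struct toy_vma heap  = { 0x10000, 0x20000, 0, NULL };
	struct toy_vma stack = { 0x20000, 0x30000, TOY_VM_GROWSDOWN, &heap };

	/* Faulting on the lowest stack page with a non-stack mapping
	 * directly below must fail: that page is the guard page. */
	printf("non-stack neighbour:   %d (expect -ENOMEM = -12)\n",
	       toy_check_guard(&stack, stack.vm_start));

	heap.vm_flags = TOY_VM_GROWSDOWN;	/* looks like a split stack now */
	printf("split-stack neighbour: %d (expect 0)\n",
	       toy_check_guard(&stack, stack.vm_start));
	return 0;
}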