Commit 1335d9a1 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core fixes from Ingo Molnar:
 "This fixes a particularly thorny munmap() bug with MPX, plus fixes a
  host build environment assumption in objtool"

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Allow AR to be overridden with HOSTAR
  x86/mpx, mm/core: Fix recursive munmap() corruption
parents 4c4a5c99 8ea58f1e
...@@ -232,7 +232,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
extern void arch_exit_mmap(struct mm_struct *mm); extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm, static inline void arch_unmap(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
if (start <= mm->context.vdso_base && mm->context.vdso_base < end) if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
......
...@@ -22,7 +22,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) ...@@ -22,7 +22,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
} }
extern void arch_exit_mmap(struct mm_struct *mm); extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm, static inline void arch_unmap(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
} }
......
...@@ -88,7 +88,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, ...@@ -88,7 +88,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
} }
static inline void arch_unmap(struct mm_struct *mm, static inline void arch_unmap(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
} }
......
...@@ -278,8 +278,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm, ...@@ -278,8 +278,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
mpx_mm_init(mm); mpx_mm_init(mm);
} }
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
unsigned long start, unsigned long end) unsigned long end)
{ {
/* /*
* mpx_notify_unmap() goes and reads a rarely-hot * mpx_notify_unmap() goes and reads a rarely-hot
...@@ -299,7 +299,7 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -299,7 +299,7 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
* consistently wrong. * consistently wrong.
*/ */
if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX))) if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
mpx_notify_unmap(mm, vma, start, end); mpx_notify_unmap(mm, start, end);
} }
/* /*
......
...@@ -64,12 +64,15 @@ struct mpx_fault_info { ...@@ -64,12 +64,15 @@ struct mpx_fault_info {
}; };
#ifdef CONFIG_X86_INTEL_MPX #ifdef CONFIG_X86_INTEL_MPX
int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
int mpx_handle_bd_fault(void); extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
extern int mpx_handle_bd_fault(void);
static inline int kernel_managing_mpx_tables(struct mm_struct *mm) static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
{ {
return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR); return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
} }
static inline void mpx_mm_init(struct mm_struct *mm) static inline void mpx_mm_init(struct mm_struct *mm)
{ {
/* /*
...@@ -78,11 +81,10 @@ static inline void mpx_mm_init(struct mm_struct *mm) ...@@ -78,11 +81,10 @@ static inline void mpx_mm_init(struct mm_struct *mm)
*/ */
mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR; mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
} }
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long start, unsigned long end);
unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
unsigned long flags); extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
#else #else
static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs) static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
{ {
...@@ -100,7 +102,6 @@ static inline void mpx_mm_init(struct mm_struct *mm) ...@@ -100,7 +102,6 @@ static inline void mpx_mm_init(struct mm_struct *mm)
{ {
} }
static inline void mpx_notify_unmap(struct mm_struct *mm, static inline void mpx_notify_unmap(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
} }
......
...@@ -881,9 +881,10 @@ static int mpx_unmap_tables(struct mm_struct *mm, ...@@ -881,9 +881,10 @@ static int mpx_unmap_tables(struct mm_struct *mm,
* the virtual address region start...end have already been split if * the virtual address region start...end have already been split if
* necessary, and the 'vma' is the first vma in this range (start -> end). * necessary, and the 'vma' is the first vma in this range (start -> end).
*/ */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma, void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
unsigned long start, unsigned long end) unsigned long end)
{ {
struct vm_area_struct *vma;
int ret; int ret;
/* /*
...@@ -902,11 +903,12 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -902,11 +903,12 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
* which should not occur normally. Being strict about it here * which should not occur normally. Being strict about it here
* helps ensure that we do not have an exploitable stack overflow. * helps ensure that we do not have an exploitable stack overflow.
*/ */
do { vma = find_vma(mm, start);
while (vma && vma->vm_start < end) {
if (vma->vm_flags & VM_MPX) if (vma->vm_flags & VM_MPX)
return; return;
vma = vma->vm_next; vma = vma->vm_next;
} while (vma && vma->vm_start < end); }
ret = mpx_unmap_tables(mm, start, end); ret = mpx_unmap_tables(mm, start, end);
if (ret) if (ret)
......
...@@ -18,7 +18,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm) ...@@ -18,7 +18,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
} }
static inline void arch_unmap(struct mm_struct *mm, static inline void arch_unmap(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
} }
......
...@@ -2735,9 +2735,17 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, ...@@ -2735,9 +2735,17 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
return -EINVAL; return -EINVAL;
len = PAGE_ALIGN(len); len = PAGE_ALIGN(len);
end = start + len;
if (len == 0) if (len == 0)
return -EINVAL; return -EINVAL;
/*
* arch_unmap() might do unmaps itself. It must be called
* and finish any rbtree manipulation before this code
* runs and also starts to manipulate the rbtree.
*/
arch_unmap(mm, start, end);
/* Find the first overlapping VMA */ /* Find the first overlapping VMA */
vma = find_vma(mm, start); vma = find_vma(mm, start);
if (!vma) if (!vma)
...@@ -2746,7 +2754,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, ...@@ -2746,7 +2754,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
/* we have start < vma->vm_end */ /* we have start < vma->vm_end */
/* if it doesn't overlap, we have nothing.. */ /* if it doesn't overlap, we have nothing.. */
end = start + len;
if (vma->vm_start >= end) if (vma->vm_start >= end)
return 0; return 0;
...@@ -2816,12 +2823,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, ...@@ -2816,12 +2823,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
/* Detach vmas from rbtree */ /* Detach vmas from rbtree */
detach_vmas_to_be_unmapped(mm, vma, prev, end); detach_vmas_to_be_unmapped(mm, vma, prev, end);
/*
* mpx unmap needs to be called with mmap_sem held for write.
* It is safe to call it before unmap_region().
*/
arch_unmap(mm, vma, start, end);
if (downgrade) if (downgrade)
downgrade_write(&mm->mmap_sem); downgrade_write(&mm->mmap_sem);
......
...@@ -7,11 +7,12 @@ ARCH := x86 ...@@ -7,11 +7,12 @@ ARCH := x86
endif endif
# always use the host compiler # always use the host compiler
HOSTAR ?= ar
HOSTCC ?= gcc HOSTCC ?= gcc
HOSTLD ?= ld HOSTLD ?= ld
AR = $(HOSTAR)
CC = $(HOSTCC) CC = $(HOSTCC)
LD = $(HOSTLD) LD = $(HOSTLD)
AR = ar
ifeq ($(srctree),) ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR))) srctree := $(patsubst %/,%,$(dir $(CURDIR)))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment