Commit fd1102f0 authored by Nicholas Piggin, committed by Linus Torvalds

mm: mmu_notifier fix for tlb_end_vma

The generic tlb_end_vma does not call the invalidate_range mmu notifier, and
it resets the mmu_gather range, which means the notifier won't be called
on part of the range if an unmap spans multiple vmas.
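As a rough standalone illustration of the problem (userspace C, not the
actual kernel code paths; struct gather, track(), notify(), and the
addresses are all made up for this sketch), the following models how
resetting the gathered range at the end of each vma without calling the
notifier drops the first vma's range from the notification:

/*
 * end_vma_buggy() mirrors the old generic __tlb_end_vma, which reset
 * the gathered range without invoking the notifier; notify() stands in
 * for mmu_notifier_invalidate_range().
 */
#include <stdio.h>

struct gather { unsigned long start, end; };

static void notify(unsigned long start, unsigned long end)
{
	if (end)
		printf("notifier invalidates [%#lx, %#lx)\n", start, end);
}

static void track(struct gather *g, unsigned long start, unsigned long end)
{
	if (start < g->start)
		g->start = start;
	if (end > g->end)
		g->end = end;
}

/* Old behaviour: flush (elided here) and reset, but never notify. */
static void end_vma_buggy(struct gather *g)
{
	g->start = ~0UL;
	g->end = 0;
}

/* Patched behaviour: notify over the gathered range before resetting. */
static void end_vma_fixed(struct gather *g)
{
	notify(g->start, g->end);
	g->start = ~0UL;
	g->end = 0;
}

int main(void)
{
	struct gather g = { ~0UL, 0 };

	track(&g, 0x1000, 0x2000);	/* unmap part of vma 1 */
	end_vma_buggy(&g);		/* [0x1000, 0x2000) silently lost */
	track(&g, 0x2000, 0x3000);	/* unmap part of vma 2 */
	notify(g.start, g.end);		/* final flush sees only vma 2 */

	(void)end_vma_fixed;		/* swap in for end_vma_buggy to fix */
	return 0;
}

With end_vma_fixed() in place of end_vma_buggy(), both sub-ranges are
reported, which is what routing __tlb_end_vma through
tlb_flush_mmu_tlbonly() achieves in the patch below.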

ARM64 seems to be the only arch I could see that has notifiers and uses
the generic tlb_end_vma.  I have not actually tested it.

[ Catalin and Will point out that ARM64 currently only uses the
  notifiers for KVM, which doesn't use the ->invalidate_range()
  callback right now, so it's a bug, but one that happens to
  not affect them.  So not necessary for stable.  - Linus ]
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d86564a2
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,6 +15,7 @@
 #ifndef _ASM_GENERIC__TLB_H
 #define _ASM_GENERIC__TLB_H
 
+#include <linux/mmu_notifier.h>
 #include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -138,6 +139,16 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	if (!tlb->end)
+		return;
+
+	tlb_flush(tlb);
+	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+	__tlb_reset_range(tlb);
+}
+
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
@@ -186,10 +197,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 
 #define __tlb_end_vma(tlb, vma)					\
 	do {							\
-		if (!tlb->fullmm && tlb->end) {			\
-			tlb_flush(tlb);				\
-			__tlb_reset_range(tlb);			\
-		}						\
+		if (!tlb->fullmm)				\
+			tlb_flush_mmu_tlbonly(tlb);		\
 	} while (0)
 
 #ifndef tlb_end_vma
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -238,16 +238,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	__tlb_reset_range(tlb);
 }
 
-static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-	if (!tlb->end)
-		return;
-
-	tlb_flush(tlb);
-	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
-	__tlb_reset_range(tlb);
-}
-
 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;