Commit ea25c431 authored by Linus Torvalds

Merge branch 'mmu_notifier_fixes'

Merge mmu_notifier fixes from Jérôme Glisse:
 "The invalidate_page callback suffered from 2 pitfalls. First it used
  to happen after page table lock was release and thus a new page might
  have been setup for the virtual address before the call to
  invalidate_page().

  This was fixed, in a roundabout way, by commit c7ab0d2f ("mm: convert
  try_to_unmap_one() to use page_vma_mapped_walk()"), which moved the
  callback under the page table lock. That change also broke several
  existing users of the mmu_notifier API that assumed they could sleep
  inside this callback.

  The second pitfall was that invalidate_page was the only callback that
  did not take an address range to invalidate; it was given a single
  address and a page instead. Many of the callback implementers assumed
  the page could never be a THP and thus failed to invalidate the
  appropriate range for THP pages.

  By killing this callback we unify the mmu_notifier callback API to
  always take a virtual address range as input.

  There are now two clear APIs (I am not mentioning the youngness API,
  which is seldom used); a sketch of a notifier using them follows the
  list:

   - invalidate_range_start()/end() callbacks (which allow you to sleep)

   - invalidate_range(), where you cannot sleep, but which happens right
     after the page table update, under the page table lock
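
  As an illustration only (not code from this series; the my_mn_* names
  are placeholders), such a notifier has this shape, with signatures
  matching the ones visible in the hunks below:

    #include <linux/mmu_notifier.h>

    /* May sleep: take mutexes, wait for the device to quiesce, etc. */
    static void my_mn_invalidate_range_start(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long start,
                                             unsigned long end)
    {
    }

    /* May sleep: re-arm device faulting, drop references taken in start(). */
    static void my_mn_invalidate_range_end(struct mmu_notifier *mn,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
    {
    }

    /* Runs under the CPU page table spinlock: must not sleep. */
    static void my_mn_invalidate_range(struct mmu_notifier *mn,
                                       struct mm_struct *mm,
                                       unsigned long start, unsigned long end)
    {
    }

    static const struct mmu_notifier_ops my_mn_ops = {
        .invalidate_range_start = my_mn_invalidate_range_start,
        .invalidate_range_end   = my_mn_invalidate_range_end,
        .invalidate_range       = my_mn_invalidate_range,
    };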

  Note that a lot of existing users look broken with respect to
  range_start()/range_end(). Many users only have a range_start()
  callback, but nothing prevents them from undoing what their
  range_start() callback invalidated after it returns but before any CPU
  page table update takes place.

  The code pattern used in KVM and in the IB umem ODP code is an example
  of how to properly avoid such a race. In a nutshell, use some kind of
  sequence number and an active-range-invalidation counter to block
  anything that might undo what the range_start() callback did.
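
  A minimal user-space sketch of that pattern (the struct and function
  names are invented for the example; the real thing lives in the KVM
  and IB umem ODP code):

    #include <pthread.h>
    #include <stdbool.h>

    struct mirror {
        pthread_mutex_t lock;
        unsigned long invalidate_seq;   /* bumped when an invalidation finishes */
        int active_invalidations;       /* > 0 while start()..end() is in flight */
    };

    static void mirror_range_start(struct mirror *m)
    {
        pthread_mutex_lock(&m->lock);
        m->active_invalidations++;
        /* ... tear down the secondary mappings covering the range ... */
        pthread_mutex_unlock(&m->lock);
    }

    static void mirror_range_end(struct mirror *m)
    {
        pthread_mutex_lock(&m->lock);
        m->invalidate_seq++;
        m->active_invalidations--;
        pthread_mutex_unlock(&m->lock);
    }

    /* Fault path: refuse to (re)establish a mapping that would undo
     * what a concurrent range_start() just invalidated. */
    static bool mirror_fault(struct mirror *m)
    {
        unsigned long seq;
        bool ok;

        pthread_mutex_lock(&m->lock);
        seq = m->invalidate_seq;
        pthread_mutex_unlock(&m->lock);

        /* ... resolve the page for the faulting address, may sleep ... */

        pthread_mutex_lock(&m->lock);
        ok = !m->active_invalidations && seq == m->invalidate_seq;
        if (ok) {
            /* ... install the secondary mapping ... */
        }
        pthread_mutex_unlock(&m->lock);
        return ok;  /* false means: retry the fault from scratch */
    }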

  If you do not care about staying fully in sync with the CPU page table
  (ie you can live with the CPU page table pointing to a new, different
  page for a given virtual address), then you can take a reference on
  the pages inside the range_start() callback and drop it in range_end(),
  or whenever your driver is done with those pages.
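
  A hedged sketch of that reference-holding alternative (the my_mirror
  bookkeeping is made up and a real driver would need locking around it;
  only get_page()/put_page() and the callback signatures are real):

    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>

    #define MY_MAX_PAGES 16

    struct my_mirror {
        struct mmu_notifier mn;
        unsigned long addrs[MY_MAX_PAGES];  /* user addresses the device maps */
        struct page *pages[MY_MAX_PAGES];   /* pages backing those addresses */
        int nr;
    };

    static void my_mirror_range_start(struct mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
    {
        struct my_mirror *m = container_of(mn, struct my_mirror, mn);
        int i;

        for (i = 0; i < m->nr; i++) {
            if (m->addrs[i] < start || m->addrs[i] >= end)
                continue;
            /* Pin the old page: the CPU page table may soon point
             * elsewhere, but device access to this page stays safe. */
            get_page(m->pages[i]);
        }
    }

    static void my_mirror_range_end(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
    {
        struct my_mirror *m = container_of(mn, struct my_mirror, mn);
        int i;

        for (i = 0; i < m->nr; i++) {
            if (m->addrs[i] < start || m->addrs[i] >= end)
                continue;
            put_page(m->pages[i]);  /* or defer until the device is done */
        }
    }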

  The last alternative is to use invalidate_range(), if you can do the
  invalidation without sleeping, as the invalidate_range() callback
  happens under the CPU page table spinlock right after the page table
  is updated.

  The first two patches convert the existing
  mmu_notifier_invalidate_page() calls to mmu_notifier_invalidate_range()
  and bracket those calls with calls to
  mmu_notifier_invalidate_range_start()/end().
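
  The shape of that conversion, as a hedged illustration (the real
  changes are in the page_mkclean_one/try_to_unmap_one and DAX hunks
  below; example_wrprotect() is a made-up caller):

    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>

    static void example_wrprotect(struct vm_area_struct *vma,
                                  unsigned long address)
    {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long start = address & PAGE_MASK;
        unsigned long end = start + PAGE_SIZE;

        mmu_notifier_invalidate_range_start(mm, start, end);

        /* ... walk to the pte and write-protect/clean it under its lock ... */

        /* Tell the non-sleeping listeners right after the CPU pte changed,
         * in place of the old mmu_notifier_invalidate_page(mm, address). */
        mmu_notifier_invalidate_range(mm, start, end);

        mmu_notifier_invalidate_range_end(mm, start, end);
    }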

  The next ten patches remove the existing invalidate_page() callbacks,
  as they can no longer be called.

  Finally, the last patch removes the invalidate_page() callback
  completely so it can RIP.

  Changes since v1:
   - removed more dead code in KVM (no testing impact)
   - more accurate end address computation (patch 2) in page_mkclean_one
     and try_to_unmap_one
   - added the Tested-by/Reviewed-by tags gathered so far"

* emailed patches from Jérôme Glisse <jglisse@redhat.com>:
  mm/mmu_notifier: kill invalidate_page
  KVM: update to new mmu_notifier semantic v2
  xen/gntdev: update to new mmu_notifier semantic
  sgi-gru: update to new mmu_notifier semantic
  misc/mic/scif: update to new mmu_notifier semantic
  iommu/intel: update to new mmu_notifier semantic
  iommu/amd: update to new mmu_notifier semantic
  IB/hfi1: update to new mmu_notifier semantic
  IB/umem: update to new mmu_notifier semantic
  drm/amdgpu: update to new mmu_notifier semantic
  powerpc/powernv: update to new mmu_notifier semantic
  mm/rmap: update to new mmu_notifier semantic v2
  dax: update to new mmu_notifier semantic
parents c227390c 5f32b265
@@ -225,12 +225,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                                         unsigned long address)
-{
-}
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
...
@@ -326,12 +326,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                                         unsigned long address)
-{
-}
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
...
@@ -938,11 +938,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                                         unsigned long address)
-{
-}
 /* Emulation */
 int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
...
@@ -67,11 +67,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                                         unsigned long address)
-{
-}
 #define HPTEG_CACHE_NUM (1 << 15)
 #define HPTEG_HASH_BITS_PTE 13
 #define HPTEG_HASH_BITS_PTE_LONG 12
...
@@ -614,15 +614,6 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
         mmio_invalidate(npu_context, 1, address, true);
 }
-static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
-                                        struct mm_struct *mm,
-                                        unsigned long address)
-{
-        struct npu_context *npu_context = mn_to_npu_context(mn);
-
-        mmio_invalidate(npu_context, 1, address, true);
-}
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
@@ -640,7 +631,6 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
         .release = pnv_npu2_mn_release,
         .change_pte = pnv_npu2_mn_change_pte,
-        .invalidate_page = pnv_npu2_mn_invalidate_page,
         .invalidate_range = pnv_npu2_mn_invalidate_range,
 };
...
@@ -1375,8 +1375,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
-void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                           unsigned long address);
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
...
@@ -6734,17 +6734,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
-void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
-                                           unsigned long address)
-{
-        /*
-         * The physical address of apic access page is stored in the VMCS.
-         * Update it when it becomes invalid.
-         */
-        if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
-                kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-}
 /*
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace. Otherwise, the value will be returned to the
...
@@ -146,36 +146,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
         }
 }
-/**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
-                                      struct mm_struct *mm,
-                                      unsigned long address)
-{
-        struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-        struct interval_tree_node *it;
-
-        mutex_lock(&rmn->lock);
-
-        it = interval_tree_iter_first(&rmn->objects, address, address);
-        if (it) {
-                struct amdgpu_mn_node *node;
-
-                node = container_of(it, struct amdgpu_mn_node, it);
-                amdgpu_mn_invalidate_node(node, address, address);
-        }
-
-        mutex_unlock(&rmn->lock);
-}
 /**
  * amdgpu_mn_invalidate_range_start - callback to notify about mm change
  *
@@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
         .release = amdgpu_mn_release,
-        .invalidate_page = amdgpu_mn_invalidate_page,
         .invalidate_range_start = amdgpu_mn_invalidate_range_start,
 };
...
@@ -166,24 +166,6 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
         return 0;
 }
-static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
-                                             struct mm_struct *mm,
-                                             unsigned long address)
-{
-        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
-        if (!context->invalidate_range)
-                return;
-
-        ib_ucontext_notifier_start_account(context);
-        down_read(&context->umem_rwsem);
-        rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
-                                      address + PAGE_SIZE,
-                                      invalidate_page_trampoline, NULL);
-        up_read(&context->umem_rwsem);
-        ib_ucontext_notifier_end_account(context);
-}
 static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
                                              u64 end, void *cookie)
 {
@@ -237,7 +219,6 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops ib_umem_notifiers = {
         .release = ib_umem_notifier_release,
-        .invalidate_page = ib_umem_notifier_invalidate_page,
         .invalidate_range_start = ib_umem_notifier_invalidate_range_start,
         .invalidate_range_end = ib_umem_notifier_invalidate_range_end,
 };
...
@@ -67,8 +67,6 @@ struct mmu_rb_handler {
 static unsigned long mmu_node_start(struct mmu_rb_node *);
 static unsigned long mmu_node_last(struct mmu_rb_node *);
-static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
-                                     unsigned long);
 static inline void mmu_notifier_range_start(struct mmu_notifier *,
                                             struct mm_struct *,
                                             unsigned long, unsigned long);
@@ -82,7 +80,6 @@ static void do_remove(struct mmu_rb_handler *handler,
 static void handle_remove(struct work_struct *work);
 static const struct mmu_notifier_ops mn_opts = {
-        .invalidate_page = mmu_notifier_page,
         .invalidate_range_start = mmu_notifier_range_start,
 };
@@ -285,12 +282,6 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
         handler->ops->remove(handler->ops_arg, node);
 }
-static inline void mmu_notifier_page(struct mmu_notifier *mn,
-                                     struct mm_struct *mm, unsigned long addr)
-{
-        mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
-}
 static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long start,
...
@@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
         return 0;
 }
-static void mn_invalidate_page(struct mmu_notifier *mn,
-                               struct mm_struct *mm,
-                               unsigned long address)
-{
-        __mn_flush_page(mn, address);
-}
 static void mn_invalidate_range(struct mmu_notifier *mn,
                                 struct mm_struct *mm,
                                 unsigned long start, unsigned long end)
@@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops iommu_mn = {
         .release = mn_release,
         .clear_flush_young = mn_clear_flush_young,
-        .invalidate_page = mn_invalidate_page,
         .invalidate_range = mn_invalidate_range,
 };
...
@@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
         intel_flush_svm_range(svm, address, 1, 1, 0);
 }
-static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
-                                  unsigned long address)
-{
-        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
-        intel_flush_svm_range(svm, address, 1, 1, 0);
-}
 /* Pages have been freed at this point */
 static void intel_invalidate_range(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
@@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops intel_mmuops = {
         .release = intel_mm_release,
         .change_pte = intel_change_pte,
-        .invalidate_page = intel_invalidate_page,
         .invalidate_range = intel_invalidate_range,
 };
...
@@ -200,16 +200,6 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn,
         schedule_work(&scif_info.misc_work);
 }
-static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
-                                              struct mm_struct *mm,
-                                              unsigned long address)
-{
-        struct scif_mmu_notif *mmn;
-
-        mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
-        scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
-}
 static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                      struct mm_struct *mm,
                                                      unsigned long start,
@@ -235,7 +225,6 @@ static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
         .release = scif_mmu_notifier_release,
         .clear_flush_young = NULL,
-        .invalidate_page = scif_mmu_notifier_invalidate_page,
         .invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
         .invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
...
@@ -247,17 +247,6 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
         gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
 }
-static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
-                                unsigned long address)
-{
-        struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
-                                                 ms_notifier);
-
-        STAT(mmu_invalidate_page);
-        gru_flush_tlb_range(gms, address, PAGE_SIZE);
-        gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address);
-}
 static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
         struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
@@ -269,7 +258,6 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops gru_mmuops = {
-        .invalidate_page = gru_invalidate_page,
         .invalidate_range_start = gru_invalidate_range_start,
         .invalidate_range_end = gru_invalidate_range_end,
         .release = gru_release,
...
@@ -484,13 +484,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
         mutex_unlock(&priv->lock);
 }
-static void mn_invl_page(struct mmu_notifier *mn,
-                         struct mm_struct *mm,
-                         unsigned long address)
-{
-        mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
-}
 static void mn_release(struct mmu_notifier *mn,
                        struct mm_struct *mm)
 {
@@ -522,7 +515,6 @@ static void mn_release(struct mmu_notifier *mn,
 static const struct mmu_notifier_ops gntdev_mmu_ops = {
         .release = mn_release,
-        .invalidate_page = mn_invl_page,
         .invalidate_range_start = mn_invl_range_start,
 };
...
@@ -646,11 +646,10 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
         pte_t pte, *ptep = NULL;
         pmd_t *pmdp = NULL;
         spinlock_t *ptl;
-        bool changed;
         i_mmap_lock_read(mapping);
         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
-                unsigned long address;
+                unsigned long address, start, end;
                 cond_resched();
@@ -658,8 +657,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                         continue;
                 address = pgoff_address(index, vma);
-                changed = false;
-                if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
+
+                /*
+                 * Note because we provide start/end to follow_pte_pmd it will
+                 * call mmu_notifier_invalidate_range_start() on our behalf
+                 * before taking any lock.
+                 */
+                if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
                         continue;
                 if (pmdp) {
@@ -676,7 +680,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                         pmd = pmd_wrprotect(pmd);
                         pmd = pmd_mkclean(pmd);
                         set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-                        changed = true;
+                        mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 unlock_pmd:
                         spin_unlock(ptl);
 #endif
@@ -691,13 +695,12 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                         pte = pte_wrprotect(pte);
                         pte = pte_mkclean(pte);
                         set_pte_at(vma->vm_mm, address, ptep, pte);
-                        changed = true;
+                        mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 unlock_pte:
                         pte_unmap_unlock(ptep, ptl);
                 }
-                if (changed)
-                        mmu_notifier_invalidate_page(vma->vm_mm, address);
+                mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
         }
         i_mmap_unlock_read(mapping);
 }
...
@@ -1260,6 +1260,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 void unmap_mapping_range(struct address_space *mapping,
                 loff_t const holebegin, loff_t const holelen, int even_cows);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                   unsigned long *start, unsigned long *end,
                    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
                unsigned long *pfn);
...
@@ -94,17 +94,6 @@ struct mmu_notifier_ops {
                            unsigned long address,
                            pte_t pte);
-        /*
-         * Before this is invoked any secondary MMU is still ok to
-         * read/write to the page previously pointed to by the Linux
-         * pte because the page hasn't been freed yet and it won't be
-         * freed until this returns. If required set_page_dirty has to
-         * be called internally to this method.
-         */
-        void (*invalidate_page)(struct mmu_notifier *mn,
-                                struct mm_struct *mm,
-                                unsigned long address);
         /*
          * invalidate_range_start() and invalidate_range_end() must be
          * paired and are called only when the mmap_sem and/or the
@@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
                                      unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                       unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                           unsigned long address);
 extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                   unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
@@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
                 __mmu_notifier_change_pte(mm, address, pte);
 }
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                                unsigned long address)
-{
-        if (mm_has_notifiers(mm))
-                __mmu_notifier_invalidate_page(mm, address);
-}
 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
 {
@@ -442,11 +422,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 {
 }
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                                unsigned long address)
-{
-}
 static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
 {
...
@@ -4008,7 +4008,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 #endif /* __PAGETABLE_PMD_FOLDED */
 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-                            pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
+                            unsigned long *start, unsigned long *end,
+                            pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
         pgd_t *pgd;
         p4d_t *p4d;
@@ -4035,17 +4036,29 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
                 if (!pmdpp)
                         goto out;
+                if (start && end) {
+                        *start = address & PMD_MASK;
+                        *end = *start + PMD_SIZE;
+                        mmu_notifier_invalidate_range_start(mm, *start, *end);
+                }
                 *ptlp = pmd_lock(mm, pmd);
                 if (pmd_huge(*pmd)) {
                         *pmdpp = pmd;
                         return 0;
                 }
                 spin_unlock(*ptlp);
+                if (start && end)
+                        mmu_notifier_invalidate_range_end(mm, *start, *end);
         }
         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                 goto out;
+        if (start && end) {
+                *start = address & PAGE_MASK;
+                *end = *start + PAGE_SIZE;
+                mmu_notifier_invalidate_range_start(mm, *start, *end);
+        }
         ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
         if (!pte_present(*ptep))
                 goto unlock;
@@ -4053,6 +4066,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
         return 0;
 unlock:
         pte_unmap_unlock(ptep, *ptlp);
+        if (start && end)
+                mmu_notifier_invalidate_range_end(mm, *start, *end);
 out:
         return -EINVAL;
 }
@@ -4064,20 +4079,21 @@ static inline int follow_pte(struct mm_struct *mm, unsigned long address,
         /* (void) is needed to make gcc happy */
         (void) __cond_lock(*ptlp,
-                           !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
-                                           ptlp)));
+                           !(res = __follow_pte_pmd(mm, address, NULL, NULL,
+                                                    ptepp, NULL, ptlp)));
         return res;
 }
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                   unsigned long *start, unsigned long *end,
                    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
         int res;
         /* (void) is needed to make gcc happy */
         (void) __cond_lock(*ptlp,
-                           !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
-                                           ptlp)));
+                           !(res = __follow_pte_pmd(mm, address, start, end,
+                                                    ptepp, pmdpp, ptlp)));
         return res;
 }
 EXPORT_SYMBOL(follow_pte_pmd);
...
@@ -174,20 +174,6 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
         srcu_read_unlock(&srcu, id);
 }
-void __mmu_notifier_invalidate_page(struct mm_struct *mm,
-                                    unsigned long address)
-{
-        struct mmu_notifier *mn;
-        int id;
-
-        id = srcu_read_lock(&srcu);
-        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-                if (mn->ops->invalidate_page)
-                        mn->ops->invalidate_page(mn, mm, address);
-        }
-        srcu_read_unlock(&srcu, id);
-}
 void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
 {
...
@@ -887,11 +887,21 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                 .address = address,
                 .flags = PVMW_SYNC,
         };
+        unsigned long start = address, end;
         int *cleaned = arg;
+        /*
+         * We have to assume the worse case ie pmd for invalidation. Note that
+         * the page can not be free from this function.
+         */
+        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
         while (page_vma_mapped_walk(&pvmw)) {
+                unsigned long cstart, cend;
                 int ret = 0;
-                address = pvmw.address;
+
+                cstart = address = pvmw.address;
                 if (pvmw.pte) {
                         pte_t entry;
                         pte_t *pte = pvmw.pte;
@@ -904,6 +914,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                         entry = pte_wrprotect(entry);
                         entry = pte_mkclean(entry);
                         set_pte_at(vma->vm_mm, address, pte, entry);
+                        cend = cstart + PAGE_SIZE;
                         ret = 1;
                 } else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -918,6 +929,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                         entry = pmd_wrprotect(entry);
                         entry = pmd_mkclean(entry);
                         set_pmd_at(vma->vm_mm, address, pmd, entry);
+                        cstart &= PMD_MASK;
+                        cend = cstart + PMD_SIZE;
                         ret = 1;
 #else
                         /* unexpected pmd-mapped page? */
@@ -926,11 +939,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                 }
                 if (ret) {
-                        mmu_notifier_invalidate_page(vma->vm_mm, address);
+                        mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
                         (*cleaned)++;
                 }
         }
+        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
         return true;
 }
@@ -1324,6 +1339,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         pte_t pteval;
         struct page *subpage;
         bool ret = true;
+        unsigned long start = address, end;
         enum ttu_flags flags = (enum ttu_flags)arg;
         /* munlock has nothing to gain from examining un-locked vmas */
@@ -1335,6 +1351,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                       flags & TTU_MIGRATION, page);
         }
+        /*
+         * We have to assume the worse case ie pmd for invalidation. Note that
+         * the page can not be free in this function as call of try_to_unmap()
+         * must hold a reference on the page.
+         */
+        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
         while (page_vma_mapped_walk(&pvmw)) {
                 /*
                  * If the page is mlock()d, we cannot swap it out.
@@ -1445,6 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
                                 WARN_ON_ONCE(1);
                                 ret = false;
+                                /* We have to invalidate as we cleared the pte */
                                 page_vma_mapped_walk_done(&pvmw);
                                 break;
                         }
@@ -1490,8 +1515,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 discard:
                 page_remove_rmap(subpage, PageHuge(page));
                 put_page(page);
-                mmu_notifier_invalidate_page(mm, address);
+                mmu_notifier_invalidate_range(mm, address,
+                                              address + PAGE_SIZE);
         }
+        mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
         return ret;
 }
...
@@ -322,47 +322,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
         return container_of(mn, struct kvm, mmu_notifier);
 }
-static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
-                                             struct mm_struct *mm,
-                                             unsigned long address)
-{
-        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-        int need_tlb_flush, idx;
-
-        /*
-         * When ->invalidate_page runs, the linux pte has been zapped
-         * already but the page is still allocated until
-         * ->invalidate_page returns. So if we increase the sequence
-         * here the kvm page fault will notice if the spte can't be
-         * established because the page is going to be freed. If
-         * instead the kvm page fault establishes the spte before
-         * ->invalidate_page runs, kvm_unmap_hva will release it
-         * before returning.
-         *
-         * The sequence increase only need to be seen at spin_unlock
-         * time, and not at spin_lock time.
-         *
-         * Increasing the sequence after the spin_unlock would be
-         * unsafe because the kvm page fault could then establish the
-         * pte after kvm_unmap_hva returned, without noticing the page
-         * is going to be freed.
-         */
-        idx = srcu_read_lock(&kvm->srcu);
-        spin_lock(&kvm->mmu_lock);
-
-        kvm->mmu_notifier_seq++;
-        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-        /* we've to flush the tlb before the pages can be freed */
-        if (need_tlb_flush)
-                kvm_flush_remote_tlbs(kvm);
-
-        spin_unlock(&kvm->mmu_lock);
-
-        kvm_arch_mmu_notifier_invalidate_page(kvm, address);
-
-        srcu_read_unlock(&kvm->srcu, idx);
-}
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long address,
@@ -510,7 +469,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 }
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
-        .invalidate_page = kvm_mmu_notifier_invalidate_page,
         .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
         .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
         .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
...