Commit 97a89413 authored by Peter Zijlstra, committed by Linus Torvalds

mm: Remove i_mmap_lock lockbreak

Hugh says:
 "The only significant loser, I think, would be page reclaim (when
  concurrent with truncation): could spin for a long time waiting for
  the i_mmap_mutex it expects would soon be dropped?"

Counterpoints:
 - CPU contention makes the spin stop (need_resched())
 - zapping pages should free pages at a higher rate than reclaim
   ever can

I think the simplification of the truncate code is definitely worth it.

Effectively reverts commit 2aa15890 ("mm: prevent concurrent
unmap_mapping_range() on the same inode") and removes the lockbreak
code that caused the problem that commit worked around.
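
For context, the "lockbreak" being removed worked roughly like this:
while zapping a range, the code under i_mmap_lock would periodically
drop the lock when there was contention or need_resched(), record a
restart point, and rescan later, using mapping->truncate_count and
vma->vm_truncate_count to tell which vmas still needed work.  The
userspace sketch below only illustrates that drop-and-restart shape;
it is not kernel code, it breaks the lock unconditionally every BATCH
entries instead of checking contention, and every name in it
(zap_with_lockbreak, items, BATCH) is made up for the example.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NITEMS	1000
#define BATCH	64

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items[NITEMS];

/*
 * Scan all items under `lock', but break the lock every BATCH entries
 * so contending threads can get in, then resume from a saved restart
 * point -- the role restart_addr/vm_truncate_count played in the
 * kernel's version of this dance.
 */
static void zap_with_lockbreak(void)
{
	size_t restart = 0;
	size_t i;

	for (;;) {
		pthread_mutex_lock(&lock);
		for (i = restart; i < NITEMS; i++) {
			items[i] = 0;		/* "zap" one entry */
			if ((i + 1) % BATCH == 0)
				break;		/* time to break the lock */
		}
		pthread_mutex_unlock(&lock);
		if (i >= NITEMS)
			break;			/* whole range scanned */
		restart = i + 1;		/* resume here after the break */
		sched_yield();			/* let waiters take the lock */
	}
}

int main(void)
{
	zap_with_lockbreak();
	printf("zap complete\n");
	return 0;
}

Deleting the kernel's equivalent of `restart' and its bookkeeping is
what lets this commit drop truncate_count, vm_truncate_count and the
zap_details fields in the hunks below: the scan now simply holds the
lock across the whole range.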
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e4c70a66
fs/inode.c
@@ -331,7 +331,6 @@ void address_space_init_once(struct address_space *mapping)
 	spin_lock_init(&mapping->private_lock);
 	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
 	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
-	mutex_init(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(address_space_init_once);
include/linux/fs.h
@@ -635,7 +635,6 @@ struct address_space {
 	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
 	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
-	unsigned int		truncate_count;	/* Cover race condition with truncate */
 	unsigned long		nrpages;	/* number of total pages */
 	pgoff_t			writeback_index;/* writeback starts here */
 	const struct address_space_operations *a_ops;	/* methods */
@@ -644,7 +643,6 @@ struct address_space {
 	spinlock_t		private_lock;	/* for use by the address_space */
 	struct list_head	private_list;	/* ditto */
 	struct address_space	*assoc_mapping;	/* ditto */
-	struct mutex		unmap_mutex;	/* to protect unmapping */
 } __attribute__((aligned(sizeof(long))));
 	/*
 	 * On most architectures that alignment is already the case; but
include/linux/mm.h
@@ -895,8 +895,6 @@ struct zap_details {
 	struct address_space *check_mapping;	/* Check page->mapping if set */
 	pgoff_t	first_index;			/* Lowest page->index to unmap */
 	pgoff_t	last_index;			/* Highest page->index to unmap */
-	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
-	unsigned long truncate_count;		/* Compare vm_truncate_count */
 };
 
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
include/linux/mm_types.h
@@ -175,7 +175,6 @@ struct vm_area_struct {
 					   units, *not* PAGE_CACHE_SIZE */
 	struct file * vm_file;		/* File we map to (can be NULL). */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
-	unsigned long vm_truncate_count;/* truncate_count or restart_addr */
 
 #ifndef CONFIG_MMU
 	struct vm_region *vm_region;	/* NOMMU mapping region */
kernel/fork.c
@@ -386,7 +386,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			spin_lock(&mapping->i_mmap_lock);
 			if (tmp->vm_flags & VM_SHARED)
 				mapping->i_mmap_writable++;
-			tmp->vm_truncate_count = mpnt->vm_truncate_count;
 			flush_dcache_mmap_lock(mapping);
 			/* insert tmp into the share list, just after mpnt */
 			vma_prio_tree_add(tmp, mpnt);
mm/memory.c (large diff, collapsed in this view)
mm/mmap.c
@@ -445,10 +445,8 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vma->vm_file)
 		mapping = vma->vm_file->f_mapping;
 
-	if (mapping) {
+	if (mapping)
 		spin_lock(&mapping->i_mmap_lock);
-		vma->vm_truncate_count = mapping->truncate_count;
-	}
 
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	__vma_link_file(vma);
@@ -558,16 +556,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 		if (!(vma->vm_flags & VM_NONLINEAR))
 			root = &mapping->i_mmap;
 		spin_lock(&mapping->i_mmap_lock);
-		if (importer &&
-		    vma->vm_truncate_count != next->vm_truncate_count) {
-			/*
-			 * unmap_mapping_range might be in progress:
-			 * ensure that the expanding vma is rescanned.
-			 */
-			importer->vm_truncate_count = 0;
-		}
 		if (insert) {
-			insert->vm_truncate_count = vma->vm_truncate_count;
 			/*
 			 * Put into prio_tree now, so instantiated pages
 			 * are visible to arm/parisc __flush_dcache_page
mm/mremap.c
@@ -94,7 +94,6 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		 */
 		mapping = vma->vm_file->f_mapping;
 		spin_lock(&mapping->i_mmap_lock);
-		new_vma->vm_truncate_count = 0;
 	}
 
 	/*