Commit 01d8b20d authored by Peter Zijlstra, committed by Linus Torvalds

mm: simplify anon_vma refcounts

This patch changes the anon_vma refcount to be 0 when the object is free.
It does this by counting one extra reference for the anon_vma being in use
(i.e., while the anon_vma->head list is not empty).

This allows a simpler release scheme that does not have to check both the
refcount and the list, and it avoids taking a reference for each entry on
the list.
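
The lifecycle this sets up is easy to demonstrate outside the kernel. Below is a minimal userspace sketch of the scheme, using C11 atomics in place of the kernel's atomic_t and slab allocator; the demo_* names are illustrative only, not kernel API. The object is born with a refcount of 1 (the "in use" reference that stands for the non-empty anon_vma->head list), extra users take and drop plain references, and whichever put brings the count to 0 frees the object, with no separate list check at release time.

/*
 * Minimal userspace sketch of the refcount scheme above, using C11
 * atomics instead of the kernel's atomic_t. The demo_* names are
 * illustrative only.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct demo_anon_vma {
	atomic_int refcount;	/* 1 while in use, 0 once freeable */
};

static struct demo_anon_vma *demo_alloc(void)
{
	struct demo_anon_vma *av = malloc(sizeof(*av));

	if (av)
		atomic_init(&av->refcount, 1);	/* the "in use" reference */
	return av;
}

static void demo_get(struct demo_anon_vma *av)
{
	atomic_fetch_add(&av->refcount, 1);
}

static void demo_put(struct demo_anon_vma *av)
{
	/* atomic_fetch_sub returns the old value: 1 means we were last */
	if (atomic_fetch_sub(&av->refcount, 1) == 1)
		free(av);	/* refcount is 0: the object is free */
}

int main(void)
{
	struct demo_anon_vma *av = demo_alloc();

	if (!av)
		return 1;
	demo_get(av);	/* an extra user takes a reference */
	demo_put(av);	/* extra user drops it */
	demo_put(av);	/* "unlink" drops the in-use ref; count hits 0, freed */
	return 0;
}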
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 83813267
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -73,7 +73,13 @@ static inline void get_anon_vma(struct anon_vma *anon_vma)
 	atomic_inc(&anon_vma->refcount);
 }
 
-void put_anon_vma(struct anon_vma *);
+void __put_anon_vma(struct anon_vma *anon_vma);
+
+static inline void put_anon_vma(struct anon_vma *anon_vma)
+{
+	if (atomic_dec_and_test(&anon_vma->refcount))
+		__put_anon_vma(anon_vma);
+}
 
 static inline struct anon_vma *page_anon_vma(struct page *page)
 {
@@ -116,7 +122,6 @@ void unlink_anon_vmas(struct vm_area_struct *);
 int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
 int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
 void __anon_vma_link(struct vm_area_struct *);
-void anon_vma_free(struct anon_vma *);
 
 static inline void anon_vma_merge(struct vm_area_struct *vma,
 		struct vm_area_struct *next)
@@ -125,6 +130,8 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 	unlink_anon_vmas(next);
 }
 
+struct anon_vma *page_get_anon_vma(struct page *page);
+
 /*
  * rmap interfaces called when adding or removing pte of page
  */
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -67,11 +67,24 @@ static struct kmem_cache *anon_vma_chain_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
-	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
+	struct anon_vma *anon_vma;
+
+	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
+	if (anon_vma) {
+		atomic_set(&anon_vma->refcount, 1);
+		/*
+		 * Initialise the anon_vma root to point to itself. If called
+		 * from fork, the root will be reset to the parents anon_vma.
+		 */
+		anon_vma->root = anon_vma;
+	}
+
+	return anon_vma;
 }
 
-void anon_vma_free(struct anon_vma *anon_vma)
+static inline void anon_vma_free(struct anon_vma *anon_vma)
 {
+	VM_BUG_ON(atomic_read(&anon_vma->refcount));
 	kmem_cache_free(anon_vma_cachep, anon_vma);
 }
 
@@ -133,11 +146,6 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 		if (unlikely(!anon_vma))
 			goto out_enomem_free_avc;
 		allocated = anon_vma;
-		/*
-		 * This VMA had no anon_vma yet. This anon_vma is
-		 * the root of any anon_vma tree that might form.
-		 */
-		anon_vma->root = anon_vma;
 	}
 
 	anon_vma_lock(anon_vma);
@@ -156,7 +164,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 	anon_vma_unlock(anon_vma);
 
 	if (unlikely(allocated))
-		anon_vma_free(allocated);
+		put_anon_vma(allocated);
 	if (unlikely(avc))
 		anon_vma_chain_free(avc);
 }
@@ -241,9 +249,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	 */
 	anon_vma->root = pvma->anon_vma->root;
 	/*
-	 * With KSM refcounts, an anon_vma can stay around longer than the
-	 * process it belongs to. The root anon_vma needs to be pinned
-	 * until this anon_vma is freed, because the lock lives in the root.
+	 * With refcounts, an anon_vma can stay around longer than the
+	 * process it belongs to. The root anon_vma needs to be pinned until
+	 * this anon_vma is freed, because the lock lives in the root.
 	 */
 	get_anon_vma(anon_vma->root);
 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
@@ -253,7 +261,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	return 0;
 
  out_error_free_anon_vma:
-	anon_vma_free(anon_vma);
+	put_anon_vma(anon_vma);
  out_error:
 	unlink_anon_vmas(vma);
 	return -ENOMEM;
@@ -272,15 +280,11 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !atomic_read(&anon_vma->refcount);
+	empty = list_empty(&anon_vma->head);
 	anon_vma_unlock(anon_vma);
 
-	if (empty) {
-		/* We no longer need the root anon_vma */
-		if (anon_vma->root != anon_vma)
-			put_anon_vma(anon_vma->root);
-		anon_vma_free(anon_vma);
-	}
+	if (empty)
+		put_anon_vma(anon_vma);
 }
 
 void unlink_anon_vmas(struct vm_area_struct *vma)
@@ -1486,38 +1490,14 @@ int try_to_munlock(struct page *page)
 	return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
-/*
- * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root
- * if necessary. Be careful to do all the tests under the lock. Once
- * we know we are the last user, nobody else can get a reference and we
- * can do the freeing without the lock.
- */
-void put_anon_vma(struct anon_vma *anon_vma)
+void __put_anon_vma(struct anon_vma *anon_vma)
 {
-	BUG_ON(atomic_read(&anon_vma->refcount) <= 0);
-	if (atomic_dec_and_lock(&anon_vma->refcount, &anon_vma->root->lock)) {
-		struct anon_vma *root = anon_vma->root;
-		int empty = list_empty(&anon_vma->head);
-		int last_root_user = 0;
-		int root_empty = 0;
+	struct anon_vma *root = anon_vma->root;
 
-		/*
-		 * The refcount on a non-root anon_vma got dropped. Drop
-		 * the refcount on the root and check if we need to free it.
-		 */
-		if (empty && anon_vma != root) {
-			BUG_ON(atomic_read(&root->refcount) <= 0);
-			last_root_user = atomic_dec_and_test(&root->refcount);
-			root_empty = list_empty(&root->head);
-		}
-		anon_vma_unlock(anon_vma);
+	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
+		anon_vma_free(root);
 
-		if (empty) {
-			anon_vma_free(anon_vma);
-			if (root_empty && last_root_user)
-				anon_vma_free(root);
-		}
-	}
+	anon_vma_free(anon_vma);
 }
 
 #ifdef CONFIG_MIGRATION