Commit 6367b491 authored by Al Viro

retain_dentry(): introduce a trimmed-down lockless variant

	fast_dput() contains a small piece of code, preceded by scary
comments about 5 times longer than it.	What is actually done there is
a trimmed-down subset of retain_dentry() - in some situations we can
tell that retain_dentry() would have returned true without ever needing
->d_lock and that's what that code checks.  If these checks come true
fast_dput() can declare that we are done, without bothering with ->d_lock;
otherwise it has to take the lock and do full variant of retain_dentry()
checks.

	Trimmed-down variant of the checks is hard to follow and
it's asking for trouble - if we ever decide to change the rules in
retain_dentry(), we'll have to remember to update that code.  It turns
out that an equivalent variant of these checks more obviously parallel
to retain_dentry() is not just possible, but easy to unify with
retain_dentry() itself, passing it a new boolean argument ('locked')
to distinguish between the full semantics and trimmed down one.

	Note that in lockless case true is returned only when locked
variant would have returned true without ever needing the lock; false
means "punt to the locking path and recheck there".
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 1c18edd1
...@@ -665,30 +665,57 @@ static bool lock_for_kill(struct dentry *dentry) ...@@ -665,30 +665,57 @@ static bool lock_for_kill(struct dentry *dentry)
return false; return false;
} }
static inline bool retain_dentry(struct dentry *dentry) /*
* Decide if dentry is worth retaining. Usually this is called with dentry
* locked; if not locked, we are more limited and might not be able to tell
* without a lock. False in this case means "punt to locked path and recheck".
*
* In case we aren't locked, these predicates are not "stable". However, it is
* sufficient that at some point after we dropped the reference the dentry was
* hashed and the flags had the proper value. Other dentry users may have
* re-gotten a reference to the dentry and change that, but our work is done -
* we can leave the dentry around with a zero refcount.
*/
static inline bool retain_dentry(struct dentry *dentry, bool locked)
{ {
WARN_ON(d_in_lookup(dentry)); unsigned int d_flags;
smp_rmb();
d_flags = READ_ONCE(dentry->d_flags);
/* Unreachable? Get rid of it */ // Unreachable? Nobody would be able to look it up, no point retaining
if (unlikely(d_unhashed(dentry))) if (unlikely(d_unhashed(dentry)))
return false; return false;
if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) // Same if it's disconnected
if (unlikely(d_flags & DCACHE_DISCONNECTED))
return false; return false;
if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) { // ->d_delete() might tell us not to bother, but that requires
if (dentry->d_op->d_delete(dentry)) // ->d_lock; can't decide without it
if (unlikely(d_flags & DCACHE_OP_DELETE)) {
if (!locked || dentry->d_op->d_delete(dentry))
return false; return false;
} }
if (unlikely(dentry->d_flags & DCACHE_DONTCACHE)) // Explicitly told not to bother
if (unlikely(d_flags & DCACHE_DONTCACHE))
return false; return false;
/* retain; LRU fodder */ // At this point it looks like we ought to keep it. We also might
if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) // need to do something - put it on LRU if it wasn't there already
// and mark it referenced if it was on LRU, but not marked yet.
// Unfortunately, both actions require ->d_lock, so in lockless
// case we'd have to punt rather than doing those.
if (unlikely(!(d_flags & DCACHE_LRU_LIST))) {
if (!locked)
return false;
d_lru_add(dentry); d_lru_add(dentry);
else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED))) } else if (unlikely(!(d_flags & DCACHE_REFERENCED))) {
if (!locked)
return false;
dentry->d_flags |= DCACHE_REFERENCED; dentry->d_flags |= DCACHE_REFERENCED;
}
return true; return true;
} }
...@@ -720,7 +747,6 @@ EXPORT_SYMBOL(d_mark_dontcache); ...@@ -720,7 +747,6 @@ EXPORT_SYMBOL(d_mark_dontcache);
static inline bool fast_dput(struct dentry *dentry) static inline bool fast_dput(struct dentry *dentry)
{ {
int ret; int ret;
unsigned int d_flags;
/* /*
* try to decrement the lockref optimistically. * try to decrement the lockref optimistically.
...@@ -749,45 +775,18 @@ static inline bool fast_dput(struct dentry *dentry) ...@@ -749,45 +775,18 @@ static inline bool fast_dput(struct dentry *dentry)
return true; return true;
/* /*
* Careful, careful. The reference count went down * Can we decide that decrement of refcount is all we needed without
* to zero, but we don't hold the dentry lock, so * taking the lock? There's a very common case when it's all we need -
* somebody else could get it again, and do another * dentry looks like it ought to be retained and there's nothing else
* dput(), and we need to not race with that. * to do.
*
* However, there is a very special and common case
* where we don't care, because there is nothing to
* do: the dentry is still hashed, it does not have
* a 'delete' op, and it's referenced and already on
* the LRU list.
*
* NOTE! Since we aren't locked, these values are
* not "stable". However, it is sufficient that at
* some point after we dropped the reference the
* dentry was hashed and the flags had the proper
* value. Other dentry users may have re-gotten
* a reference to the dentry and change that, but
* our work is done - we can leave the dentry
* around with a zero refcount.
*
* Nevertheless, there are two cases that we should kill
* the dentry anyway.
* 1. free disconnected dentries as soon as their refcount
* reached zero.
* 2. free dentries if they should not be cached.
*/ */
smp_rmb(); if (retain_dentry(dentry, false))
d_flags = READ_ONCE(dentry->d_flags);
d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_OP_DELETE |
DCACHE_DISCONNECTED | DCACHE_DONTCACHE;
/* Nothing to do? Dropping the reference was all we needed? */
if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
return true; return true;
/* /*
* Not the fast normal case? Get the lock. We've already decremented * Either not worth retaining or we can't tell without the lock.
* the refcount, but we'll need to re-check the situation after * Get the lock, then. We've already decremented the refcount to 0,
* getting the lock. * but we'll need to re-check the situation after getting the lock.
*/ */
spin_lock(&dentry->d_lock); spin_lock(&dentry->d_lock);
...@@ -798,7 +797,7 @@ static inline bool fast_dput(struct dentry *dentry) ...@@ -798,7 +797,7 @@ static inline bool fast_dput(struct dentry *dentry)
* don't need to do anything else. * don't need to do anything else.
*/ */
locked: locked:
if (dentry->d_lockref.count || retain_dentry(dentry)) { if (dentry->d_lockref.count || retain_dentry(dentry, true)) {
spin_unlock(&dentry->d_lock); spin_unlock(&dentry->d_lock);
return true; return true;
} }
...@@ -847,7 +846,7 @@ void dput(struct dentry *dentry) ...@@ -847,7 +846,7 @@ void dput(struct dentry *dentry)
dentry = __dentry_kill(dentry); dentry = __dentry_kill(dentry);
if (!dentry) if (!dentry)
return; return;
if (retain_dentry(dentry)) { if (retain_dentry(dentry, true)) {
spin_unlock(&dentry->d_lock); spin_unlock(&dentry->d_lock);
return; return;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment