Commit 4b39f99c authored by Peter Zijlstra

futex: Remove {get,drop}_futex_key_refs()

Now that {get,drop}_futex_key_refs() have become glorified NOPs,
remove them entirely.

The only thing get_futex_key_refs() still does is an smp_mb(), and
now that we don't need to (ab)use existing atomic ops to obtain that
barrier, we can place it explicitly where we need it.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
parent 22299339
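For readers following the ordering argument below, here is a minimal userspace sketch of the protocol that barriers (A) and (B) protect, written with C11 atomics instead of the kernel's smp_mb(). The names fake_futex, fake_waiters, waiter_side() and waker_side() are invented for illustration and do not exist in the kernel; the point is only that the waker's write to the futex word must be ordered before its read of the waiter count, which after this patch is done by the smp_mb() in hb_waiters_pending().

/*
 * Illustration only -- userspace C11, not kernel code.
 * fake_futex stands in for the user futex word, fake_waiters for hb->waiters.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int fake_futex;
static _Atomic unsigned int fake_waiters;

/* Waiter side: bump the waiter count, full fence (A), then read the futex. */
static int waiter_side(unsigned int expected)
{
	atomic_fetch_add(&fake_waiters, 1);		/* like hb_waiters_inc() */
	atomic_thread_fence(memory_order_seq_cst);	/* barrier (A) */
	if (atomic_load_explicit(&fake_futex, memory_order_relaxed) != expected)
		return 0;	/* value changed: do not sleep */
	return 1;		/* would go to sleep in futex_wait() */
}

/* Waker side: store the futex word, full fence (B), then read the waiter count. */
static int waker_side(unsigned int newval)
{
	atomic_store_explicit(&fake_futex, newval, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier (B), now issued
							 * in hb_waiters_pending() */
	return atomic_load_explicit(&fake_waiters, memory_order_relaxed) > 0;
}

int main(void)
{
	waiter_side(0);					/* a waiter queues itself   */
	printf("waker must wake: %d\n", waker_side(1));	/* ...so the waker sees it  */
	return 0;
}

Either the waiter sees the new futex value and does not sleep, or the waker sees a non-zero waiter count and goes on to take the hash-bucket lock; the two fences rule out the interleaving where both sides miss each other.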
@@ -135,8 +135,7 @@
  *
  * Where (A) orders the waiters increment and the futex value read through
  * atomic operations (see hb_waiters_inc) and where (B) orders the write
- * to futex and the waiters read -- this is done by the barriers for both
- * shared and private futexes in get_futex_key_refs().
+ * to futex and the waiters read (see hb_waiters_pending()).
  *
  * This yields the following case (where X:=waiters, Y:=futex):
  *
@@ -359,6 +358,10 @@ static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
 static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
 {
 #ifdef CONFIG_SMP
+	/*
+	 * Full barrier (B), see the ordering comment above.
+	 */
+	smp_mb();
 	return atomic_read(&hb->waiters);
 #else
 	return 1;
@@ -396,68 +399,6 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
 		&& key1->both.offset == key2->both.offset);
 }
 
-/*
- * Take a reference to the resource addressed by a key.
- * Can be called while holding spinlocks.
- *
- */
-static void get_futex_key_refs(union futex_key *key)
-{
-	if (!key->both.ptr)
-		return;
-
-	/*
-	 * On MMU less systems futexes are always "private" as there is no per
-	 * process address space. We need the smp wmb nevertheless - yes,
-	 * arch/blackfin has MMU less SMP ...
-	 */
-	if (!IS_ENABLED(CONFIG_MMU)) {
-		smp_mb(); /* explicit smp_mb(); (B) */
-		return;
-	}
-
-	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-	case FUT_OFF_INODE:
-		smp_mb(); /* explicit smp_mb(); (B) */
-		break;
-	case FUT_OFF_MMSHARED:
-		smp_mb(); /* explicit smp_mb(); (B) */
-		break;
-	default:
-		/*
-		 * Private futexes do not hold reference on an inode or
-		 * mm, therefore the only purpose of calling get_futex_key_refs
-		 * is because we need the barrier for the lockless waiter check.
-		 */
-		smp_mb(); /* explicit smp_mb(); (B) */
-	}
-}
-
-/*
- * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held. This is
- * a no-op for private futexes, see comment in the get
- * counterpart.
- */
-static void drop_futex_key_refs(union futex_key *key)
-{
-	if (!key->both.ptr) {
-		/* If we're here then we tried to put a key we failed to get */
-		WARN_ON_ONCE(1);
-		return;
-	}
-
-	if (!IS_ENABLED(CONFIG_MMU))
-		return;
-
-	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-	case FUT_OFF_INODE:
-		break;
-	case FUT_OFF_MMSHARED:
-		break;
-	}
-}
-
 enum futex_access {
 	FUTEX_READ,
 	FUTEX_WRITE
@@ -589,7 +530,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
 	if (!fshared) {
 		key->private.mm = mm;
 		key->private.address = address;
-		get_futex_key_refs(key); /* implies smp_mb(); (B) */
 		return 0;
 	}
 
@@ -729,8 +669,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
 		rcu_read_unlock();
 	}
 
-	get_futex_key_refs(key); /* implies smp_mb(); (B) */
-
 out:
 	put_page(page);
 	return err;
@@ -738,7 +676,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_a
 
 static inline void put_futex_key(union futex_key *key)
 {
-	drop_futex_key_refs(key);
 }
 
 /**
@@ -1873,7 +1810,6 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 		plist_add(&q->list, &hb2->chain);
 		q->lock_ptr = &hb2->lock;
 	}
-	get_futex_key_refs(key2);
 	q->key = *key2;
 }
 
@@ -1895,7 +1831,6 @@ static inline
 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 			   struct futex_hash_bucket *hb)
 {
-	get_futex_key_refs(key);
 	q->key = *key;
 
 	__unqueue_futex(q);
@@ -2006,7 +1941,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 			 u32 *cmpval, int requeue_pi)
 {
 	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
-	int drop_count = 0, task_count = 0, ret;
+	int task_count = 0, ret;
 	struct futex_pi_state *pi_state = NULL;
 	struct futex_hash_bucket *hb1, *hb2;
 	struct futex_q *this, *next;
@@ -2127,7 +2062,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 		 */
 		if (ret > 0) {
 			WARN_ON(pi_state);
-			drop_count++;
 			task_count++;
 			/*
 			 * If we acquired the lock, then the user space value
@@ -2247,7 +2181,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 			 * doing so.
 			 */
 			requeue_pi_wake_futex(this, &key2, hb2);
-			drop_count++;
 			continue;
 		} else if (ret) {
 			/*
@@ -2268,7 +2201,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 			}
 		}
 		requeue_futex(this, hb1, hb2, &key2);
-		drop_count++;
 	}
 
 	/*
@@ -2283,15 +2215,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 	wake_up_q(&wake_q);
 	hb_waiters_dec(hb2);
 
-	/*
-	 * drop_futex_key_refs() must be called outside the spinlocks. During
-	 * the requeue we moved futex_q's from the hash bucket at key1 to the
-	 * one at key2 and updated their key pointer. We no longer need to
-	 * hold the references to key1.
-	 */
-	while (--drop_count >= 0)
-		drop_futex_key_refs(&key1);
-
 out_put_keys:
 	put_futex_key(&key2);
 out_put_key1:
@@ -2421,7 +2344,6 @@ static int unqueue_me(struct futex_q *q)
 		ret = 1;
 	}
 
-	drop_futex_key_refs(&q->key);
 	return ret;
 }
 
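For reference, the second hunk above leaves hb_waiters_pending() looking roughly as follows; the closing #endif and brace are implied by the surrounding code rather than shown in the hunk:

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	/*
	 * Full barrier (B), see the ordering comment above.
	 */
	smp_mb();
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}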