linux / commit ae0b5c2f
authored Jun 08, 2016 by Ingo Molnar

Merge branch 'locking/urgent' into locking/core, to pick up dependency
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 331b6d8c 2c610022

Showing 2 changed files with 77 additions and 36 deletions (+77 -36):

    include/asm-generic/qspinlock.h    +17  -36
    kernel/locking/qspinlock.c         +60   -0

include/asm-generic/qspinlock.h  (view file @ ae0b5c2f)
...
...
@@ -21,38 +21,34 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
         /*
-         * queued_spin_lock_slowpath() can ACQUIRE the lock before
-         * issuing the unordered store that sets _Q_LOCKED_VAL.
-         *
-         * See both smp_cond_acquire() sites for more detail.
-         *
-         * This however means that in code like:
-         *
-         *   spin_lock(A)              spin_lock(B)
-         *   spin_unlock_wait(B)       spin_is_locked(A)
-         *   do_something()            do_something()
-         *
-         * Both CPUs can end up running do_something() because the store
-         * setting _Q_LOCKED_VAL will pass through the loads in
-         * spin_unlock_wait() and/or spin_is_locked().
+         * See queued_spin_unlock_wait().
          *
-         * Avoid this by issuing a full memory barrier between the spin_lock()
-         * and the loads in spin_unlock_wait() and spin_is_locked().
-         *
-         * Note that regular mutual exclusion doesn't care about this
-         * delayed store.
+         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+         * isn't immediately observable.
          */
-        smp_mb();
-        return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+        return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
...
...
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-        /* See queued_spin_is_locked() */
-        smp_mb();
-        while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-                cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
...
...
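The header change above does two things: the ordering-heavy spin_unlock_wait() implementation leaves the header (it becomes an out-of-line function in qspinlock.c, below), and queued_spin_is_locked() now treats any nonzero lock word as locked instead of issuing smp_mb() and testing only _Q_LOCKED_MASK. A minimal sketch of the new test follows; the layout constants and the helper name are illustrative assumptions (the common _Q_PENDING_BITS == 8 configuration), not part of this diff:

/*
 * Sketch only: the word layout below matches the common qspinlock
 * configuration (_Q_PENDING_BITS == 8) and is stated here as an
 * assumption; none of these names come from the diff above.
 */
#define Q_LOCKED_MASK   0x000000ffU     /* bits  0..7 : locked byte */
#define Q_PENDING_MASK  0x0000ff00U     /* bits  8..15: pending */
#define Q_TAIL_MASK     0xffff0000U     /* bits 16..31: queued waiters */

/*
 * queued_spin_is_locked() after this patch: any nonzero word counts as
 * "locked", because a waiter publishes pending/tail state before its
 * (possibly delayed) store of the locked byte becomes visible.
 */
static inline int is_locked_sketch(unsigned int val)
{
        return val != 0;        /* was: smp_mb(); return val & Q_LOCKED_MASK; */
}

The justification for relying on that "other !0 state" is spelled out in the comment added to kernel/locking/qspinlock.c below.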
kernel/locking/qspinlock.c  (view file @ ae0b5c2f)
...
...
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():    *,1,0 -> *,0,1
+ *
+ *  (2) set_locked():                  t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():      t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+        u32 val;
+
+        for (;;) {
+                val = atomic_read(&lock->val);
+
+                if (!val) /* not locked, we're done */
+                        goto done;
+
+                if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+                        break;
+
+                /* not locked, but pending, wait until we observe the lock */
+                cpu_relax();
+        }
+
+        /* any unlock is good */
+        while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+                cpu_relax();
+
+done:
+        smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
...
...
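The new out-of-line queued_spin_unlock_wait() waits in two phases: first spin until a pending-only word either drops to zero or grows a locked byte, then spin until the locked byte clears. Below is a userspace C11 model of that loop, offered purely as an illustration of the control flow; qsl_t, QSL_LOCKED_MASK, sched_yield() and the final acquire fence are stand-ins for struct qspinlock, _Q_LOCKED_MASK, cpu_relax() and the kernel's CTRL + RMB -> ACQUIRE trick, not the kernel implementation:

#include <stdatomic.h>
#include <sched.h>

/* Assumed stand-in for struct qspinlock: one 32-bit atomic word. */
typedef struct { _Atomic unsigned int val; } qsl_t;

#define QSL_LOCKED_MASK 0x000000ffU     /* low byte = locked */

static void qsl_unlock_wait(qsl_t *lock)
{
        unsigned int val;

        for (;;) {
                val = atomic_load_explicit(&lock->val, memory_order_relaxed);

                if (!val)                       /* not locked, we're done */
                        goto done;

                if (val & QSL_LOCKED_MASK)      /* locked, go wait for unlock */
                        break;

                /* not locked, but pending: wait until we observe the lock */
                sched_yield();
        }

        /* any unlock is good */
        while (atomic_load_explicit(&lock->val, memory_order_relaxed) &
               QSL_LOCKED_MASK)
                sched_yield();

done:
        /* stand-in for the kernel's smp_rmb(); CTRL + RMB -> ACQUIRE */
        atomic_thread_fence(memory_order_acquire);
}

As the comment in the patch argues, the plain loads are sufficient because any acquirer makes other nonzero state visible before its delayed locked-byte store, so observing a zero word really does mean the lock is free, and the closing fence keeps later accesses from being hoisted above that observation.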