Commit 3f48565b authored Oct 07, 2021 by Peter Zijlstra

Merge branch 'tip/locking/urgent'
Pull in dependencies.
parents 9321f815 81121524
Showing 1 changed file with 45 additions and 20 deletions:
kernel/locking/rwbase_rt.c (+45, -20)
kernel/locking/rwbase_rt.c (view file @ 3f48565b)
@@ -41,6 +41,12 @@
  * The risk of writer starvation is there, but the pathological use cases
  * which trigger it are not necessarily the typical RT workloads.
  *
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
  * Common code shared between RT rw_semaphore and rwlock
  */
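The comment added above is the ordering contract the rest of the series enforces: with no inner lock on the reader fast path, the atomic operations themselves must provide ACQUIRE and RELEASE. As an illustration only, a minimal userspace C11 sketch of such a pairing (producer, consumer, payload and ready are hypothetical names, this is not kernel code; build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;             /* data published by the "writer" thread */
static atomic_int ready;        /* flag standing in for the lock word */

static void *producer(void *arg)
{
        (void)arg;
        payload = 42;                                   /* plain store */
        /* RELEASE: orders the payload store before the flag store */
        atomic_store_explicit(&ready, 1, memory_order_release);
        return NULL;
}

static void *consumer(void *arg)
{
        (void)arg;
        /* ACQUIRE: pairs with the release store above */
        while (!atomic_load_explicit(&ready, memory_order_acquire))
                ;
        printf("payload = %d\n", payload);              /* guaranteed to print 42 */
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}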
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
 	 * set.
 	 */
 	for (r = atomic_read(&rwb->readers); r < 0;) {
+		/* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
 		if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
 			return 1;
 	}
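For illustration, the reader fast path above can be modelled in userspace C11: a CAS loop on the reader count whose successful exchange is at least ACQUIRE, pairing with the writer's RELEASE when it restores the bias (see the __rwbase_write_unlock() hunk below). The struct and names below are hypothetical stand-ins, and the kernel's atomic_try_cmpxchg() is fully ordered, which is stronger than the acquire shown here:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct rwbase_rt; INT_MIN plays READER_BIAS. */
struct rwbase_sketch {
        atomic_int readers;     /* < 0: reader bias set, fast path allowed */
};

static bool read_trylock_sketch(struct rwbase_sketch *rwb)
{
        int r = atomic_load_explicit(&rwb->readers, memory_order_relaxed);

        while (r < 0) {
                /*
                 * ACQUIRE on success, the minimum the new comment asks for.
                 * On failure the CAS reloads r and the loop re-checks r < 0.
                 */
                if (atomic_compare_exchange_weak_explicit(&rwb->readers,
                                                          &r, r + 1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                        return true;
        }
        return false;   /* bias removed by a writer: take the slow path */
}

int main(void)
{
        struct rwbase_sketch rwb = { .readers = INT_MIN };

        printf("fast path taken: %d\n", read_trylock_sketch(&rwb));
        return 0;
}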
@@ -166,6 +173,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
 	/*
 	 * rwb->readers can only hit 0 when a writer is waiting for the
 	 * active readers to leave the critical section.
+	 *
+	 * dec_and_test() is fully ordered, provides RELEASE.
 	 */
 	if (unlikely(atomic_dec_and_test(&rwb->readers)))
 		__rwbase_read_unlock(rwb, state);
@@ -176,7 +185,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 {
 	struct rt_mutex_base *rtm = &rwb->rtmutex;
 
-	atomic_add(READER_BIAS - bias, &rwb->readers);
+	/*
+	 * _release() is needed in case that reader is in fast path, pairing
+	 * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
+	 */
+	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 	rwbase_rtmutex_unlock(rtm);
 }
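A hedged illustration of the pairing the new comment describes: the writer's last stores in its critical section must be ordered before the reader bias is restored, so a reader entering via the fast-path ACQUIRE cmpxchg also observes them. The sketch below uses hypothetical names and C11 atomics and is not the kernel implementation:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Same hypothetical stand-in as the reader sketch above. */
struct rwbase_sketch {
        atomic_int readers;     /* 0: write locked (in this sketch); INT_MIN: unlocked */
        int data;               /* protected by the lock */
};

static void write_unlock_sketch(struct rwbase_sketch *rwb)
{
        rwb->data++;            /* last store of the write critical section */

        /*
         * RELEASE add, standing in for
         * (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers):
         * the data store above is ordered before the restored bias becomes
         * visible, so a reader whose ACQUIRE cmpxchg sees the bias also
         * sees the new data.
         */
        atomic_fetch_add_explicit(&rwb->readers, INT_MIN, memory_order_release);
}

int main(void)
{
        struct rwbase_sketch rwb = { .readers = 0, .data = 0 };

        write_unlock_sketch(&rwb);
        printf("readers=%d data=%d\n",
               atomic_load_explicit(&rwb.readers, memory_order_relaxed),
               rwb.data);
        return 0;
}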
@@ -200,6 +213,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
 	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
 }
 
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+	/* Can do without CAS because we're serialized by wait_lock. */
+	lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+	/*
+	 * _acquire is needed in case the reader is in the fast path, pairing
+	 * with rwbase_read_unlock(), provides ACQUIRE.
+	 */
+	if (!atomic_read_acquire(&rwb->readers)) {
+		atomic_set(&rwb->readers, WRITER_BIAS);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 				     unsigned int state)
 {
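The other direction of the pairing, illustrated with a hypothetical userspace sketch: the new helper's ACQUIRE read of rwb->readers pairs with the RELEASE side of the fully ordered atomic_dec_and_test() in rwbase_read_unlock(), so a writer that sees the count hit zero also sees the last reader's writes. The wait_lock that serializes writers in the real code is not modelled here, and all names are stand-ins:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WRITER_BIAS_SKETCH      (1 << 30)       /* hypothetical WRITER_BIAS stand-in */

struct rwbase_sketch {
        atomic_int readers;     /* 0: no active readers, bias already removed */
};

/* Hypothetical analogue of __rwbase_write_trylock(); caller is assumed to
 * hold the (not modelled) wait_lock, as in the patch. */
static bool write_trylock_sketch(struct rwbase_sketch *rwb)
{
        /*
         * ACQUIRE read, like atomic_read_acquire(): pairs with the RELEASE
         * provided by the reader's fully ordered dec_and_test(), so the
         * writer observes everything the last reader wrote before leaving.
         */
        if (!atomic_load_explicit(&rwb->readers, memory_order_acquire)) {
                /* a plain (relaxed) store is enough under the wait_lock */
                atomic_store_explicit(&rwb->readers, WRITER_BIAS_SKETCH,
                                      memory_order_relaxed);
                return true;
        }
        return false;
}

int main(void)
{
        struct rwbase_sketch rwb = { .readers = 0 };

        printf("write trylock: %d\n", write_trylock_sketch(&rwb));
        return 0;
}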
@@ -214,34 +244,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 	atomic_sub(READER_BIAS, &rwb->readers);
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	/*
-	 * set_current_state() for rw_semaphore
-	 * current_save_and_set_rtlock_wait_state() for rwlock
-	 */
-	rwbase_set_and_save_current_state(state);
+	if (__rwbase_write_trylock(rwb))
+		goto out_unlock;
 
-	/* Block until all readers have left the critical section. */
-	for (; atomic_read(&rwb->readers);) {
+	rwbase_set_and_save_current_state(state);
+	for (;;) {
 		/* Optimized out for rwlocks */
 		if (rwbase_signal_pending_state(state, current)) {
-			__set_current_state(TASK_RUNNING);
+			rwbase_restore_current_state();
 			__rwbase_write_unlock(rwb, 0, flags);
 			return -EINTR;
 		}
+
+		if (__rwbase_write_trylock(rwb))
+			break;
+
 		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+		rwbase_schedule();
+		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 
-		/*
-		 * Schedule and wait for the readers to leave the critical
-		 * section. The last reader leaving it wakes the waiter.
-		 */
-		if (atomic_read(&rwb->readers) != 0)
-			rwbase_schedule();
 		set_current_state(state);
-		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 	}
-
-	atomic_set(&rwb->readers, WRITER_BIAS);
 	rwbase_restore_current_state();
+
+out_unlock:
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 	return 0;
 }
@@ -257,8 +283,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
 	atomic_sub(READER_BIAS, &rwb->readers);
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	if (!atomic_read(&rwb->readers)) {
-		atomic_set(&rwb->readers, WRITER_BIAS);
+	if (__rwbase_write_trylock(rwb)) {
 		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 		return 1;
 	}