Commit d269a8b8 authored by Davidlohr Bueso, committed by Ingo Molnar

kernel/locking: Compute 'current' directly

This patch replaces the cached tsk/task pointer dereference (which is
always == current) with the get_current() macro directly. This makes the
upcoming removal of setting foreign task states smoother and painfully
obvious. It is also a performance win on some architectures, such as
x86-64 and ppc64. Results below are from a microbenchmark that calls
set_task_state() vs set_current_state(), and from an inode rwsem
pounding benchmark doing unlink (a sketch of such a microbenchmark
appears after the ppc64le results):

== 1. x86-64 ==

Avg runtime set_task_state():    601 msecs
Avg runtime set_current_state(): 552 msecs

                                            vanilla                 dirty
Hmean    unlink1-processes-2      36089.26 (  0.00%)    38977.33 (  8.00%)
Hmean    unlink1-processes-5      28555.01 (  0.00%)    29832.55 (  4.28%)
Hmean    unlink1-processes-8      37323.75 (  0.00%)    44974.57 ( 20.50%)
Hmean    unlink1-processes-12     43571.88 (  0.00%)    44283.01 (  1.63%)
Hmean    unlink1-processes-21     34431.52 (  0.00%)    38284.45 ( 11.19%)
Hmean    unlink1-processes-30     34813.26 (  0.00%)    37975.17 (  9.08%)
Hmean    unlink1-processes-48     37048.90 (  0.00%)    39862.78 (  7.59%)
Hmean    unlink1-processes-79     35630.01 (  0.00%)    36855.30 (  3.44%)
Hmean    unlink1-processes-110    36115.85 (  0.00%)    39843.91 ( 10.32%)
Hmean    unlink1-processes-141    32546.96 (  0.00%)    35418.52 (  8.82%)
Hmean    unlink1-processes-172    34674.79 (  0.00%)    36899.21 (  6.42%)
Hmean    unlink1-processes-203    37303.11 (  0.00%)    36393.04 ( -2.44%)
Hmean    unlink1-processes-224    35712.13 (  0.00%)    36685.96 (  2.73%)

== 2. ppc64le ==

Avg runtime set_task_state():  938 msecs
Avg runtime set_current_state(): 940 msecs

                                            vanilla                 dirty
Hmean    unlink1-processes-2      19269.19 (  0.00%)    30704.50 ( 59.35%)
Hmean    unlink1-processes-5      20106.15 (  0.00%)    21804.15 (  8.45%)
Hmean    unlink1-processes-8      17496.97 (  0.00%)    17243.28 ( -1.45%)
Hmean    unlink1-processes-12     14224.15 (  0.00%)    17240.21 ( 21.20%)
Hmean    unlink1-processes-21     14155.66 (  0.00%)    15681.23 ( 10.78%)
Hmean    unlink1-processes-30     14450.70 (  0.00%)    15995.83 ( 10.69%)
Hmean    unlink1-processes-48     16945.57 (  0.00%)    16370.42 ( -3.39%)
Hmean    unlink1-processes-79     15788.39 (  0.00%)    14639.27 ( -7.28%)
Hmean    unlink1-processes-110    14268.48 (  0.00%)    14377.40 (  0.76%)
Hmean    unlink1-processes-141    14023.65 (  0.00%)    16271.69 ( 16.03%)
Hmean    unlink1-processes-172    13417.62 (  0.00%)    16067.55 ( 19.75%)
Hmean    unlink1-processes-203    15293.08 (  0.00%)    15440.40 (  0.96%)
Hmean    unlink1-processes-234    13719.32 (  0.00%)    16190.74 ( 18.01%)
Hmean    unlink1-processes-265    16400.97 (  0.00%)    16115.22 ( -1.74%)
Hmean    unlink1-processes-296    14388.60 (  0.00%)    16216.13 ( 12.70%)
Hmean    unlink1-processes-320    15771.85 (  0.00%)    15905.96 (  0.85%)
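
For reference, here is a minimal sketch of the kind of set_task_state() vs
set_current_state() microbenchmark described above. It is an assumed
reconstruction, not the benchmark used for the numbers: the module name,
the loop count and the ktime-based timing are illustrative choices.

/*
 * state_bench.c - assumed reconstruction of the microbenchmark above:
 * time N state writes through a cached task pointer against N writes
 * via set_current_state().  Loop count and timing method are guesses.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ktime.h>

#define LOOPS	(100UL * 1000 * 1000)

static int __init state_bench_init(void)
{
	struct task_struct *tsk = current;	/* the cached pointer being removed */
	ktime_t t0, t1;
	unsigned long i;

	/* 1) write the state through an explicit task pointer */
	t0 = ktime_get();
	for (i = 0; i < LOOPS; i++)
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	t1 = ktime_get();
	__set_task_state(tsk, TASK_RUNNING);
	pr_info("set_task_state():    %lld msecs\n", ktime_ms_delta(t1, t0));

	/* 2) compute current directly at every call site */
	t0 = ktime_get();
	for (i = 0; i < LOOPS; i++)
		set_current_state(TASK_UNINTERRUPTIBLE);
	t1 = ktime_get();
	__set_current_state(TASK_RUNNING);
	pr_info("set_current_state(): %lld msecs\n", ktime_ms_delta(t1, t0));

	return 0;
}

static void __exit state_bench_exit(void) { }

module_init(state_bench_init);
module_exit(state_bench_exit);
MODULE_LICENSE("GPL");

Loaded as a module, this prints two timings comparable to the "Avg runtime"
lines above; any gap between them presumably comes from the per-cpu read of
current being cheaper than keeping a task pointer live across the loop.
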
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dave@stgolabs.net
Cc: mark.rutland@arm.com
Link: http://lkml.kernel.org/r/1483479794-14013-4-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5376f2e7
@@ -622,7 +622,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
-	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned long flags;
 	bool first = false;
@@ -656,18 +655,18 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		goto skip_wait;
 
 	debug_mutex_lock_common(lock, &waiter);
-	debug_mutex_add_waiter(lock, &waiter, task);
+	debug_mutex_add_waiter(lock, &waiter, current);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
-	waiter.task = task;
+	waiter.task = current;
 
 	if (__mutex_waiter_is_first(lock, &waiter))
 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 
 	lock_contended(&lock->dep_map, ip);
 
-	set_task_state(task, state);
+	set_task_state(current, state);
 	for (;;) {
 		/*
 		 * Once we hold wait_lock, we're serialized against
@@ -683,7 +682,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * wait_lock. This ensures the lock cancellation is ordered
 		 * against mutex_unlock() and wake-ups do not go missing.
 		 */
-		if (unlikely(signal_pending_state(state, task))) {
+		if (unlikely(signal_pending_state(state, current))) {
 			ret = -EINTR;
 			goto err;
 		}
@@ -702,7 +701,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 		}
 
-		set_task_state(task, state);
+		set_task_state(current, state);
 		/*
 		 * Here we order against unlock; we must either see it change
 		 * state back to RUNNING and fall through the next schedule(),
@@ -716,9 +715,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 	spin_lock_mutex(&lock->wait_lock, flags);
 acquired:
-	__set_task_state(task, TASK_RUNNING);
+	__set_task_state(current, TASK_RUNNING);
 
-	mutex_remove_waiter(lock, &waiter, task);
+	mutex_remove_waiter(lock, &waiter, current);
 	if (likely(list_empty(&lock->wait_list)))
 		__mutex_clear_flag(lock, MUTEX_FLAGS);
@@ -736,8 +735,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	return 0;
 
 err:
-	__set_task_state(task, TASK_RUNNING);
-	mutex_remove_waiter(lock, &waiter, task);
+	__set_task_state(current, TASK_RUNNING);
+	mutex_remove_waiter(lock, &waiter, current);
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, 1, ip);
...
@@ -128,7 +128,6 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 void __sched __down_read(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
-	struct task_struct *tsk;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
@@ -140,13 +139,12 @@ void __sched __down_read(struct rw_semaphore *sem)
 		goto out;
 	}
 
-	tsk = current;
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	set_task_state(current, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	waiter.task = tsk;
+	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
-	get_task_struct(tsk);
+	get_task_struct(current);
 
 	list_add_tail(&waiter.list, &sem->wait_list);
 
@@ -158,10 +156,10 @@ void __sched __down_read(struct rw_semaphore *sem)
 		if (!waiter.task)
 			break;
 		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		set_task_state(current, TASK_UNINTERRUPTIBLE);
 	}
 
-	__set_task_state(tsk, TASK_RUNNING);
+	__set_task_state(current, TASK_RUNNING);
 out:
 	;
 }
@@ -194,15 +192,13 @@ int __down_read_trylock(struct rw_semaphore *sem)
 int __sched __down_write_common(struct rw_semaphore *sem, int state)
 {
 	struct rwsem_waiter waiter;
-	struct task_struct *tsk;
 	unsigned long flags;
 	int ret = 0;
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* set up my own style of waitqueue */
-	tsk = current;
-	waiter.task = tsk;
+	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_WRITE;
 	list_add_tail(&waiter.list, &sem->wait_list);
 
@@ -220,7 +216,7 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
 			ret = -EINTR;
 			goto out;
 		}
-		set_task_state(tsk, state);
+		set_task_state(current, state);
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		schedule();
 		raw_spin_lock_irqsave(&sem->wait_lock, flags);
...
@@ -224,10 +224,9 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
 	struct rwsem_waiter waiter;
-	struct task_struct *tsk = current;
 	DEFINE_WAKE_Q(wake_q);
 
-	waiter.task = tsk;
+	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
 
 	raw_spin_lock_irq(&sem->wait_lock);
@@ -254,13 +253,13 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 
 	/* wait to be given the lock */
 	while (true) {
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		set_task_state(current, TASK_UNINTERRUPTIBLE);
 		if (!waiter.task)
 			break;
 		schedule();
 	}
 
-	__set_task_state(tsk, TASK_RUNNING);
+	__set_task_state(current, TASK_RUNNING);
 	return sem;
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
...
@@ -204,19 +204,18 @@ struct semaphore_waiter {
 static inline int __sched __down_common(struct semaphore *sem, long state,
 								long timeout)
 {
-	struct task_struct *task = current;
 	struct semaphore_waiter waiter;
 
 	list_add_tail(&waiter.list, &sem->wait_list);
-	waiter.task = task;
+	waiter.task = current;
 	waiter.up = false;
 
 	for (;;) {
-		if (signal_pending_state(state, task))
+		if (signal_pending_state(state, current))
 			goto interrupted;
 		if (unlikely(timeout <= 0))
 			goto timed_out;
-		__set_task_state(task, state);
+		__set_task_state(current, state);
 		raw_spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
 		raw_spin_lock_irq(&sem->lock);
...