Commit 9a2b764b authored by Frederic Weisbecker, committed by Thomas Gleixner

timers: Move trigger_dyntick_cpu() to enqueue_timer()

Consolidate the code by calling trigger_dyntick_cpu() from
enqueue_timer() instead of calling it from all its callers.
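In effect, the __internal_add_timer() + trigger_dyntick_cpu() pair that every caller previously had to issue collapses into a single enqueue_timer() call, which performs the wakeup itself. A minimal standalone sketch of that pattern, with hypothetical names (trigger_wakeup, enqueue, add_timer_sketch) standing in for the kernel functions:

#include <stdio.h>

struct timer { unsigned long expires; };

static void trigger_wakeup(unsigned long bucket_expiry)
{
	printf("wake target CPU, bucket expires at %lu\n", bucket_expiry);
}

/* After the change: the enqueue path issues the wakeup itself. */
static void enqueue(struct timer *t, unsigned int idx,
		    unsigned long bucket_expiry)
{
	printf("enqueue timer (expires=%lu) into bucket %u\n", t->expires, idx);
	trigger_wakeup(bucket_expiry);
}

static void add_timer_sketch(struct timer *t)
{
	unsigned long bucket_expiry = t->expires;	/* simplified: no wheel math */
	unsigned int idx = 0;				/* simplified: single bucket */

	/* Callers no longer pair the enqueue with a separate wakeup call. */
	enqueue(t, idx, bucket_expiry);
}

int main(void)
{
	struct timer t = { .expires = 100 };

	add_timer_sketch(&t);
	return 0;
}

In the real code, enqueue_timer() also records the wheel index and the pending bit before waking the target CPU, as the diff below shows.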
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200717140551.29076-5-frederic@kernel.org
parent 1f32cab0
@@ -533,30 +533,6 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk,
 	return idx;
 }
 
-/*
- * Enqueue the timer into the hash bucket, mark it pending in
- * the bitmap and store the index in the timer flags.
- */
-static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
-			  unsigned int idx)
-{
-	hlist_add_head(&timer->entry, base->vectors + idx);
-	__set_bit(idx, base->pending_map);
-	timer_set_idx(timer, idx);
-
-	trace_timer_start(timer, timer->expires, timer->flags);
-}
-
-static void
-__internal_add_timer(struct timer_base *base, struct timer_list *timer,
-		     unsigned long *bucket_expiry)
-{
-	unsigned int idx;
-
-	idx = calc_wheel_index(timer->expires, base->clk, bucket_expiry);
-	enqueue_timer(base, timer, idx);
-}
-
 static void
 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer,
 		    unsigned long bucket_expiry)
@@ -598,15 +574,31 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer,
 	wake_up_nohz_cpu(base->cpu);
 }
 
-static void
-internal_add_timer(struct timer_base *base, struct timer_list *timer)
+/*
+ * Enqueue the timer into the hash bucket, mark it pending in
+ * the bitmap, store the index in the timer flags then wake up
+ * the target CPU if needed.
+ */
+static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
+			  unsigned int idx, unsigned long bucket_expiry)
 {
-	unsigned long bucket_expiry;
+	hlist_add_head(&timer->entry, base->vectors + idx);
+	__set_bit(idx, base->pending_map);
+	timer_set_idx(timer, idx);
 
-	__internal_add_timer(base, timer, &bucket_expiry);
+	trace_timer_start(timer, timer->expires, timer->flags);
 	trigger_dyntick_cpu(base, timer, bucket_expiry);
 }
 
+static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
+{
+	unsigned long bucket_expiry;
+	unsigned int idx;
+
+	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
+	enqueue_timer(base, timer, idx, bucket_expiry);
+}
+
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 
 static struct debug_obj_descr timer_debug_descr;
@@ -1057,16 +1049,13 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 	/*
 	 * If 'idx' was calculated above and the base time did not advance
 	 * between calculating 'idx' and possibly switching the base, only
-	 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
-	 * we need to (re)calculate the wheel index via
-	 * internal_add_timer().
+	 * enqueue_timer() is required. Otherwise we need to (re)calculate
+	 * the wheel index via internal_add_timer().
 	 */
-	if (idx != UINT_MAX && clk == base->clk) {
-		enqueue_timer(base, timer, idx);
-		trigger_dyntick_cpu(base, timer, bucket_expiry);
-	} else {
+	if (idx != UINT_MAX && clk == base->clk)
+		enqueue_timer(base, timer, idx, bucket_expiry);
+	else
 		internal_add_timer(base, timer);
-	}
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&base->lock, flags);