Commit d5096aa6 authored by Sebastian Andrzej Siewior's avatar Sebastian Andrzej Siewior Committed by Thomas Gleixner

sched: Mark hrtimers to expire in hard interrupt context

The scheduler related hrtimers need to expire in hard interrupt context
even on PREEMPT_RT enabled kernels. Mark them as such.

No functional change.

[ tglx: Split out from larger combo patch. Add changelog. ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190726185753.077004842@linutronix.de

parent 0ab6a3dd
...@@ -255,7 +255,7 @@ static void __hrtick_restart(struct rq *rq) ...@@ -255,7 +255,7 @@ static void __hrtick_restart(struct rq *rq)
{ {
struct hrtimer *timer = &rq->hrtick_timer; struct hrtimer *timer = &rq->hrtick_timer;
hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
} }
/* /*
...@@ -314,7 +314,7 @@ void hrtick_start(struct rq *rq, u64 delay) ...@@ -314,7 +314,7 @@ void hrtick_start(struct rq *rq, u64 delay)
*/ */
delay = max_t(u64, delay, 10000LL); delay = max_t(u64, delay, 10000LL);
hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
HRTIMER_MODE_REL_PINNED); HRTIMER_MODE_REL_PINNED_HARD);
} }
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
...@@ -328,7 +328,7 @@ static void hrtick_rq_init(struct rq *rq) ...@@ -328,7 +328,7 @@ static void hrtick_rq_init(struct rq *rq)
rq->hrtick_csd.info = rq; rq->hrtick_csd.info = rq;
#endif #endif
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
rq->hrtick_timer.function = hrtick; rq->hrtick_timer.function = hrtick;
} }
#else /* CONFIG_SCHED_HRTICK */ #else /* CONFIG_SCHED_HRTICK */
......
...@@ -923,7 +923,7 @@ static int start_dl_timer(struct task_struct *p) ...@@ -923,7 +923,7 @@ static int start_dl_timer(struct task_struct *p)
*/ */
if (!hrtimer_is_queued(timer)) { if (!hrtimer_is_queued(timer)) {
get_task_struct(p); get_task_struct(p);
hrtimer_start(timer, act, HRTIMER_MODE_ABS); hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
} }
return 1; return 1;
...@@ -1053,7 +1053,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) ...@@ -1053,7 +1053,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
{ {
struct hrtimer *timer = &dl_se->dl_timer; struct hrtimer *timer = &dl_se->dl_timer;
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
timer->function = dl_task_timer; timer->function = dl_task_timer;
} }
......
...@@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) ...@@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
raw_spin_lock_init(&rt_b->rt_runtime_lock); raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer, hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
CLOCK_MONOTONIC, HRTIMER_MODE_REL); HRTIMER_MODE_REL_HARD);
rt_b->rt_period_timer.function = sched_rt_period_timer; rt_b->rt_period_timer.function = sched_rt_period_timer;
} }
...@@ -67,7 +67,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) ...@@ -67,7 +67,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
* to update the period. * to update the period.
*/ */
hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0)); hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED); hrtimer_start_expires(&rt_b->rt_period_timer,
HRTIMER_MODE_ABS_PINNED_HARD);
} }
raw_spin_unlock(&rt_b->rt_runtime_lock); raw_spin_unlock(&rt_b->rt_runtime_lock);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment