Commit a56c41e5 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'timers-urgent-2020-01-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
 "Two fixes for the generic VDSO code which missed 5.5:

   - Make the update to the coarse timekeeper unconditional.

     This is required because the coarse timekeeper interfaces in the
     VDSO do not depend on a VDSO capable clocksource. If the system
     does not have a VDSO capable clocksource and the update is
     depending on the VDSO capable clocksource, the coarse VDSO
     interfaces would operate on stale data forever.

   - Invert the logic of __arch_update_vdso_data() to avoid further head
     scratching.

     Tripped over this several times while analyzing the update problem
     above"

* tag 'timers-urgent-2020-01-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lib/vdso: Update coarse timekeeper unconditionally
  lib/vdso: Make __arch_update_vdso_data() logic understandable
parents b1dba247 9f24c540
arch/arm/include/asm/vdso/vsyscall.h:
@@ -34,9 +34,9 @@ struct vdso_data *__arm_get_k_vdso_data(void)
 #define __arch_get_k_vdso_data __arm_get_k_vdso_data
 
 static __always_inline
-int __arm_update_vdso_data(void)
+bool __arm_update_vdso_data(void)
 {
-	return !cntvct_ok;
+	return cntvct_ok;
 }
 #define __arch_update_vdso_data __arm_update_vdso_data
...
include/asm-generic/vdso/vsyscall.h:
@@ -12,9 +12,9 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
 #endif /* __arch_get_k_vdso_data */
 
 #ifndef __arch_update_vdso_data
-static __always_inline int __arch_update_vdso_data(void)
+static __always_inline bool __arch_update_vdso_data(void)
 {
-	return 0;
+	return true;
 }
 #endif /* __arch_update_vdso_data */
...
kernel/time/vsyscall.c:
@@ -28,11 +28,6 @@ static inline void update_vdso_data(struct vdso_data *vdata,
 	vdata[CS_RAW].mult		= tk->tkr_raw.mult;
 	vdata[CS_RAW].shift		= tk->tkr_raw.shift;
 
-	/* CLOCK_REALTIME */
-	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
-	vdso_ts->sec	= tk->xtime_sec;
-	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
-
 	/* CLOCK_MONOTONIC */
 	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
 	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
@@ -70,12 +65,6 @@ static inline void update_vdso_data(struct vdso_data *vdata,
 	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
 	vdso_ts->sec	= tk->xtime_sec + (s64)tk->tai_offset;
 	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
-
-	/*
-	 * Read without the seqlock held by clock_getres().
-	 * Note: No need to have a second copy.
-	 */
-	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
 }
 
 void update_vsyscall(struct timekeeper *tk)
@@ -84,20 +73,17 @@ void update_vsyscall(struct timekeeper *tk)
 	struct vdso_timestamp *vdso_ts;
 	u64 nsec;
 
-	if (__arch_update_vdso_data()) {
-		/*
-		 * Some architectures might want to skip the update of the
-		 * data page.
-		 */
-		return;
-	}
-
 	/* copy vsyscall data */
 	vdso_write_begin(vdata);
 
 	vdata[CS_HRES_COARSE].clock_mode	= __arch_get_clock_mode(tk);
 	vdata[CS_RAW].clock_mode		= __arch_get_clock_mode(tk);
 
+	/* CLOCK_REALTIME also required for time() */
+	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
+	vdso_ts->sec	= tk->xtime_sec;
+	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
+
 	/* CLOCK_REALTIME_COARSE */
 	vdso_ts		= &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
 	vdso_ts->sec	= tk->xtime_sec;
@@ -110,7 +96,18 @@ void update_vsyscall(struct timekeeper *tk)
 	nsec		= nsec + tk->wall_to_monotonic.tv_nsec;
 	vdso_ts->sec	+= __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
 
-	update_vdso_data(vdata, tk);
+	/*
+	 * Read without the seqlock held by clock_getres().
+	 * Note: No need to have a second copy.
+	 */
+	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
+
+	/*
+	 * Architectures can opt out of updating the high resolution part
+	 * of the VDSO.
+	 */
+	if (__arch_update_vdso_data())
+		update_vdso_data(vdata, tk);
 
 	__arch_update_vsyscall(vdata, tk);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment