Commit f8bd2258 authored by Roman Zippel, committed by Linus Torvalds

remove div_long_long_rem

x86 is currently the only arch that provides an optimized div_long_long_rem, and
it has the downside that one has to be very careful that the divide doesn't
overflow.

The API is a little awkward, as the arguments for the unsigned divide are
signed.  The signed version also doesn't handle a negative divisor and
produces worse code on 64-bit archs.

There is little incentive to keep this API alive, so this converts the few
users to the new API.
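
For illustration, here is a minimal sketch of the conversion pattern using the
math64.h helpers; the wrapper functions and their names below are hypothetical
examples, not part of this patch:

#include <linux/math64.h>
#include <linux/time.h>

/* Hypothetical example: split nanoseconds into a timeval the way the
 * converted callers do.  div_u64_rem() divides an unsigned 64-bit
 * dividend by a 32-bit divisor and stores the 32-bit remainder. */
static void example_ns_to_timeval(u64 nsec, struct timeval *value)
{
	u32 rem;

	/* was: value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem); */
	value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
	value->tv_usec = rem / NSEC_PER_USEC;
}

/* Hypothetical example: the signed variant.  div_s64_rem() can return a
 * negative remainder for a negative dividend, so the caller normalizes
 * it, as kernel/time.c now does in ns_to_timespec(). */
static void example_ns_to_timespec(s64 nsec, struct timespec *ts)
{
	s32 rem;

	ts->tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
	if (rem < 0) {
		ts->tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts->tv_nsec = rem;
}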
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6f6d6a1a
@@ -54,6 +54,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #include <linux/module.h>
 #include <linux/elfcore.h>
 #include <linux/compat.h>
+#include <linux/math64.h>
 #define elf_prstatus elf_prstatus32
 struct elf_prstatus32
@@ -102,8 +103,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 	 * one divide.
 	 */
 	u64 nsec = (u64)jiffies * TICK_NSEC;
-	long rem;
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+	u32 rem;
+	value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
 	value->tv_usec = rem / NSEC_PER_USEC;
 }
......
@@ -56,6 +56,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #include <linux/module.h>
 #include <linux/elfcore.h>
 #include <linux/compat.h>
+#include <linux/math64.h>
 #define elf_prstatus elf_prstatus32
 struct elf_prstatus32
@@ -104,8 +105,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 	 * one divide.
 	 */
 	u64 nsec = (u64)jiffies * TICK_NSEC;
-	long rem;
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+	u32 rem;
+	value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
 	value->tv_usec = rem / NSEC_PER_USEC;
 }
......
@@ -30,6 +30,8 @@
 #include <linux/miscdevice.h>
 #include <linux/posix-timers.h>
 #include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/math64.h>
 #include <asm/uaccess.h>
 #include <asm/sn/addrs.h>
@@ -472,8 +474,8 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
 	nsec = rtc_time() * sgi_clock_period
 			+ sgi_clock_offset.tv_nsec;
-	tp->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tp->tv_nsec)
-			+ sgi_clock_offset.tv_sec;
+	*tp = ns_to_timespec(nsec);
+	tp->tv_sec += sgi_clock_offset.tv_sec;
 	return 0;
 };
@@ -481,11 +483,11 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
 {
 	u64 nsec;
-	u64 rem;
+	u32 rem;
 	nsec = rtc_time() * sgi_clock_period;
-	sgi_clock_offset.tv_sec = tp->tv_sec - div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+	sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
 	if (rem <= tp->tv_nsec)
 		sgi_clock_offset.tv_nsec = tp->tv_sec - rem;
@@ -644,9 +646,6 @@ static int sgi_timer_del(struct k_itimer *timr)
 	return 0;
 }
-#define timespec_to_ns(x) ((x).tv_nsec + (x).tv_sec * NSEC_PER_SEC)
-#define ns_to_timespec(ts, nsec) (ts).tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &(ts).tv_nsec)
 /* Assumption: it_lock is already held with irq's disabled */
 static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 {
@@ -659,9 +658,8 @@ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 		return;
 	}
-	ns_to_timespec(cur_setting->it_interval, timr->it.mmtimer.incr * sgi_clock_period);
-	ns_to_timespec(cur_setting->it_value, (timr->it.mmtimer.expires - rtc_time())* sgi_clock_period);
-	return;
+	cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
+	cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
 }
@@ -679,8 +677,8 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
 		sgi_timer_get(timr, old_setting);
 	sgi_timer_del(timr);
-	when = timespec_to_ns(new_setting->it_value);
-	period = timespec_to_ns(new_setting->it_interval);
+	when = timespec_to_ns(&new_setting->it_value);
+	period = timespec_to_ns(&new_setting->it_interval);
 	if (when == 0)
 		/* Clear timer */
@@ -695,7 +693,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
 		unsigned long now;
 		getnstimeofday(&n);
-		now = timespec_to_ns(n);
+		now = timespec_to_ns(&n);
 		if (when > now)
 			when -= now;
 		else
......
@@ -33,24 +33,6 @@
 	__mod;						\
 })
-/*
- * (long)X = ((long long)divs) / (long)div
- * (long)rem = ((long long)divs) % (long)div
- *
- * Warning, this will do an exception if X overflows.
- */
-#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c)
-static inline long div_ll_X_l_rem(long long divs, long div, long *rem)
-{
-	long dum2;
-	asm("divl %2":"=a"(dum2), "=d"(*rem)
-	    : "rm"(div), "A"(divs));
-	return dum2;
-}
 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
 	union {
......
-#ifndef _LINUX_CALC64_H
-#define _LINUX_CALC64_H
-#include <linux/types.h>
-#include <asm/div64.h>
-/*
- * This is a generic macro which is used when the architecture
- * specific div64.h does not provide a optimized one.
- *
- * The 64bit dividend is divided by the divisor (data type long), the
- * result is returned and the remainder stored in the variable
- * referenced by remainder (data type long *). In contrast to the
- * do_div macro the dividend is kept intact.
- */
-#ifndef div_long_long_rem
-#define div_long_long_rem(dividend, divisor, remainder) \
-	do_div_llr((dividend), divisor, remainder)
-static inline unsigned long do_div_llr(const long long dividend,
-				       const long divisor, long *remainder)
-{
-	u64 result = dividend;
-	*(remainder) = do_div(result, divisor);
-	return (unsigned long) result;
-}
-#endif
-/*
- * Sign aware variation of the above. On some architectures a
- * negative dividend leads to an divide overflow exception, which
- * is avoided by the sign check.
- */
-static inline long div_long_long_rem_signed(const long long dividend,
-					    const long divisor, long *remainder)
-{
-	long res;
-	if (unlikely(dividend < 0)) {
-		res = -div_long_long_rem(-dividend, divisor, remainder);
-		*remainder = -(*remainder);
-	} else
-		res = div_long_long_rem(dividend, divisor, remainder);
-	return res;
-}
-#endif
 #ifndef _LINUX_JIFFIES_H
 #define _LINUX_JIFFIES_H
-#include <linux/calc64.h>
+#include <linux/math64.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/time.h>
......
@@ -4,8 +4,9 @@
 #include <linux/sched.h>
 #include <linux/posix-timers.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
 static int check_clock(const clockid_t which_clock)
 {
@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock,
 			       union cpu_time_count cpu,
 			       struct timespec *tp)
 {
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		tp->tv_sec = div_long_long_rem(cpu.sched,
-					       NSEC_PER_SEC, &tp->tv_nsec);
-	} else {
+	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+		*tp = ns_to_timespec(cpu.sched);
+	else
 		cputime_to_timespec(cpu.cpu, tp);
-	}
 }
 static inline int cpu_time_before(const clockid_t which_clock,
......
@@ -392,13 +392,17 @@ EXPORT_SYMBOL(set_normalized_timespec);
 struct timespec ns_to_timespec(const s64 nsec)
 {
 	struct timespec ts;
+	s32 rem;
 	if (!nsec)
 		return (struct timespec) {0, 0};
-	ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
-	if (unlikely(nsec < 0))
-		set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
+	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+	if (unlikely(rem < 0)) {
+		ts.tv_sec--;
+		rem += NSEC_PER_SEC;
+	}
+	ts.tv_nsec = rem;
 	return ts;
 }
@@ -528,8 +532,10 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 	 * Convert jiffies to nanoseconds and separate with
 	 * one divide.
 	 */
-	u64 nsec = (u64)jiffies * TICK_NSEC;
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+	u32 rem;
+	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+				    NSEC_PER_SEC, &rem);
+	value->tv_nsec = rem;
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
@@ -567,12 +573,11 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
 	 * Convert jiffies to nanoseconds and separate with
 	 * one divide.
 	 */
-	u64 nsec = (u64)jiffies * TICK_NSEC;
-	long tv_usec;
+	u32 rem;
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
-	tv_usec /= NSEC_PER_USEC;
-	value->tv_usec = tv_usec;
+	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+				    NSEC_PER_SEC, &rem);
+	value->tv_usec = rem / NSEC_PER_USEC;
 }
 EXPORT_SYMBOL(jiffies_to_timeval);
......
@@ -234,7 +234,7 @@ static inline void notify_cmos_timer(void) { }
  */
 int do_adjtimex(struct timex *txc)
 {
-	long mtemp, save_adjust, rem;
+	long mtemp, save_adjust;
 	s64 freq_adj;
 	int result;
@@ -345,9 +345,7 @@ int do_adjtimex(struct timex *txc)
 				freq_adj += time_freq;
 				freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
 				time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
-				time_offset = div_long_long_rem_signed(time_offset,
-								       NTP_INTERVAL_FREQ,
-								       &rem);
+				time_offset = div_s64(time_offset, NTP_INTERVAL_FREQ);
 				time_offset <<= SHIFT_UPDATE;
 		} /* STA_PLL */
 	} /* txc->modes & ADJ_OFFSET */
......
@@ -22,6 +22,7 @@
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
+#include <linux/math64.h>
 /*
  * Lock order:
@@ -3621,12 +3622,10 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			len += sprintf(buf + len, "<not-available>");
 		if (l->sum_time != l->min_time) {
-			unsigned long remainder;
 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
-			l->min_time,
-			div_long_long_rem(l->sum_time, l->count, &remainder),
-			l->max_time);
+				l->min_time,
+				(long)div_u64(l->sum_time, l->count),
+				l->max_time);
 		} else
 			len += sprintf(buf + len, " age=%ld",
 				l->min_time);
......