remove div_long_long_rem

x86 is the only arch right now which provides an optimized version of
div_long_long_rem, and it has the downside that one has to be very careful
that the divide doesn't overflow.
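For illustration (the values below are made up, not from the tree): on 32-bit
x86 the fast path is essentially a single divl instruction, which traps with a
divide error whenever the quotient does not fit in 32 bits:

	long rem;
	/* quotient is about 5 * 10^9, which does not fit in 32 bits; the
	 * single-instruction x86 divide raises #DE instead of returning a
	 * truncated result */
	long sec = div_long_long_rem(5000000000000000000LL, NSEC_PER_SEC, &rem);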

The API is a little awkward, as the arguments for the unsigned divide are
signed.  The signed version also doesn't handle a negative divisor and
produces worse code on 64-bit archs.

There is little incentive to keep this API alive, so this converts the few
users to the new API.
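For reference, a rough sketch of what the conversion looks like at a typical
call site (the variable names are made up for illustration); the new helpers
live in <linux/math64.h>:

	/* old API: signed arguments even though the divide is unsigned */
	long rem;
	unsigned long sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);

	/* new API: explicit 64-by-32 unsigned divide with a u32 remainder */
	u32 rem32;
	u64 sec64 = div_u64_rem(nsec, NSEC_PER_SEC, &rem32);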

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Roman Zippel, 2008-05-01 04:34:31 -07:00 (committed by Linus Torvalds)
Commit: f8bd2258e2 (parent 6f6d6a1a6a)
10 changed files with 44 additions and 110 deletions


@@ -4,8 +4,9 @@
 
 #include <linux/sched.h>
 #include <linux/posix-timers.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
 
 static int check_clock(const clockid_t which_clock)
 {
@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock,
 			       union cpu_time_count cpu,
 			       struct timespec *tp)
 {
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		tp->tv_sec = div_long_long_rem(cpu.sched,
-					       NSEC_PER_SEC, &tp->tv_nsec);
-	} else {
+	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+		*tp = ns_to_timespec(cpu.sched);
+	else
 		cputime_to_timespec(cpu.cpu, tp);
-	}
 }
 
 static inline int cpu_time_before(const clockid_t which_clock,
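In this hunk the open-coded split of cpu.sched into seconds and nanoseconds
becomes a call to ns_to_timespec(), which does the divide in one place on top
of the new signed helper.  Roughly, it amounts to this (a sketch of the idea,
not necessarily the exact in-tree code):

	struct timespec ns_to_timespec(const s64 nsec)
	{
		struct timespec ts;
		s32 rem;

		if (!nsec)
			return (struct timespec) {0, 0};

		ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
		/* div_s64_rem() truncates towards zero; fold a negative
		 * remainder back into the nanosecond field */
		if (rem < 0) {
			ts.tv_sec--;
			rem += NSEC_PER_SEC;
		}
		ts.tv_nsec = rem;

		return ts;
	}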