Commit ce7d0226 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'alpha-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha-2.6

* 'alpha-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha-2.6:
  alpha: simplify and optimize sched_find_first_bit
  alpha: invoke oom-killer from page fault
  Convert alpha to use clocksources instead of arch_gettimeoffset
parents b142ebb6 a75f5f0f
...@@ -51,10 +51,6 @@ config GENERIC_TIME ...@@ -51,10 +51,6 @@ config GENERIC_TIME
bool bool
default y default y
config ARCH_USES_GETTIMEOFFSET
bool
default y
config GENERIC_CMOS_UPDATE config GENERIC_CMOS_UPDATE
def_bool y def_bool y
......
...@@ -438,22 +438,20 @@ static inline unsigned int __arch_hweight8(unsigned int w) ...@@ -438,22 +438,20 @@ static inline unsigned int __arch_hweight8(unsigned int w)
/* /*
* Every architecture must define this function. It's the fastest * Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are * way of searching a 100-bit bitmap. It's guaranteed that at least
* unlikely to be set. It's guaranteed that at least one of the 140 * one of the 100 bits is cleared.
* bits is set.
*/ */
static inline unsigned long static inline unsigned long
sched_find_first_bit(unsigned long b[3]) sched_find_first_bit(const unsigned long b[2])
{ {
unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; unsigned long b0, b1, ofs, tmp;
unsigned long ofs;
ofs = (b1 ? 64 : 128); b0 = b[0];
b1 = (b1 ? b1 : b2); b1 = b[1];
ofs = (b0 ? 0 : ofs); ofs = (b0 ? 0 : 64);
b0 = (b0 ? b0 : b1); tmp = (b0 ? b0 : b1);
return __ffs(b0) + ofs; return __ffs(tmp) + ofs;
} }
#include <asm-generic/bitops/ext2-non-atomic.h> #include <asm-generic/bitops/ext2-non-atomic.h>
......
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#include <linux/mc146818rtc.h> #include <linux/mc146818rtc.h>
#include <linux/time.h> #include <linux/time.h>
#include <linux/timex.h> #include <linux/timex.h>
#include <linux/clocksource.h>
#include "proto.h" #include "proto.h"
#include "irq_impl.h" #include "irq_impl.h"
...@@ -332,6 +333,34 @@ rpcc_after_update_in_progress(void) ...@@ -332,6 +333,34 @@ rpcc_after_update_in_progress(void)
return rpcc(); return rpcc();
} }
#ifndef CONFIG_SMP
/* Until and unless we figure out how to get cpu cycle counters
in sync and keep them there, we can't use the rpcc. */
static cycle_t read_rpcc(struct clocksource *cs)
{
cycle_t ret = (cycle_t)rpcc();
return ret;
}
static struct clocksource clocksource_rpcc = {
.name = "rpcc",
.rating = 300,
.read = read_rpcc,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS
};
static inline void register_rpcc_clocksource(long cycle_freq)
{
clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
clocksource_register(&clocksource_rpcc);
}
#else /* !CONFIG_SMP */
static inline void register_rpcc_clocksource(long cycle_freq)
{
}
#endif /* !CONFIG_SMP */
void __init void __init
time_init(void) time_init(void)
{ {
...@@ -385,6 +414,8 @@ time_init(void) ...@@ -385,6 +414,8 @@ time_init(void)
__you_loose(); __you_loose();
} }
register_rpcc_clocksource(cycle_freq);
state.last_time = cc1; state.last_time = cc1;
state.scaled_ticks_per_cycle state.scaled_ticks_per_cycle
= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq; = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
...@@ -394,44 +425,6 @@ time_init(void) ...@@ -394,44 +425,6 @@ time_init(void)
alpha_mv.init_rtc(); alpha_mv.init_rtc();
} }
/*
* Use the cycle counter to estimate an displacement from the last time
* tick. Unfortunately the Alpha designers made only the low 32-bits of
* the cycle counter active, so we overflow on 8.2 seconds on a 500MHz
* part. So we can't do the "find absolute time in terms of cycles" thing
* that the other ports do.
*/
/* Estimate the time elapsed since the last timer tick from the cycle
   counter; the return value is in nanoseconds (see the final *1000). */
u32 arch_gettimeoffset(void)
{
#ifdef CONFIG_SMP
/* Until and unless we figure out how to get cpu cycle counters
in sync and keep them there, we can't use the rpcc tricks. */
return 0;
#else
unsigned long delta_cycles, delta_usec, partial_tick;
/* Cycles since the last tick, per state.last_time recorded there. */
delta_cycles = rpcc() - state.last_time;
partial_tick = state.partial_tick;
/*
* usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
* = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
* = cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
*
* which, given a 600MHz cycle and a 1024Hz tick, has a
* dynamic range of about 1.7e17, which is less than the
* 1.8e19 in an unsigned long, so we are safe from overflow.
*
* Round, but with .5 up always, since .5 to even is harder
* with no clear gain.
*/
/* 15625 = 1e6 / 2**6; the remaining 2**42 = 2**(FIX_SHIFT-6)
   is divided out below, with one extra bit kept for rounding. */
delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
+ partial_tick) * 15625;
delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
/* Convert microseconds to the nanoseconds this interface returns. */
return delta_usec * 1000;
#endif
}
/* /*
* In order to set the CMOS clock precisely, set_rtc_mmss has to be * In order to set the CMOS clock precisely, set_rtc_mmss has to be
* called 500 ms after the second nowtime has started, because when * called 500 ms after the second nowtime has started, because when
......
...@@ -142,7 +142,6 @@ do_page_fault(unsigned long address, unsigned long mmcsr, ...@@ -142,7 +142,6 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto bad_area; goto bad_area;
} }
survive:
/* If for any reason at all we couldn't handle the fault, /* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo make sure we exit gracefully rather than endlessly redo
the fault. */ the fault. */
...@@ -188,16 +187,10 @@ do_page_fault(unsigned long address, unsigned long mmcsr, ...@@ -188,16 +187,10 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* We ran out of memory, or some other thing happened to us that /* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */ made us unable to handle the page fault gracefully. */
out_of_memory: out_of_memory:
if (is_global_init(current)) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk(KERN_ALERT "VM: killing process %s(%d)\n",
current->comm, task_pid_nr(current));
if (!user_mode(regs)) if (!user_mode(regs))
goto no_context; goto no_context;
do_group_exit(SIGKILL); pagefault_out_of_memory();
return;
do_sigbus: do_sigbus:
/* Send a sigbus, regardless of whether we were in kernel /* Send a sigbus, regardless of whether we were in kernel
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment