Commit 27b46d76 authored by Simon Arlott, committed by Adrian Bunk

spelling fixes: arch/i386/

Spelling fixes in arch/i386/.
Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
parent 5e71c605
@@ -25,7 +25,7 @@
 /*
  * Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analized.
+ * Worst case behaviours need to be analyzed.
  * Background information:
  *
  * The file layout is:
@@ -94,7 +94,7 @@
  * Adding 32768 instead of 32767 just makes for round numbers.
  * Adding the decompressor_size is necessary as it musht live after all
  * of the data as well. Last I measured the decompressor is about 14K.
- * 10K of actuall data and 4K of bss.
+ * 10K of actual data and 4K of bss.
  *
  */
...
@@ -99,7 +99,7 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 /*
  * The default interrupt routing model is PIC (8259). This gets
- * overriden if IOAPICs are enumerated (below).
+ * overridden if IOAPICs are enumerated (below).
  */
 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
@@ -414,8 +414,8 @@ acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end
  *
  * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
  * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
- * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
- * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
+ * ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
+ * ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
  */
 void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
@@ -427,7 +427,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 old = inb(0x4d0) | (inb(0x4d1) << 8);
 /*
- * If we use ACPI to set PCI irq's, then we should clear ELCR
+ * If we use ACPI to set PCI IRQs, then we should clear ELCR
  * since we will set it correctly as we enable the PCI irq
  * routing.
  */
...
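
Note: the ELCR layout described in the comment above is simple enough to show end to end: one bit per ISA IRQ, split across ports 0x4d0/0x4d1. A minimal sketch of a setter, assuming a kernel-style context where inb()/outb() are available; this is a hypothetical helper, not the kernel's (the real implementation is acpi_pic_sci_set_trigger()):

```c
#include <asm/io.h>	/* inb()/outb() */

/*
 * Hypothetical helper: set IRQ 0-15 to level (1) or edge (0) trigger
 * in the 8259 ELCR described above. ECLR1 (port 0x4d0) holds IRQs 0-7,
 * ECLR2 (port 0x4d1) holds IRQs 8-15, one bit per IRQ. The caller must
 * avoid the always-edge lines (IRQ 0, 1, 2, 8, 13).
 */
static void elcr_set_level(unsigned int irq, int level)
{
	unsigned int port = 0x4d0 + (irq >> 3);	/* which ECLR register */
	unsigned char mask = 1 << (irq & 7);	/* bit for this IRQ */
	unsigned char val = inb(port);

	outb(level ? (val | mask) : (val & ~mask), port);
}
```
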
@@ -947,7 +947,7 @@ void __devinit setup_local_APIC(void)
  * Set up LVT0, LVT1:
  *
  * set up through-local-APIC on the BP's LINT0. This is not
- * strictly necessery in pure symmetric-IO mode, but sometimes
+ * strictly necessary in pure symmetric-IO mode, but sometimes
  * we delegate interrupts to the 8259A.
  */
 /*
@@ -998,7 +998,7 @@ void __devinit setup_local_APIC(void)
 } else {
 if (esr_disable)
 /*
- * Something untraceble is creating bad interrupts on
+ * Something untraceable is creating bad interrupts on
  * secondary quads ... for the moment, just leave the
  * ESR disabled - we can't do anything useful with the
  * errors anyway - mbligh
...
@@ -57,7 +57,7 @@
  * screen-blanking and gpm (Stephen Rothwell); Linux 1.99.4
  * 1.2a:Simple change to stop mysterious bug reports with SMP also added
  * levels to the printk calls. APM is not defined for SMP machines.
- * The new replacment for it is, but Linux doesn't yet support this.
+ * The new replacement for it is, but Linux doesn't yet support this.
  * Alan Cox Linux 2.1.55
  * 1.3: Set up a valid data descriptor 0x40 for buggy BIOS's
  * 1.4: Upgraded to support APM 1.2. Integrated ThinkPad suspend patch by
...
@@ -266,7 +266,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_HT
 /*
  * On a AMD multi core setup the lower bits of the APIC id
- * distingush the cores.
+ * distinguish the cores.
  */
 if (c->x86_max_cores > 1) {
 int cpu = smp_processor_id();
...
@@ -53,7 +53,7 @@ static u32 __cpuinit ramtop(void) /* 16388 */
 continue;
 /*
  * Don't MCR over reserved space. Ignore the ISA hole
- * we frob around that catastrophy already
+ * we frob around that catastrophe already
  */
 if (e820.map[i].type == E820_RESERVED)
@@ -287,7 +287,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 c->x86_capability[5] = cpuid_edx(0xC0000001);
 }
- /* Cyrix III family needs CX8 & PGE explicity enabled. */
+ /* Cyrix III family needs CX8 & PGE explicitly enabled. */
 if (c->x86_model >=6 && c->x86_model <= 9) {
 rdmsr (MSR_VIA_FCR, lo, hi);
 lo |= (1<<1 | 1<<7);
...
@@ -207,7 +207,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 static int __init x86_fxsr_setup(char * s)
 {
- /* Tell all the other CPU's to not use it... */
+ /* Tell all the other CPUs to not use it... */
 disable_x86_fxsr = 1;
 /*
...
@@ -260,7 +260,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
 freqs.old = nforce2_get(policy->cpu);
 freqs.new = target_fsb * fid * 100;
- freqs.cpu = 0; /* Only one CPU on nForce2 plattforms */
+ freqs.cpu = 0; /* Only one CPU on nForce2 platforms */
 if (freqs.old == freqs.new)
 return 0;
...
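
Note on the freqs.new expression quoted above: target_fsb is in MHz and fid appears to encode the CPU multiplier scaled by ten, so the product lands in kHz, cpufreq's native unit. A worked illustration with made-up numbers (ours, not from the patch):

```c
/* Illustration only - values invented, units inferred from the
 * expression above: MHz * (multiplier * 10) * 100 = kHz. */
unsigned int target_fsb = 133;	/* front side bus in MHz */
unsigned int fid = 75;		/* 7.5x multiplier, scaled by 10 */
unsigned int new_khz = target_fsb * fid * 100;	/* 997500 kHz ~ 997.5 MHz */
```
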
@@ -12,12 +12,12 @@
  * of any nature resulting due to the use of this software. This
  * software is provided AS-IS with no warranties.
  *
- * Theoritical note:
+ * Theoretical note:
  *
  * (see Geode(tm) CS5530 manual (rev.4.1) page.56)
  *
  * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0
- * are based on Suspend Moduration.
+ * are based on Suspend Modulation.
  *
  * Suspend Modulation works by asserting and de-asserting the SUSP# pin
  * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
@@ -101,11 +101,11 @@
 /* SUSCFG bits */
 #define SUSMOD (1<<0) /* enable/disable suspend modulation */
- /* the belows support only with cs5530 (after rev.1.2)/cs5530A */
+ /* the below is supported only with cs5530 (after rev.1.2)/cs5530A */
 #define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
 /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
 #define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
- /* the belows support only with cs5530A */
+ /* the below is supported only with cs5530A */
 #define PWRSVE_ISA (1<<3) /* stop ISA clock */
 #define PWRSVE (1<<4) /* active idle */
...
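
Note: the suspend-modulation scheme described in that header comment amounts to duty-cycling the CPU: it only makes progress while SUSP# is de-asserted, so the achieved frequency scales with the on/off ratio. A sketch of that relationship (our hypothetical helper, not part of the driver; both durations in the same unit):

```c
/*
 * Effective frequency under suspend modulation, per the description
 * above: run_dur is how long SUSP# stays de-asserted (CPU running),
 * susp_dur how long it stays asserted (CPU suspended).
 * Hypothetical helper under our assumptions.
 */
static unsigned long gx_effective_khz(unsigned long cpu_khz,
				      unsigned long run_dur,
				      unsigned long susp_dur)
{
	return cpu_khz * run_dur / (run_dur + susp_dur);
}
```
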
@@ -168,7 +168,7 @@ static void count_off_irt(struct powernow_k8_data *data)
 return;
 }
- /* the voltage stabalization time */
+ /* the voltage stabilization time */
 static void count_off_vst(struct powernow_k8_data *data)
 {
 udelay(data->vstable * VST_UNITS_20US);
...
@@ -148,10 +148,10 @@ struct powernow_k8_data {
 #define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
 #define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
- #define VST_UNITS_20US 20 /* Voltage Stabalization Time is in units of 20us */
+ #define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */
 /*
- * Most values of interest are enocoded in a single field of the _PSS
+ * Most values of interest are encoded in a single field of the _PSS
  * entries: the "control" value.
  */
...
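
Tying the two powernow-k8 hunks together: count_off_vst() above delays for data->vstable * VST_UNITS_20US microseconds, i.e. the BIOS expresses the voltage stabilization time in 20us units. A one-line illustration with a made-up table value:

```c
/* VST is stored in 20us units (VST_UNITS_20US == 20), so e.g.
 * vstable = 5 yields udelay(5 * 20), a 100us settle time. */
unsigned int settle_us = vstable * VST_UNITS_20US;	/* 5 -> 100 */
```
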
@@ -256,7 +256,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 u32 vendor, device;
 /* It isn't really a PCI quirk directly, but the cure is the
  same. The MediaGX has deep magic SMM stuff that handles the
- SB emulation. It thows away the fifo on disable_dma() which
+ SB emulation. It throws away the fifo on disable_dma() which
  is wrong and ruins the audio.
  Bug2: VSA1 has a wrap bug so that using maximum sized DMA
...
@@ -147,10 +147,10 @@ static void prepare_set(void)
 write_cr0(cr0);
 wbinvd();
- /* Cyrix ARRs - everything else were excluded at the top */
+ /* Cyrix ARRs - everything else was excluded at the top */
 ccr3 = getCx86(CX86_CCR3);
- /* Cyrix ARRs - everything else were excluded at the top */
+ /* Cyrix ARRs - everything else was excluded at the top */
 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
 }
...
@@ -182,7 +182,7 @@ static inline void k8_enable_fixed_iorrs(void)
 /**
  * Checks and updates an fixed-range MTRR if it differs from the value it
- * should have. If K8 extenstions are wanted, update the K8 SYSCFG MSR also.
+ * should have. If K8 extentions are wanted, update the K8 SYSCFG MSR also.
  * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
  * \param msr MSR address of the MTTR which should be checked and updated
  * \param changed pointer which indicates whether the MTRR needed to be changed
...
@@ -748,7 +748,7 @@ static int __init mtrr_init_finialize(void)
 if (use_intel())
 mtrr_state_warn();
 else {
- /* The CPUs haven't MTRR and seemes not support SMP. They have
+ /* The CPUs haven't MTRR and seem to not support SMP. They have
  * specific drivers, we use a tricky method to support
  * suspend/resume for them.
  * TBD: is there any system with such CPU which supports
...
@@ -706,7 +706,7 @@ void __init e820_register_memory(void)
 int i;
 /*
- * Search for the bigest gap in the low 32 bits of the e820
+ * Search for the biggest gap in the low 32 bits of the e820
  * memory space.
  */
 last = 0x100000000ull;
...
@@ -350,7 +350,7 @@ static int hpet_clocksource_register(void)
  *
  * hpet period is in femto seconds per cycle
  * so we need to convert this to ns/cyc units
- * aproximated by mult/2^shift
+ * approximated by mult/2^shift
  *
  * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
  * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
...
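
The femtoseconds-to-(mult, shift) identity spelled out in that comment is plain fixed-point arithmetic. A standalone sketch, not the kernel's clocksource code; hpet_period is in fsec/cycle and the caller picks the shift:

```c
#include <stdint.h>

/* mult = fsec_per_cyc * 2^shift / 10^6, straight from the comment
 * above; afterwards ns = (cycles * mult) >> shift. The 64-bit
 * intermediate keeps the left shift from overflowing for realistic
 * HPET periods. */
static uint32_t hpet_mult_from_period(uint64_t fsec_per_cyc, unsigned int shift)
{
	return (uint32_t)((fsec_per_cyc << shift) / 1000000);
}
```

For the common 14.318 MHz HPET (period roughly 69841279 fsec) and shift = 22, this gives mult around 292935555, i.e. about 69.84 ns per cycle, as expected.
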
@@ -86,7 +86,7 @@ static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
  * On UP the PIT can serve all of the possible timer functions. On SMP systems
  * it can be solely used for the global tick.
  *
- * The profiling and update capabilites are switched off once the local apic is
+ * The profiling and update capabilities are switched off once the local apic is
  * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
  * !using_apic_timer decisions in do_timer_interrupt_hook()
  */
...
@@ -584,7 +584,7 @@ static void do_irq_balance(void)
 imbalance = move_this_load;
- /* For physical_balance case, we accumlated both load
+ /* For physical_balance case, we accumulated both load
  * values in the one of the siblings cpu_irq[],
  * to use the same code for physical and logical processors
  * as much as possible.
@@ -2472,7 +2472,7 @@ void destroy_irq(unsigned int irq)
 }
 /*
- * MSI mesage composition
+ * MSI message composition
  */
 #ifdef CONFIG_PCI_MSI
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
...
@@ -1001,7 +1001,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 /*
  * Use the default configuration for the IRQs 0-15. Unless
- * overriden by (MADT) interrupt source override entries.
+ * overridden by (MADT) interrupt source override entries.
  */
 for (i = 0; i < 16; i++) {
 int idx;
...
@@ -632,7 +632,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
 /* User-mode eip? */
 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
- /* Send us the fakey SIGTRAP */
+ /* Send us the fake SIGTRAP */
 force_sig_info(SIGTRAP, &info, tsk);
 }
...
@@ -624,7 +624,7 @@ void __init setup_arch(char **cmdline_p)
 /*
  * NOTE: before this point _nobody_ is allowed to allocate
  * any memory using the bootmem allocator. Although the
- * alloctor is now initialised only the first 8Mb of the kernel
+ * allocator is now initialised only the first 8Mb of the kernel
  * virtual address space has been mapped. All allocations before
  * paging_init() has completed must use the alloc_bootmem_low_pages()
  * variant (which allocates DMA'able memory) and care must be taken
...
@@ -594,7 +594,7 @@ static void fastcall do_signal(struct pt_regs *regs)
 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 if (signr > 0) {
- /* Reenable any watchpoints before delivering the
+ /* Re-enable any watchpoints before delivering the
  * signal to user space. The processor register will
  * have been cleared if the watchpoint triggered
  * inside the kernel.
...
@@ -69,7 +69,7 @@
  *
  * B stepping CPUs may hang. There are hardware work arounds
  * for this. We warn about it in case your board doesn't have the work
- * arounds. Basically thats so I can tell anyone with a B stepping
+ * arounds. Basically that's so I can tell anyone with a B stepping
  * CPU and SMP problems "tough".
  *
  * Specific items [From Pentium Processor Specification Update]
@@ -273,7 +273,7 @@ void leave_mm(unsigned long cpu)
  * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
  * Stop ipi delivery for the old mm. This is not synchronized with
  * the other cpus, but smp_invalidate_interrupt ignore flush ipis
- * for the wrong mm, and in the worst case we perform a superflous
+ * for the wrong mm, and in the worst case we perform a superfluous
  * tlb flush.
  * 1a2) set cpu_tlbstate to TLBSTATE_OK
  * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
...
@@ -412,7 +412,7 @@ static void __cpuinit start_secondary(void *unused)
 /*
  * We need to hold call_lock, so there is no inconsistency
  * between the time smp_call_function() determines number of
- * IPI receipients, and the time when the determination is made
+ * IPI recipients, and the time when the determination is made
  * for which cpus receive the IPI. Holding this
  * lock helps us to not include this cpu in a currently in progress
  * smp_call_function().
...
@@ -64,7 +64,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
 switch (rio_devs[wpeg_num]->type){
 case CompatWPEG:
- /* The Compatability Winnipeg controls the 2 legacy buses,
+ /* The Compatibility Winnipeg controls the 2 legacy buses,
  * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
  * a PCI-PCI bridge card is used in either slot: total 5 buses.
  */
...
@@ -59,7 +59,7 @@ int check_tsc_unstable(void)
 }
 EXPORT_SYMBOL_GPL(check_tsc_unstable);
- /* Accellerators for sched_clock()
+ /* Accelerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
  * ns = cycles / (freq / ns_per_sec)
...
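
The "basic equation" above would divide on every call; the usual trick in sched_clock-style code is to precompute a fixed-point scale so the hot path is a single multiply and shift. A generic sketch with our own names, not the kernel's cyc2ns code:

```c
#include <stdint.h>

#define CYC2NS_SHIFT 10	/* fixed-point precision, our choice */

/* ns per cycle = 10^6 / cpu_khz, kept as a <<CYC2NS_SHIFT fixed-point
 * value so that cycles_to_ns() needs no division. */
static uint64_t cyc2ns_scale(uint32_t cpu_khz)
{
	return ((uint64_t)1000000 << CYC2NS_SHIFT) / cpu_khz;
}

/* ns = (cycles * scale) >> shift -- the equation above with the
 * division hoisted out of the fast path. */
static uint64_t cycles_to_ns(uint64_t cycles, uint64_t scale)
{
	return (cycles * scale) >> CYC2NS_SHIFT;
}
```

For example, at 2 GHz (cpu_khz = 2000000) the scale is 512, so 4000000 cycles convert to 2000000 ns, matching ns = cycles / 2.
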
@@ -108,7 +108,7 @@ void __init time_init_hook(void)
  * mca_nmi_hook - hook into MCA specific NMI chain
  *
  * Description:
- * The MCA (Microchannel Arcitecture) has an NMI chain for NMI sources
+ * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
  * along the MCA bus. Use this to hook into that chain if you will need
  * it.
 **/
...
 /*
- * Default generic APIC driver. This handles upto 8 CPUs.
+ * Default generic APIC driver. This handles up to 8 CPUs.
  */
 #define APIC_DEFINITION 1
 #include <linux/threads.h>
...
@@ -56,7 +56,7 @@ void __init generic_bigsmp_probe(void)
 /*
  * This routine is used to switch to bigsmp mode when
  * - There is no apic= option specified by the user
- * - generic_apic_probe() has choosen apic_default as the sub_arch
+ * - generic_apic_probe() has chosen apic_default as the sub_arch
  * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
  */
...
@@ -389,7 +389,7 @@ find_smp_config(void)
 /* The boot CPU must be extended */
 voyager_extended_vic_processors = 1<<boot_cpu_id;
- /* initially, all of the first 8 cpu's can boot */
+ /* initially, all of the first 8 CPUs can boot */
 voyager_allowed_boot_processors = 0xff;
 /* set up everything for just this CPU, we can alter
  * this as we start the other CPUs later */
@@ -1010,7 +1010,7 @@ static struct call_data_struct * call_data;
 /* execute a thread on a new CPU. The function to be called must be
  * previously set up. This is used to schedule a function for
- * execution on all CPU's - set up the function then broadcast a
+ * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void
 smp_call_function_interrupt(void)
@@ -1095,7 +1095,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
  * CPI here. We don't use this actually for counting so losing
  * ticks doesn't matter
  *
- * FIXME: For those CPU's which actually have a local APIC, we could
+ * FIXME: For those CPUs which actually have a local APIC, we could
  * try to use it to trigger this interrupt instead of having to
  * broadcast the timer tick. Unfortunately, all my pentium DYADs have
  * no local APIC, so I can't do this
@@ -1287,7 +1287,7 @@ smp_local_timer_interrupt(void)
 /*
  * We take the 'long' return path, and there every subsystem
- * grabs the apropriate locks (kernel lock/ irq lock).
+ * grabs the appropriate locks (kernel lock/ irq lock).
  *
  * we might want to decouple profiling from the 'long path',
  * and do the profiling totally in assembly.
@@ -1759,7 +1759,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
 if(cpus_addr(mask)[0] == 0)
- /* can't have no cpu's to accept the interrupt -- extremely
+ /* can't have no CPUs to accept the interrupt -- extremely
  * bad things will happen */
 return;
@@ -1791,7 +1791,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 }
 /* this is magic, we now have the correct affinity maps, so
  * enable the interrupt. This will send an enable CPI to
- * those cpu's who need to enable it in their local masks,
+ * those CPUs who need to enable it in their local masks,
  * causing them to correct for the new affinity . If the
  * interrupt is currently globally disabled, it will simply be
  * disabled again as it comes in (voyager lazy disable). If
...
@@ -64,7 +64,7 @@ check_from_kernel(void)
 {
 if(voyager_status.switch_off) {
- /* FIXME: This should be configureable via proc */
+ /* FIXME: This should be configurable via proc */
 execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
 } else if(voyager_status.power_fail) {
 VDEBUG(("Voyager daemon detected AC power failure\n"));
...
@@ -10,7 +10,7 @@
 /*
  * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
- * keeps that from happenning. If anyone has a better way, I'm listening.
+ * keeps that from happening. If anyone has a better way, I'm listening.
  *
  * boot_pte_t is defined only if this all works correctly
  */
...
@@ -273,7 +273,7 @@ unsigned long __init setup_memory(void)
  * When mapping a NUMA machine we allocate the node_mem_map arrays
  * from node local memory. They are then mapped directly into KVA
  * between zone normal and vmalloc space. Calculate the size of
- * this space and use it to adjust the boundry between ZONE_NORMAL
+ * this space and use it to adjust the boundary between ZONE_NORMAL
  * and ZONE_HIGHMEM.
  */
 find_max_pfn();
...
@@ -354,7 +354,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 /* When running in the kernel we expect faults to occur only to
  * addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
  * erroneous fault occurring in a code path which already holds mmap_sem
  * we will deadlock attempting to validate the fault against the
  * address space. Luckily the kernel only validly references user
@@ -362,7 +362,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
  * exceptions table.
  *
  * As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
  * Attempt to lock the address space, if we cannot we then validate the
  * source. If this is invalid we can skip the address space check,
  * thus avoiding the deadlock.
...
@@ -29,7 +29,7 @@ struct op_msrs {
 struct pt_regs;
 /* The model vtable abstracts the differences between
- * various x86 CPU model's perfctr support.
+ * various x86 CPU models' perfctr support.
  */
 struct op_x86_model_spec {
 unsigned int const num_counters;
...
@@ -169,7 +169,7 @@ void eisa_set_level_irq(unsigned int irq)
 }
 /*
- * Common IRQ routing practice: nybbles in config space,
+ * Common IRQ routing practice: nibbles in config space,
  * offset by some magic constant.
  */
 static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
@@ -585,7 +585,7 @@ static __init int via_router_probe(struct irq_router *r,
 /* FIXME: We should move some of the quirk fixup stuff here */
 /*
- * work arounds for some buggy BIOSes
+ * workarounds for some buggy BIOSes
 */
 if (device == PCI_DEVICE_ID_VIA_82C586_0) {
 switch(router->device) {
...
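
For reference, the "nibbles in config space" convention mentioned above packs two 4-bit IRQ link values per byte: read_config_nybble() reads the byte at offset + nr/2 and then selects the low or high nibble by nr's parity. A self-contained sketch of that extraction (pure arithmetic, no PCI access; hypothetical function name):

```c
#include <stdint.h>

/* Extract IRQ link 'nr' from a config-space byte laid out as two
 * nibbles per byte: even nr -> low nibble, odd nr -> high nibble.
 * 'byte' is whatever a config read returned for offset + (nr >> 1). */
static unsigned int config_nybble(uint8_t byte, unsigned int nr)
{
	return (nr & 1) ? (byte >> 4) : (byte & 0x0f);
}
```
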