Commit a97673a1 authored by Ingo Molnar

x86: Fix various typos in comments

Go over arch/x86/ and fix common typos in comments,
and a typo in an actual function argument name.

No change in functionality intended.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent df606731
/*
-* Glue Code for the AVX assembler implemention of the Cast5 Cipher
+* Glue Code for the AVX assembler implementation of the Cast5 Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
......
/*
-* Glue Code for the AVX assembler implemention of the Cast6 Cipher
+* Glue Code for the AVX assembler implementation of the Cast6 Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
......
@@ -140,7 +140,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
/*
* In order to return to user mode, we need to have IRQs off with
* none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
-* can be set at any time on preemptable kernels if we have IRQs on,
+* can be set at any time on preemptible kernels if we have IRQs on,
* so we need to loop. Disabling preemption wouldn't help: doing the
* work to clear some of the flags can sleep.
*/
......
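The comment above describes a re-check loop: the pending-work flags can be set again while they are being serviced, so the exit path keeps re-reading them with interrupts disabled until none are left. A minimal user-space sketch of that shape (all names hypothetical, not the kernel's actual exit path):

#include <stdio.h>

#define LOOP_FLAGS 0x3u                  /* stand-in for EXIT_TO_USERMODE_LOOP_FLAGS */

static unsigned int pending = 0x1u;      /* work queued before "returning to user mode" */

static unsigned int read_flags(void)
{
        return pending;
}

static void handle_work(unsigned int flags)
{
        /* In the kernel this work may sleep and may set new flags. */
        printf("handling flags %#x\n", flags);
        pending &= ~flags;
}

int main(void)
{
        unsigned int flags = read_flags();

        while (flags & LOOP_FLAGS) {
                /* interrupts would be re-enabled here, the work done ... */
                handle_work(flags);
                /* ... then disabled again before re-reading, so nothing is missed */
                flags = read_flags();
        }
        return 0;
}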
@@ -261,7 +261,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
* abusing from userspace install_speciall_mapping, which may
* not do accounting and rlimit right.
* We could search vma near context.vdso, but it's a slowpath,
-* so let's explicitely check all VMAs to be completely sure.
+* so let's explicitly check all VMAs to be completely sure.
*/
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma_is_special_mapping(vma, &vdso_mapping) ||
......
@@ -589,7 +589,7 @@ static __init int bts_init(void)
* the AUX buffer.
*
* However, since this driver supports per-CPU and per-task inherit
-* we cannot use the user mapping since it will not be availble
+* we cannot use the user mapping since it will not be available
* if we're not running the owning process.
*
* With PTI we can't use the kernal map either, because its not
......
@@ -1930,7 +1930,7 @@ static void intel_pmu_enable_all(int added)
* in sequence on the same PMC or on different PMCs.
*
* In practise it appears some of these events do in fact count, and
-* we need to programm all 4 events.
+* we need to program all 4 events.
*/
static void intel_pmu_nhm_workaround(void)
{
......
@@ -1199,7 +1199,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
/*
* We must however always use iregs for the unwinder to stay sane; the
* record BP,SP,IP can point into thin air when the record is from a
-* previous PMI context or an (I)RET happend between the record and
+* previous PMI context or an (I)RET happened between the record and
* PMI.
*/
if (sample_type & PERF_SAMPLE_CALLCHAIN)
......
@@ -1259,7 +1259,7 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
}
/*
* Perf does test runs to see if a whole group can be assigned
-* together succesfully. There can be multiple rounds of this.
+* together successfully. There can be multiple rounds of this.
* Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
* bits, such that the next round of group assignments will
* cause the above p4_should_swap_ts to pass instead of fail.
......
@@ -167,7 +167,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
/*
* Alternative inline assembly with input.
*
-* Pecularities:
+* Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* Best is to use constraints that are fixed size (like (%1) ... "r")
......
@@ -7,7 +7,7 @@
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
/*
-* Non-existant functions to indicate usage errors at link time
+* Non-existent functions to indicate usage errors at link time
* (or compile-time if the compiler implements __compiletime_error().
*/
extern void __xchg_wrong_size(void)
......
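The pattern this comment relies on can be reproduced outside the kernel: declare a function that is never defined, so any call to it that survives dead-code elimination breaks the build. A minimal sketch with hypothetical names, using the plain GCC error attribute rather than the kernel's __compiletime_error() wrapper:

/* Declared but never defined: if a call remains after optimization,
 * GCC's error attribute fails the compile; without the attribute the
 * unresolved symbol would still fail the final link. */
extern void __my_xchg_wrong_size(void)
        __attribute__((error("my_xchg(): unsupported operand size")));

#define my_xchg(ptr, val)                                               \
({                                                                      \
        __typeof__(*(ptr)) __ret = (val);                               \
        switch (sizeof(*(ptr))) {                                       \
        case 4:                                                         \
        case 8:                                                         \
                __ret = __sync_lock_test_and_set((ptr), (val));         \
                break;                                                  \
        default:                                                        \
                __my_xchg_wrong_size();  /* wrong size: breaks the build */ \
                break;                                                  \
        }                                                               \
        __ret;                                                          \
})

int main(void)
{
        long v = 1;
        long old = my_xchg(&v, 2L);      /* 8-byte operand: compiles fine */
        return (int)(old + v - 3);       /* 1 + 2 - 3 == 0 */
}

Built with optimization (as the kernel always is), the valid-size branches compile to the atomic exchange; applying my_xchg() to, say, a 2-byte field leaves the call to the undefined helper in place and stops the build instead of misbehaving at run time.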
@@ -19,7 +19,7 @@
* This is the main reason why we're doing stable VA mappings for RT
* services.
*
-* This flag is used in conjuction with a chicken bit called
+* This flag is used in conjunction with a chicken bit called
* "efi=old_map" which can be used as a fallback to the old runtime
* services mapping method in case there's some b0rkage with a
* particular EFI implementation (haha, it is hard to hold up the
......
@@ -848,7 +848,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic);
/**
* acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
* has been registered
-* @handle: ACPI handle of the IOAPIC deivce
+* @handle: ACPI handle of the IOAPIC device
* @gsi_base: GSI base associated with the IOAPIC
*
* Assume caller holds some type of lock to serialize acpi_ioapic_registered()
......
@@ -686,7 +686,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
* errors here. However this would be quite problematic --
* we would need to reimplement the Monarch handling and
* it would mess up the exclusion between exception handler
-* and poll hander -- * so we skip this for now.
+* and poll handler -- * so we skip this for now.
* These cases should not happen anyways, or only when the CPU
* is already totally * confused. In this case it's likely it will
* not fully execute the machine check handler either.
......
@@ -62,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
/**
* copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
-* memory with the encryption mask set to accomodate kdump on SME-enabled
+* memory with the encryption mask set to accommodate kdump on SME-enabled
* machines.
*/
ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
......
@@ -684,7 +684,7 @@ void set_personality_64bit(void)
/* TBD: overwrites user setup. Should have two bits.
But 64bit processes have always behaved this way,
so it's not too bad. The main problem is just that
-32bit childs are affected again. */
+32bit children are affected again. */
current->personality &= ~READ_IMPLIES_EXEC;
}
......
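As an aside on the flag cleared above: READ_IMPLIES_EXEC is also visible from user space through the personality(2) call. A minimal query program (illustration only, not kernel code):

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
        /* 0xffffffff means "return the current persona without changing it". */
        unsigned int persona = personality(0xffffffff);

        printf("READ_IMPLIES_EXEC is %s\n",
               (persona & READ_IMPLIES_EXEC) ? "set" : "clear");
        return 0;
}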
@@ -485,7 +485,7 @@ struct __packed vmcs12 {
/*
* To allow migration of L1 (complete with its L2 guests) between
* machines of different natural widths (32 or 64 bit), we cannot have
-* unsigned long fields with no explict size. We use u64 (aliased
+* unsigned long fields with no explicit size. We use u64 (aliased
* natural_width) instead. Luckily, x86 is little-endian.
*/
natural_width cr0_guest_host_mask;
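The little-endian remark in this comment is what makes the u64 aliasing safe: a value that fits in 32 bits occupies the first four bytes of the u64, so a 32-bit and a 64-bit reader see the same bytes at the same offset. A standalone illustration (not KVM code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint64_t wide = 0x12345678u;     /* "unsigned long" value kept in a u64 field */
        uint32_t narrow;

        /* On little-endian x86 the low 32 bits come first in memory,
         * so reading the first four bytes recovers the same value. */
        memcpy(&narrow, &wide, sizeof(narrow));
        printf("u64 field = %#llx, read as u32 = %#x\n",
               (unsigned long long)wide, narrow);
        return 0;
}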
@@ -4936,7 +4936,7 @@ static __init int alloc_kvm_area(void)
* vmcs->revision_id to KVM_EVMCS_VERSION instead of
* revision_id reported by MSR_IA32_VMX_BASIC.
*
-* However, even though not explictly documented by
+* However, even though not explicitly documented by
* TLFS, VMXArea passed as VMXON argument should
* still be marked with revision_id reported by
* physical CPU.
......
@@ -9280,7 +9280,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* with dirty logging disabled in order to eliminate unnecessary GPA
* logging in PML buffer (and potential PML buffer full VMEXT). This
* guarantees leaving PML enabled during guest's lifetime won't have
-* any additonal overhead from PML when guest is running with dirty
+* any additional overhead from PML when guest is running with dirty
* logging disabled for memory slots.
*
* kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
......
@@ -1704,7 +1704,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
} else if (!(in_flag & CPA_PAGES_ARRAY)) {
/*
* in_flag of CPA_PAGES_ARRAY implies it is aligned.
-* No need to cehck in that case
+* No need to check in that case
*/
if (*addr & ~PAGE_MASK) {
*addr &= PAGE_MASK;
......
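The comment here concerns the page-alignment handling in change_page_attr_set_clr(): addresses that are not page aligned are rounded down with PAGE_MASK. A small standalone example of that arithmetic (PAGE_SIZE hard-coded to 4 KiB for illustration, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        uintptr_t addr = 0x1234abcd;

        /* Clearing the low bits rounds the address down to its page:
         * 0x1234abcd -> 0x1234a000. */
        printf("%#lx -> %#lx\n",
               (unsigned long)addr, (unsigned long)(addr & PAGE_MASK));
        return 0;
}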
@@ -84,7 +84,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
}
static void ce4100_serial_fixup(int port, struct uart_port *up,
-u32 *capabilites)
+u32 *capabilities)
{
#ifdef CONFIG_EARLY_PRINTK
/*
@@ -111,7 +111,7 @@ static void ce4100_serial_fixup(int port, struct uart_port *up,
up->serial_in = ce4100_mem_serial_in;
up->serial_out = ce4100_mem_serial_out;
-*capabilites |= (1 << 12);
+*capabilities |= (1 << 12);
}
static __init void sdv_serial_fixup(void)
......
/*
-* platform_bcm43xx.c: bcm43xx platform data initilization file
+* platform_bcm43xx.c: bcm43xx platform data initialization file
*
* (C) Copyright 2016 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
......
/*
-* spidev platform data initilization file
+* spidev platform data initialization file
*
* (C) Copyright 2014, 2016 Intel Corporation
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
......
/*
-* PCAL9555a platform data initilization file
+* PCAL9555a platform data initialization file
*
* Copyright (C) 2016, Intel Corporation
*
......
@@ -13,7 +13,7 @@
*
*
* The IOSF-SB is a fabric bus available on Atom based SOC's that uses a
-* mailbox interface (MBI) to communicate with mutiple devices. This
+* mailbox interface (MBI) to communicate with multiple devices. This
* driver implements access to this interface for those platforms that can
* enumerate the device using PCI.
*/
......
@@ -109,7 +109,7 @@ static void detect_lid_state(void)
* the edge detector hookup on the gpio inputs on the geode is
* odd, to say the least. See http://dev.laptop.org/ticket/5703
* for details, but in a nutshell: we don't use the edge
-* detectors. instead, we make use of an anomoly: with the both
+* detectors. instead, we make use of an anomaly: with the both
* edge detectors turned off, we still get an edge event on a
* positive edge transition. to take advantage of this, we use the
* front-end inverter to ensure that that's the edge we're always
......
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
}
}
-/* Ping non-responding CPU's attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
int cpu;
......
@@ -493,7 +493,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
* The remap information (which mfn remap to which pfn) is contained in the
* to be remapped memory itself in a linked list anchored at xen_remap_mfn.
* This scheme allows to remap the different chunks in arbitrary order while
-* the resulting mapping will be independant from the order.
+* the resulting mapping will be independent from the order.
*/
void __init xen_remap_memory(void)
{
......