Commit 2547476a authored by Andrea Gelmini, committed by Vineet Gupta

Fix typos

Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent 1a695a90
@@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)
 boot := arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE := bootpImage
 
 all: $(KBUILD_IMAGE)
@@ -76,8 +76,8 @@
  * We need to be a bit more cautious here. What if a kernel bug in
  * L1 ISR, caused SP to go whaco (some small value which looks like
  * USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
  * The only feasible way is to make sure this L2 happened in
  * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
  * L1 ISR before it switches stack
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 		local_flush_tlb_all();
 
 	/*
-	 * Above checke for rollover of 8 bit ASID in 32 bit container.
+	 * Above check for rollover of 8 bit ASID in 32 bit container.
 	 * If the container itself wrapped around, set it to a non zero
 	 * "generation" to distinguish from no context
 	 */
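The rollover scheme this comment documents is small enough to model in full: the low 8 bits of a 32-bit counter hold the hardware ASID, and the upper bits act as a "generation" so a stale context can never collide with a live one. A minimal standalone sketch, assuming ARC-style mask names; the values and the flush stub are illustrative, not the kernel's code:

```c
#include <stdio.h>

#define MM_CTXT_ASID_MASK   0x000000ffu           /* 8-bit hardware ASID */
#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)

static unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;

static void flush_all_tlb_entries(void)   /* stand-in for local_flush_tlb_all() */
{
	puts("ASID space exhausted: flushing TLB");
}

static unsigned int alloc_asid(void)
{
	/* move to the next ASID; a zero low byte means the 8-bit field
	 * rolled over inside the 32-bit container */
	if (!((++asid_cache) & MM_CTXT_ASID_MASK))
		flush_all_tlb_entries();

	/* if the 32-bit container itself wrapped, restart at a non-zero
	 * "generation" so that 0 keeps meaning "no context" */
	if (!asid_cache)
		asid_cache = MM_CTXT_FIRST_CYCLE;

	return asid_cache;
}
```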
@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
  *      seperate PD0 and PD1, which combined forms a translation entry)
  *      while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk) (tsk->thread.ksp)
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  * (1) These insn were introduced only in 4.10 release. So for older released
  *     support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *     gaurantted by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
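When the platform cannot give that guarantee, the fallback the note describes (spinlocks + local IRQ disabling) turns each atomic op into a plain read-modify-write inside a critical section. A hedged sketch of that shape using the stock kernel spinlock API; the lock and function names are illustrative:

```c
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(atomic_ops_lock);	/* name assumed for illustration */

static inline void atomic_add_no_llsc(int i, int *v)
{
	unsigned long flags;

	/* lock held with IRQs off: neither another CPU nor a local ISR
	 * can interleave with the read-modify-write below */
	raw_spin_lock_irqsave(&atomic_ops_lock, flags);
	*v += i;
	raw_spin_unlock_irqrestore(&atomic_ops_lock, flags);
}
```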
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
  * syscall, so all that reamins to be tested is _TIF_WORK_MASK
  */
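The definition this comment precedes is a one-line mask derivation; its likely shape (assumed here, not quoted verbatim from the ARC tree):

```c
/* everything in _TIF_ALLWORK_MASK except the trace bit, which the
 * comment says is tested separately right after a syscall */
#define _TIF_WORK_MASK	(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_TRACE)
```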
@@ -32,7 +32,7 @@
 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
 
 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
  * (start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.
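Transcribed directly into C, the check described above is just two compares. A sketch, with a hardcoded TASK_SIZE standing in for the addr_limit / compile-time variants the comment mentions:

```c
#define TASK_SIZE 0x60000000UL	/* illustrative value */

static inline int user_ok_sketch(unsigned long start, unsigned long len)
{
	return (start < TASK_SIZE) && (start + len < TASK_SIZE);
}
```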
@@ -74,7 +74,7 @@
 	__tmp ^ __in;	\
 })
-#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bswap instruction */
 #define __arch_swab32(x)	\
 ({	\
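Whichever ARC_BSWAP_TYPE is selected, the result __arch_swab32() must produce is the plain byte reversal below; the portable form makes explicit what the single-cycle instruction computes:

```c
#include <stdint.h>

static inline uint32_t swab32_generic(uint32_t x)
{
	/* reverse the four bytes of a 32-bit word */
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}
/* e.g. swab32_generic(0x12345678) == 0x78563412 */
```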
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
 	int64_t delta = new_raw_count - prev_raw_count;
 
 	/*
-	 * We don't afaraid of hwc->prev_count changing beneath our feet
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
 	 * because there's no way for us to re-enter this function anytime.
 	 */
 	local64_set(&hwc->prev_count, new_raw_count);
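The surrounding function follows the usual perf counter-update pattern: read the hardware counter, take the signed delta against the cached previous value, republish the cache, account the delta. A reduced sketch; read_counter() is a hypothetical helper, while local64_read/set/add are the real primitives:

```c
#include <linux/perf_event.h>

static void event_update_sketch(struct perf_event *event,
				struct hw_perf_event *hwc, int idx)
{
	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
	uint64_t new_raw_count = read_counter(idx);	/* hypothetical */
	int64_t delta = new_raw_count - prev_raw_count;

	/* safe without a cmpxchg loop: per the comment, this path
	 * cannot re-enter, so prev_count cannot change underneath us */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
}
```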
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
 		/*
 		 * If we are here, it is established that @uboot_arg didn't
 		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * Appent to embedded DT cmdline.
+		 * append to embedded DT cmdline.
 		 * setup_machine_fdt() would have populated @boot_command_line
 		 */
 		if (uboot_tag == 1) {
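One plausible body for that branch, assuming the usual strlcat()-based append onto @boot_command_line (a sketch of the idea, not necessarily the exact code that follows in setup_arch()):

```c
	if (uboot_tag == 1) {
		/* embedded DT cmdline first, then whatever u-boot passed */
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
	}
```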
@@ -34,7 +34,7 @@
  * -ViXS were still seeing crashes when using insmod to load drivers.
  *  It turned out that the code to change Execute permssions for TLB entries
  *  of user was not guarded for interrupts (mod_tlb_permission)
- *  This was cauing TLB entries to be overwritten on unrelated indexes
+ *  This was causing TLB entries to be overwritten on unrelated indexes
  *
  * Vineetg: July 15th 2008: Bug #94183
  * -Exception happens in Delay slot of a JMP, and before user space resumes,
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,	/* file descriptor */
 			char __user *user_buf,	/* user buffer */
 			size_t len,		/* length of buffer */
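Read handlers of this kind typically format the counters into a kernel buffer and let simple_read_from_buffer() do the user copy and offset bookkeeping. A hedged sketch; the counter names are assumptions:

```c
#include <linux/fs.h>

static ssize_t tlb_stats_sketch(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	char buf[64];
	int n = scnprintf(buf, sizeof(buf), "fast: %u slow: %u\n",
			  fast_path_cnt, slow_path_cnt);	/* assumed names */

	return simple_read_from_buffer(user_buf, len, ppos, buf, n);
}
```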
@@ -215,7 +215,7 @@ void read_decode_cache_bcr(void)
  * ------------------
  * This ver of MMU supports variable page sizes (1k-16k): although Linux will
  * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
  * Two new registers IC_PTAG and DC_PTAG inttoduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 	/*
 	 * This is technically for MMU v4, using the MMU v3 programming model
-	 * Special work for HS38 aliasing I-cache configuratino with PAE40
+	 * Special work for HS38 aliasing I-cache configuration with PAE40
 	 *  - upper 8 bits of paddr need to be written into PTAG_HI
 	 *  - (and needs to be written before the lower 32 bits)
 	 * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
 			ic->ver, CONFIG_ARC_MMU_VER);
 
 		/*
-		 * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
 		 * pair to provide vaddr/paddr respectively, just as in MMU v3
 		 */
 		if (is_isa_arcv2() && ic->alias)
@@ -10,7 +10,7 @@
  * DMA Coherent API Notes
  *
  * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.
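From a driver's point of view this is transparent: dma_alloc_coherent() returns a kernel virtual address whose mapping has the Cache bit off, so CPU stores are immediately visible to the device at the returned handle. A minimal usage sketch (device and size are placeholders):

```c
#include <linux/dma-mapping.h>

static int dma_buf_example(struct device *dev)
{
	dma_addr_t handle;
	void *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);

	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0, PAGE_SIZE);	/* uncached write, device-visible */
	/* ... hand @handle to the device, do I/O ... */
	dma_free_coherent(dev, PAGE_SIZE, vaddr, handle);
	return 0;
}
```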