Commit 5fb682b0 authored by Chris Metcalf

arch/tile: fix some comments and whitespace

This is a grab bag of changes with no actual change to generated code.
This includes whitespace and comment typo fixes, plus a couple of stale
comments being removed.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 00dce031
# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/config-language.txt.
+# see Documentation/kbuild/kconfig-language.txt.
config TILE
def_bool y
@@ -15,14 +15,14 @@ config TILE
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
-#       select HAVE_OPTPROBES
-#       select HAVE_REGS_AND_STACK_ACCESS_API
-#       select HAVE_HW_BREAKPOINT
-#       select PERF_EVENTS
-#       select HAVE_USER_RETURN_NOTIFIER
-#       config NO_BOOTMEM
-#       config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-#       config HUGETLB_PAGE_SIZE_VARIABLE
+#	select HAVE_OPTPROBES
+#	select HAVE_REGS_AND_STACK_ACCESS_API
+#	select HAVE_HW_BREAKPOINT
+#	select PERF_EVENTS
+#	select HAVE_USER_RETURN_NOTIFIER
+#	config NO_BOOTMEM
+#	config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+#	config HUGETLB_PAGE_SIZE_VARIABLE
config MMU
def_bool y
@@ -40,7 +40,7 @@ config HAVE_SETUP_PER_CPU_AREA
def_bool y
config NEED_PER_CPU_PAGE_FIRST_CHUNK
-        def_bool y
+	def_bool y
config SYS_SUPPORTS_HUGETLBFS
def_bool y
@@ -122,7 +122,7 @@ static inline int test_and_change_bit(unsigned nr,
return (_atomic_xor(addr, mask) & mask) != 0;
}
-/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */
+/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() do {} while (0)
@@ -269,7 +269,6 @@ extern char chip_model[64];
/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];
-/* Do we dump information to the console when a user application crashes? */
extern int show_crashinfo;
@@ -1584,7 +1584,7 @@ ENTRY(sys_cmpxchg)
* about aliasing among multiple mappings of the same physical page,
* and we ignore the low 3 bits so we have one lock that covers
* both a cmpxchg64() and a cmpxchg() on either its low or high word.
- * NOTE: this code must match __atomic_hashed_lock() in lib/atomic.c.
+ * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
*/
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
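
For reference, a minimal C sketch of the hashing scheme this comment
describes; the PAGE_SIZE value and function name are illustrative, not the
kernel's exact code. Hashing on the offset within the page means every
mapping of a given physical page picks the same lock, and clearing the low
3 bits means a cmpxchg64() and a cmpxchg() on either 32-bit half share one
lock.

    #include <stdint.h>

    #define PAGE_SIZE 4096                              /* illustrative */
    static int atomic_locks[PAGE_SIZE / sizeof(int)];   /* one page of locks */

    /* Hash an address to a lock: page offset only (aliased mappings of
     * the same physical page agree), low 3 bits dropped (both halves of
     * a doubleword share one lock). */
    static int *hashed_lock(volatile void *v)
    {
        uintptr_t off = (uintptr_t)v & (PAGE_SIZE - 1) & ~(uintptr_t)7;
        return &atomic_locks[off >> 3];
    }
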
@@ -1718,7 +1718,7 @@ ENTRY(sys_cmpxchg)
/*
* Perform the actual cmpxchg or atomic_update.
- * Note that __futex_mark_unlocked() in uClibc relies on
+ * Note that the system <arch/atomic.h> header relies on
* atomic_update() to always perform an "mf", so don't make
* it optional or conditional without modifying that code.
*/
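
The guarantee being preserved here can be modeled in C11: the
read-modify-write always ends with a full fence, whether or not the stored
value changed. The (old & mask) + addend semantics below are an assumption
for illustration, not something this diff specifies.

    #include <stdatomic.h>

    /* Model of an RMW that, like the assembly above, unconditionally
     * ends with a full memory fence (the "mf"). */
    static int atomic_update_model(_Atomic int *addr, int mask, int addend)
    {
        int old = atomic_load_explicit(addr, memory_order_relaxed);
        /* assumed semantics: new = (old & mask) + addend */
        while (!atomic_compare_exchange_weak_explicit(
                addr, &old, (old & mask) + addend,
                memory_order_relaxed, memory_order_relaxed))
            ;
        atomic_thread_fence(memory_order_seq_cst);  /* the unconditional "mf" */
        return old;
    }
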
@@ -52,7 +52,7 @@ int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
static inline int *__atomic_hashed_lock(volatile void *v)
{
-	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec.S */
+	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
unsigned long i =
(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
@@ -14,7 +14,7 @@
* Support routines for atomic operations. Each function takes:
*
* r0: address to manipulate
- * r1: pointer to atomic lock guarding this operation (for FUTEX_LOCK_REG)
+ * r1: pointer to atomic lock guarding this operation (for ATOMIC_LOCK_REG)
* r2: new value to write, or for cmpxchg/add_unless, value to compare against
* r3: (cmpxchg/xchg_add_unless) new value to write or add;
* (atomic64 ops) high word of value to write
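
Read as a C declaration, the register convention above maps to a helper
signature along these lines; the name, return type, and parameter types are
illustrative assumptions, not taken from this file.

    /* r0 -> addr, r1 -> lock (ATOMIC_LOCK_REG), r2 -> compare value,
     * r3 -> new value; returns the old value read from *addr. */
    extern int __atomic_cmpxchg_sketch(volatile int *addr,  /* r0 */
                                       int *lock,           /* r1 */
                                       int oldval,          /* r2 */
                                       int newval);         /* r3 */
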
@@ -654,14 +654,6 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
}
-	/*
-	 * NOTE: the one other type of access that might bring us here
-	 * are the memory ops in __tns_atomic_acquire/__tns_atomic_release,
-	 * but we don't have to check specially for them since we can
-	 * always safely return to the address of the fault and retry,
-	 * since no separate atomic locks are involved.
-	 */
-
/*
* Now that we have released the atomic lock (if necessary),
* it's safe to spin if the PTE that caused the fault was migrating.
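
A schematic of the retry logic this comment describes, with stub types
standing in for the kernel's PTE machinery; everything here is illustrative.

    /* Stub PTE with a hypothetical "migrating" bit. */
    typedef struct { volatile unsigned long val; } pte_t;
    #define PTE_MIGRATING (1UL << 0)        /* hypothetical bit position */

    static int pte_migrating(const pte_t *pte)
    {
        return pte->val & PTE_MIGRATING;
    }

    /* With the atomic lock already released, spinning here cannot
     * deadlock: the CPU migrating the page needs only the PTE. */
    static void wait_for_migration(const pte_t *pte)
    {
        while (pte_migrating(pte))
            ;  /* cpu_relax() in real kernel code */
    }
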