Commit 08351fc6 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (27 commits)
  arch/tile: support newer binutils assembler shift semantics
  arch/tile: fix deadlock bugs in rwlock implementation
  drivers/edac: provide support for tile architecture
  tile on-chip network driver: sync up with latest fixes
  arch/tile: support 4KB page size as well as 64KB
  arch/tile: add some more VMSPLIT options and use consistent naming
  arch/tile: fix some comments and whitespace
  arch/tile: export some additional module symbols
  arch/tile: enhance existing finv_buffer_remote() routine
  arch/tile: fix two bugs in the backtracer code
  arch/tile: use extended assembly to inline __mb_incoherent()
  arch/tile: use a cleaner technique to enable interrupt for cpu_idle()
  arch/tile: sync up with <arch/sim.h> and <arch/sim_def.h> changes
  arch/tile: fix reversed test of strict_strtol() return value
  arch/tile: avoid a simulator warning during bootup
  arch/tile: export <asm/hardwall.h> to userspace
  arch/tile: warn and retry if an IPI is not accepted by the target cpu
  arch/tile: stop disabling INTCTRL_1 interrupts during hypervisor downcalls
  arch/tile: fix __ndelay etc to work better
  arch/tile: bug fix: exec'ed task thought it was still single-stepping
  ...

Fix up trivial conflict in arch/tile/kernel/vmlinux.lds.S (percpu
alignment vs section naming convention fix)
parents 0df0914d 0dccb048
@@ -6127,6 +6127,7 @@ S: Supported
F: arch/tile/
F: drivers/tty/hvc/hvc_tile.c
F: drivers/net/tile/
F: drivers/edac/tile_edac.c
TLAN NETWORK DRIVER
M: Samuel Chessman <chessman@tux.org>
@@ -24,7 +24,7 @@ ON WHAT HARDWARE DOES IT RUN?
today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and
UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell,
IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS,
Xtensa, AVR32 and Renesas M32R architectures.
Xtensa, Tilera TILE, AVR32 and Renesas M32R architectures.
Linux is easily portable to most general-purpose 32- or 64-bit architectures
as long as they have a paged memory management unit (PMMU) and a port of the
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/config-language.txt.
# see Documentation/kbuild/kconfig-language.txt.
config TILE
def_bool y
@@ -11,6 +11,7 @@ config TILE
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_HARDIRQS_NO_DEPRECATED
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
@@ -201,12 +202,6 @@ config NODES_SHIFT
By default, 2, i.e. 2^2 == 4 DDR2 controllers.
In a system with more controllers, this value should be raised.
# Need 16MB areas to enable hugetlb
# See build-time check in arch/tile/mm/init.c.
config FORCE_MAX_ZONEORDER
int
default 9
choice
depends on !TILEGX
prompt "Memory split" if EXPERT
@@ -233,8 +228,12 @@ choice
bool "3.5G/0.5G user/kernel split"
config VMSPLIT_3G
bool "3G/1G user/kernel split"
config VMSPLIT_3G_OPT
bool "3G/1G user/kernel split (for full 1G low memory)"
config VMSPLIT_2_75G
bool "2.75G/1.25G user/kernel split (for full 1G low memory)"
config VMSPLIT_2_5G
bool "2.5G/1.5G user/kernel split"
config VMSPLIT_2_25G
bool "2.25G/1.75G user/kernel split"
config VMSPLIT_2G
bool "2G/2G user/kernel split"
config VMSPLIT_1G
@@ -245,7 +244,9 @@ config PAGE_OFFSET
hex
default 0xF0000000 if VMSPLIT_3_75G
default 0xE0000000 if VMSPLIT_3_5G
default 0xB0000000 if VMSPLIT_3G_OPT
default 0xB0000000 if VMSPLIT_2_75G
default 0xA0000000 if VMSPLIT_2_5G
default 0x90000000 if VMSPLIT_2_25G
default 0x80000000 if VMSPLIT_2G
default 0x40000000 if VMSPLIT_1G
default 0xC0000000
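(Editorial sanity check on the rename, reasoning not in the diff itself: PAGE_OFFSET 0xB0000000 is 2.75GB, so the kernel keeps 1.25GB of address space, enough for a full 1GB of lowmem plus roughly 0.25GB of vmalloc/fixmap overhead. A plain 3G/1G split leaves only about 0.75GB of lowmem after that overhead, which is why the 0xB0000000 option moves from the misleading VMSPLIT_3G_OPT name to VMSPLIT_2_75G.)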
@@ -16,10 +16,11 @@
#define __ARCH_INTERRUPTS_H__
/** Mask for an interrupt. */
#ifdef __ASSEMBLER__
/* Note: must handle breaking interrupts into high and low words manually. */
#define INT_MASK(intno) (1 << (intno))
#else
#define INT_MASK_LO(intno) (1 << (intno))
#define INT_MASK_HI(intno) (1 << ((intno) - 32))
#ifndef __ASSEMBLER__
#define INT_MASK(intno) (1ULL << (intno))
#endif
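Editorial note on the split: the 32-bit assembler cannot form a 64-bit immediate, so interrupt numbers 32 and up need their mask built in the high word explicitly. A hedged sketch, using interrupt number 33 purely for illustration:

	/* C code can say it in one 64-bit expression: */
	unsigned long long mask = INT_MASK(33);	/* 1ULL << 33 */

	/*
	 * Assembly works a 32-bit word at a time, so the same mask is
	 * built per word instead:
	 *   low word:  0, since bit 33 is not in bits 0..31
	 *   high word: INT_MASK_HI(33) == 1 << (33 - 32) == 2
	 */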
@@ -89,6 +90,7 @@
#define NUM_INTERRUPTS 49
#ifndef __ASSEMBLER__
#define QUEUED_INTERRUPTS ( \
INT_MASK(INT_MEM_ERROR) | \
INT_MASK(INT_DMATLB_MISS) | \
@@ -301,4 +303,5 @@
INT_MASK(INT_DOUBLE_FAULT) | \
INT_MASK(INT_AUX_PERF_COUNT) | \
0)
#endif /* !__ASSEMBLER__ */
#endif /* !__ARCH_INTERRUPTS_H__ */
@@ -152,16 +152,33 @@ sim_dump(unsigned int mask)
/**
* Print a string to the simulator stdout.
*
* @param str The string to be written; a newline is automatically added.
* @param str The string to be written.
*/
static __inline void
sim_print(const char* str)
{
for ( ; *str != '\0'; str++)
{
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
(*str << _SIM_CONTROL_OPERATOR_BITS));
}
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
(SIM_PUTC_FLUSH_BINARY << _SIM_CONTROL_OPERATOR_BITS));
}
/**
* Print a string to the simulator stdout.
*
* @param str The string to be written (a newline is automatically added).
*/
static __inline void
sim_print_string(const char* str)
{
int i;
for (i = 0; str[i] != 0; i++)
for ( ; *str != '\0'; str++)
{
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
(str[i] << _SIM_CONTROL_OPERATOR_BITS));
(*str << _SIM_CONTROL_OPERATOR_BITS));
}
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
(SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
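A hedged usage sketch of the two routines as now documented (the strings are made up for illustration):

	sim_print("phase 1 ");		/* flushed immediately, no newline */
	sim_print_string("done");	/* a newline is appended automatically */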
@@ -203,7 +220,7 @@ sim_command(const char* str)
* we are passing to the simulator are actually valid in the registers
* (i.e. returned from memory) prior to the SIM_CONTROL spr.
*/
static __inline int _sim_syscall0(int val)
static __inline long _sim_syscall0(int val)
{
long result;
__asm__ __volatile__ ("mtspr SIM_CONTROL, r0"
@@ -211,7 +228,7 @@ static __inline int _sim_syscall0(int val)
return result;
}
static __inline int _sim_syscall1(int val, long arg1)
static __inline long _sim_syscall1(int val, long arg1)
{
long result;
__asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
@@ -219,7 +236,7 @@ static __inline int _sim_syscall1(int val, long arg1)
return result;
}
static __inline int _sim_syscall2(int val, long arg1, long arg2)
static __inline long _sim_syscall2(int val, long arg1, long arg2)
{
long result;
__asm__ __volatile__ ("{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
@@ -233,7 +250,7 @@ static __inline int _sim_syscall2(int val, long arg1, long arg2)
the register values for arguments 3 and up may still be in flight
to the core from a stack frame reload. */
static __inline int _sim_syscall3(int val, long arg1, long arg2, long arg3)
static __inline long _sim_syscall3(int val, long arg1, long arg2, long arg3)
{
long result;
__asm__ __volatile__ ("{ and zero, r3, r3 };"
@@ -244,7 +261,7 @@ static __inline int _sim_syscall3(int val, long arg1, long arg2, long arg3)
return result;
}
static __inline int _sim_syscall4(int val, long arg1, long arg2, long arg3,
static __inline long _sim_syscall4(int val, long arg1, long arg2, long arg3,
long arg4)
{
long result;
@@ -256,7 +273,7 @@ static __inline int _sim_syscall4(int val, long arg1, long arg2, long arg3,
return result;
}
static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
static __inline long _sim_syscall5(int val, long arg1, long arg2, long arg3,
long arg4, long arg5)
{
long result;
@@ -268,7 +285,6 @@ static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
return result;
}
/**
* Make a special syscall to the simulator itself, if running under
* simulation. This is used as the implementation of other functions
@@ -281,7 +297,8 @@ static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
*/
#define _sim_syscall(syscall_num, nr, args...) \
_sim_syscall##nr( \
((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, args)
((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, \
##args)
/* Values for the "access_mask" parameters below. */
@@ -365,6 +382,13 @@ sim_validate_lines_evicted(unsigned long long pa, unsigned long length)
}
/* Return the current CPU speed in cycles per second. */
static __inline long
sim_query_cpu_speed(void)
{
return _sim_syscall(SIM_SYSCALL_QUERY_CPU_SPEED, 0);
}
#endif /* !__DOXYGEN__ */
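The added backslash continuation and "##args" in the _sim_syscall macro matter precisely for a zero-argument caller like sim_query_cpu_speed() above: with GNU C's "##args", the trailing comma is swallowed when args is empty, so the call expands roughly to:

	_sim_syscall(SIM_SYSCALL_QUERY_CPU_SPEED, 0)
	  => _sim_syscall0(((SIM_SYSCALL_QUERY_CPU_SPEED)
			    << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL)

Without the "##", the expansion would end in a dangling comma and fail to compile.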
@@ -243,6 +243,9 @@
*/
#define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5
/** Syscall number for sim_query_cpu_speed(). */
#define SIM_SYSCALL_QUERY_CPU_SPEED 6
/*
* Bit masks which can be shifted by 8, combined with
include include/asm-generic/Kbuild.asm
header-y += ucontext.h
header-y += hardwall.h
@@ -32,7 +32,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
return v->counter;
return ACCESS_ONCE(v->counter);
}
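Why ACCESS_ONCE() here (editorial note): without it, the compiler may legally keep v->counter cached in a register across a spin loop. The macro in <linux/compiler.h> is essentially a volatile cast that forces a fresh load on every call; a minimal sketch of the failure mode it prevents:

	/* With a plain read the compiler may hoist the load out of the
	 * loop and spin forever on a stale register value: */
	while (atomic_read(&v) != 1)
		cpu_relax();

	/* ACCESS_ONCE() boils down to: */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))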
/**
@@ -122,7 +122,7 @@ static inline int test_and_change_bit(unsigned nr,
return (_atomic_xor(addr, mask) & mask) != 0;
}
/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */
/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() do {} while (0)
@@ -40,7 +40,7 @@
#define INTERNODE_CACHE_BYTES L2_CACHE_BYTES
/* Group together read-mostly things to avoid cache false sharing */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
/*
* Attribute for data that is kept read/write coherent until the end of
@@ -138,55 +138,12 @@ static inline void finv_buffer(void *buffer, size_t size)
}
/*
* Flush & invalidate a VA range that is homed remotely on a single core,
* waiting until the memory controller holds the flushed values.
* Flush and invalidate a VA range that is homed remotely, waiting
* until the memory controller holds the flushed values. If "hfh" is
* true, we will do a more expensive flush involving additional loads
* to make sure we have touched all the possible home cpus of a buffer
* that is homed with "hash for home".
*/
static inline void finv_buffer_remote(void *buffer, size_t size)
{
char *p;
int i;
/*
* Flush and invalidate the buffer out of the local L1/L2
* and request the home cache to flush and invalidate as well.
*/
__finv_buffer(buffer, size);
/*
* Wait for the home cache to acknowledge that it has processed
* all the flush-and-invalidate requests. This does not mean
* that the flushed data has reached the memory controller yet,
* but it does mean the home cache is processing the flushes.
*/
__insn_mf();
/*
* Issue a load to the last cache line, which can't complete
* until all the previously-issued flushes to the same memory
* controller have also completed. If we weren't striping
* memory, that one load would be sufficient, but since we may
* be, we also need to back up to the last load issued to
* another memory controller, which would be the point where
* we crossed an 8KB boundary (the granularity of striping
* across memory controllers). Keep backing up and doing this
* until we are before the beginning of the buffer, or have
* hit all the controllers.
*/
for (i = 0, p = (char *)buffer + size - 1;
i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
++i) {
const unsigned long STRIPE_WIDTH = 8192;
/* Force a load instruction to issue. */
*(volatile char *)p;
/* Jump to end of previous stripe. */
p -= STRIPE_WIDTH;
p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
}
/* Wait for the loads (and thus flushes) to have completed. */
__insn_mf();
}
void finv_buffer_remote(void *buffer, size_t size, int hfh);
#endif /* _ASM_TILE_CACHEFLUSH_H */
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -10,25 +10,20 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Assembly code for invoking the HV's fence_incoherent syscall.
*/
#include <linux/linkage.h>
#include <hv/syscall_public.h>
#include <arch/abi.h>
#include <arch/chip.h>
#ifndef _ASM_TILE_EDAC_H
#define _ASM_TILE_EDAC_H
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
/* ECC atomic, DMA, SMP and interrupt safe scrub function */
/*
* Invoke the hypervisor's fence_incoherent syscall, which guarantees
* that all victims for cachelines homed on this tile have reached memory.
static inline void atomic_scrub(void *va, u32 size)
{
/*
* There is nothing to be done here because CEs are
* corrected by the mshim.
*/
STD_ENTRY(__mb_incoherent)
moveli TREG_SYSCALL_NR_NAME, HV_SYS_fence_incoherent
swint2
jrp lr
STD_ENDPROC(__mb_incoherent)
return;
}
#endif
#endif /* _ASM_TILE_EDAC_H */
@@ -54,7 +54,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER);
set_pte(ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
@@ -18,12 +18,24 @@
#include <arch/interrupts.h>
#include <arch/chip.h>
#if !defined(__tilegx__) && defined(__ASSEMBLY__)
/*
* The set of interrupts we want to allow when interrupts are nominally
* disabled. The remainder are effectively "NMI" interrupts from
* the point of view of the generic Linux code. Note that synchronous
* interrupts (aka "non-queued") are not blocked by the mask in any case.
*/
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS_HI \
(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS_HI \
(~(INT_MASK_HI(INT_PERF_COUNT)))
#endif
#else
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS \
(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
@@ -32,6 +44,8 @@
(~(INT_MASK(INT_PERF_COUNT)))
#endif
#endif
#ifndef __ASSEMBLY__
/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
@@ -224,11 +238,11 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define IRQ_DISABLE(tmp0, tmp1) \
{ \
movei tmp0, -1; \
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
}; \
{ \
mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \
auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
}; \
mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1
@@ -16,10 +16,11 @@
#define _ASM_TILE_PAGE_H
#include <linux/const.h>
#include <hv/pagesize.h>
/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#define PAGE_SHIFT 16
#define HPAGE_SHIFT 24
#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
#define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
@@ -29,25 +30,18 @@
#ifdef __KERNEL__
#include <hv/hypervisor.h>
#include <arch/chip.h>
/*
* The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx
* definitions in <hv/hypervisor.h>. We validate this at build time
* here, and again at runtime during early boot. We provide a
* separate definition since userspace doesn't have <hv/hypervisor.h>.
*
* Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since
* they are the same on i386 but not TILE.
* If the Kconfig doesn't specify, set a maximum zone order that
* is enough so that we can create huge pages from small pages given
* the respective sizes of the two page types. See <linux/mmzone.h>.
*/
#if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT
# error Small page size mismatch in Linux
#endif
#if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT
# error Huge page size mismatch in Linux
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
#endif
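Worked numbers (derived from the shifts used in this series): the buddy allocator's largest block is 2^(CONFIG_FORCE_MAX_ZONEORDER - 1) small pages. With 64KB pages (PAGE_SHIFT 16) and 16MB huge pages (HPAGE_SHIFT 24), the default is 24 - 16 + 1 = 9, giving 2^8 * 64KB = 16MB, exactly one huge page; with the newly supported 4KB pages (PAGE_SHIFT 12) it becomes 24 - 12 + 1 = 13, i.e. 2^12 * 4KB = 16MB again.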
#include <hv/hypervisor.h>
#include <arch/chip.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -81,12 +75,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
* Hypervisor page tables are made of the same basic structure.
*/
typedef __u64 pteval_t;
typedef __u64 pmdval_t;
typedef __u64 pudval_t;
typedef __u64 pgdval_t;
typedef __u64 pgprotval_t;
typedef HV_PTE pte_t;
typedef HV_PTE pgd_t;
typedef HV_PTE pgprot_t;
@@ -41,9 +41,9 @@
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_64BIT
set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER);
set_pte(pmdp, pmd);
#else
set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER);
set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
#endif
}
@@ -100,6 +100,9 @@ pte_t *get_prealloc_pte(unsigned long pfn);
/* During init, we can shatter kernel huge pages if needed. */
void shatter_pmd(pmd_t *pmd);
/* After init, a more complex technique is required. */
void shatter_huge_page(unsigned long addr);
#ifdef __tilegx__
/* We share a single page allocator for both L1 and L2 page tables. */
#if HV_L1_SIZE != HV_L2_SIZE
@@ -233,15 +233,23 @@ static inline void __pte_clear(pte_t *ptep)
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
/*
* __set_pte() ensures we write the 64-bit PTE with 32-bit words in
* the right order on 32-bit platforms and also allows us to write
* hooks to check valid PTEs, etc., if we want.
*/
void __set_pte(pte_t *ptep, pte_t pte);
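A hedged sketch of what such an ordering-safe write can look like on the 32-bit kernel (the real routine lives in arch/tile/mm/pgtable.c and is not shown in this hunk; the placement of the PRESENT bit in the low word is an assumption here):

	void __set_pte(pte_t *ptep, pte_t pte)
	{
		u32 *p = (u32 *)ptep;

		if (hv_pte_get_present(pte)) {
			p[1] = (u32)(pte_val(pte) >> 32); /* high word first */
			barrier();
			p[0] = (u32)pte_val(pte);  /* word with PRESENT last */
		} else {
			p[0] = (u32)pte_val(pte);  /* clear PRESENT first */
			barrier();
			p[1] = (u32)(pte_val(pte) >> 32);
		}
	}

Either order guarantees a concurrent page-table walk never observes a half-written entry that looks valid.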
/*
* set_pte_order() sets the given PTE and also sanity-checks the
* set_pte() sets the given PTE and also sanity-checks the
* requested PTE against the page homecaching. Unspecified parts
* of the PTE are filled in when it is written to memory, i.e. all
* caching attributes if "!forcecache", or the home cpu if "anyhome".
*/
extern void set_pte_order(pte_t *ptep, pte_t pte, int order);
#define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0)
extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
@@ -292,21 +300,6 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
* dst - pointer to pgd range anywhere on a pgd page
* src - ""
* count - the number of pgds to copy.
*
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
memcpy(dst, src, count * sizeof(pgd_t));
}
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -24,6 +24,7 @@
#define PGDIR_SIZE HV_PAGE_SIZE_LARGE
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
/*
* The level-2 index is defined by the difference between the huge
@@ -33,6 +34,7 @@
* this nomenclature is somewhat confusing.
*/
#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
#ifndef __ASSEMBLY__
@@ -94,7 +96,6 @@ static inline int pgd_addr_invalid(unsigned long addr)
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
extern int ptep_test_and_clear_young(struct vm_area_struct *,
unsigned long addr, pte_t *);
@@ -110,6 +111,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
}
/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
@@ -269,7 +269,6 @@ extern char chip_model[64];
/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];
/* Do we dump information to the console when a user application crashes? */
extern int show_crashinfo;
@@ -141,6 +141,9 @@ struct single_step_state {
/* Single-step the instruction at regs->pc */
extern void single_step_once(struct pt_regs *regs);
/* Clean up after execve(). */
extern void single_step_execve(void);
struct task_struct;
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
@@ -78,13 +78,6 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock);
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8
/* Internal functions; do not use. */
void arch_read_lock_slow(arch_rwlock_t *, u32);
int arch_read_trylock_slow(arch_rwlock_t *);
void arch_read_unlock_slow(arch_rwlock_t *);
void arch_write_lock_slow(arch_rwlock_t *, u32);
void arch_write_unlock_slow(arch_rwlock_t *, u32);
/**
* arch_read_can_lock() - would read_trylock() succeed?
*/
@@ -104,94 +97,32 @@ static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
/**
* arch_read_lock() - acquire a read lock.
*/
static inline void arch_read_lock(arch_rwlock_t *rwlock)
{
u32 val = __insn_tns((int *)&rwlock->lock);
if (unlikely(val << _RD_COUNT_WIDTH)) {
arch_read_lock_slow(rwlock, val);
return;
}
rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
}
void arch_read_lock(arch_rwlock_t *rwlock);
/**
* arch_read_lock() - acquire a write lock.
* arch_write_lock() - acquire a write lock.
*/
static inline void arch_write_lock(arch_rwlock_t *rwlock)
{
u32 val = __insn_tns((int *)&rwlock->lock);
if (unlikely(val != 0)) {
arch_write_lock_slow(rwlock, val);
return;
}
rwlock->lock = 1 << _WR_NEXT_SHIFT;
}
void arch_write_lock(arch_rwlock_t *rwlock);
/**
* arch_read_trylock() - try to acquire a read lock.
*/
static inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
int locked;
u32 val = __insn_tns((int *)&rwlock->lock);
if (unlikely(val & 1))
return arch_read_trylock_slow(rwlock);
locked = (val << _RD_COUNT_WIDTH) == 0;
rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
return locked;
}
int arch_read_trylock(arch_rwlock_t *rwlock);
/**
* arch_write_trylock() - try to acquire a write lock.
*/
static inline int arch_write_trylock(arch_rwlock_t *rwlock)
{
u32 val = __insn_tns((int *)&rwlock->lock);
/*
* If a tns is in progress, or there's a waiting or active locker,
* or active readers, we can't take the lock, so give up.
*/
if (unlikely(val != 0)) {
if (!(val & 1))
rwlock->lock = val;
return 0;
}
/* Set the "next" field to mark it locked. */
rwlock->lock = 1 << _WR_NEXT_SHIFT;
return 1;
}
int arch_write_trylock(arch_rwlock_t *rwlock);
/**
* arch_read_unlock() - release a read lock.
*/
static inline void arch_read_unlock(arch_rwlock_t *rwlock)
{
u32 val;
mb(); /* guarantee anything modified under the lock is visible */
val = __insn_tns((int *)&rwlock->lock);
if (unlikely(val & 1)) {
arch_read_unlock_slow(rwlock);
return;
}
rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
}
void arch_read_unlock(arch_rwlock_t *rwlock);
/**
* arch_write_unlock() - release a write lock.
*/
static inline void arch_write_unlock(arch_rwlock_t *rwlock)
{
u32 val;
mb(); /* guarantee anything modified under the lock is visible */
val = __insn_tns((int *)&rwlock->lock);
if (unlikely(val != (1 << _WR_NEXT_SHIFT))) {
arch_write_unlock_slow(rwlock, val);
return;
}
rwlock->lock = 0;
}
void arch_write_unlock(arch_rwlock_t *rwlock);
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
@@ -18,13 +18,14 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <hv/hypervisor.h>
/* Everything we need to keep track of a backtrace iteration */
struct KBacktraceIterator {
BacktraceIterator it;
struct task_struct *task; /* task we are backtracing */
HV_PTE *pgtable; /* page table for user space access */
pte_t *pgtable; /* page table for user space access */
int end; /* iteration complete. */
int new_context; /* new context is starting */
int profile; /* profiling, so stop on async intrpt */
@@ -90,7 +90,24 @@
#endif
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
int __mb_incoherent(void); /* Helper routine for mb_incoherent(). */
#include <hv/syscall_public.h>
/*
* Issue an uncacheable load to each memory controller, then
* wait until those loads have completed.
*/
static inline void __mb_incoherent(void)
{
long clobber_r10;
asm volatile("swint2"
: "=R10" (clobber_r10)
: "R10" (HV_SYS_fence_incoherent)
: "r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "r8", "r9",
"r11", "r12", "r13", "r14",
"r15", "r16", "r17", "r18", "r19",
"r20", "r21", "r22", "r23", "r24",
"r25", "r26", "r27", "r28", "r29");
}
#endif
/* Fence to guarantee visibility of stores to incoherent memory. */
@@ -68,6 +68,7 @@ struct thread_info {
#else
#define THREAD_SIZE_ORDER (0)
#endif
#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)
@@ -38,6 +38,9 @@ static inline cycles_t get_cycles(void)
cycles_t get_clock_rate(void);
/* Convert nanoseconds to core clock cycles. */
cycles_t ns2cycles(unsigned long nsecs);
/* Called at cpu initialization to set some low-level constants. */
void setup_clock(void);
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* @file drv_mshim_intf.h
* Interface definitions for the Linux EDAC memory controller driver.
*/
#ifndef _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H
#define _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H
/** Number of memory controllers in the public API. */
#define TILE_MAX_MSHIMS 4
/** Memory info under each memory controller. */
struct mshim_mem_info
{
uint64_t mem_size; /**< Total memory size in bytes. */
uint8_t mem_type; /**< Memory type, DDR2 or DDR3. */
uint8_t mem_ecc; /**< Memory supports ECC. */
};
/**
* DIMM error structure.
* For now, only correctable errors are counted and the mshim doesn't record
* the error PA. The hypervisor panics on uncorrectable errors.
*/
struct mshim_mem_error
{
uint32_t sbe_count; /**< Number of single-bit errors. */
};
/** Read this offset to get the memory info per mshim. */
#define MSHIM_MEM_INFO_OFF 0x100
/** Read this offset to check DIMM error. */
#define MSHIM_MEM_ERROR_OFF 0x200
#endif /* _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H */
@@ -338,9 +338,10 @@ typedef int HV_Errno;
#define HV_ENOTREADY -812 /**< Device not ready */
#define HV_EIO -813 /**< I/O error */
#define HV_ENOMEM -814 /**< Out of memory */
#define HV_EAGAIN -815 /**< Try again */
#define HV_ERR_MAX -801 /**< Largest HV error code */
#define HV_ERR_MIN -814 /**< Smallest HV error code */
#define HV_ERR_MIN -815 /**< Smallest HV error code */
#ifndef __ASSEMBLER__
@@ -867,6 +868,43 @@ typedef struct
*/
HV_PhysAddrRange hv_inquire_physical(int idx);
/** Possible DIMM types. */
typedef enum
{
NO_DIMM = 0, /**< No DIMM */
DDR2 = 1, /**< DDR2 */
DDR3 = 2 /**< DDR3 */
} HV_DIMM_Type;
#ifdef __tilegx__
/** Log2 of minimum DIMM bytes supported by the memory controller. */
#define HV_MSH_MIN_DIMM_SIZE_SHIFT 29
/** Max number of DIMMs contained by one memory controller. */
#define HV_MSH_MAX_DIMMS 8
#else
/** Log2 of minimum DIMM bytes supported by the memory controller. */
#define HV_MSH_MIN_DIMM_SIZE_SHIFT 26
/** Max number of DIMMs contained by one memory controller. */
#define HV_MSH_MAX_DIMMS 2
#endif
/** Number of bits to right-shift to get the DIMM type. */
#define HV_DIMM_TYPE_SHIFT 0
/** Bits to mask to get the DIMM type. */
#define HV_DIMM_TYPE_MASK 0xf
/** Number of bits to right-shift to get the DIMM size. */
#define HV_DIMM_SIZE_SHIFT 4
/** Bits to mask to get the DIMM size. */
#define HV_DIMM_SIZE_MASK 0xf
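Putting the shift/mask pairs together, a decode sketch (illustrative only; "dimm" is a hypothetical per-slot descriptor byte taken from the controller info below):

	HV_DIMM_Type type = (dimm >> HV_DIMM_TYPE_SHIFT) & HV_DIMM_TYPE_MASK;
	uint64_t bytes = 1ULL << (HV_MSH_MIN_DIMM_SIZE_SHIFT +
				  ((dimm >> HV_DIMM_SIZE_SHIFT) &
				   HV_DIMM_SIZE_MASK));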
/** Memory controller information. */
typedef struct
@@ -963,6 +1001,11 @@ HV_ASIDRange hv_inquire_asid(int idx);
/** Waits for at least the specified number of nanoseconds then returns.
*
* NOTE: this deprecated function currently assumes a 750 MHz clock,
* and is thus not generally suitable for use. New code should call
* hv_sysconf(HV_SYSCONF_CPU_SPEED), compute a cycle count to wait for,
* and delay by looping while checking the cycle counter SPR.
*
* @param nanosecs The number of nanoseconds to sleep.
*/
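The replacement pattern this note recommends is exactly what the new __ndelay() later in this merge implements; in outline:

	cycles_t target = get_cycles() + ns2cycles(nsecs);
	while (get_cycles() < target)
		cpu_relax();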
@@ -1038,6 +1081,7 @@ int hv_console_write(HV_VirtAddr bytes, int len);
* downcall:
*
* INT_MESSAGE_RCV_DWNCL (hypervisor message available)
* INT_DEV_INTR_DWNCL (device interrupt)
* INT_DMATLB_MISS_DWNCL (DMA TLB miss)
* INT_SNITLB_MISS_DWNCL (SNI TLB miss)
* INT_DMATLB_ACCESS_DWNCL (DMA TLB access violation)
@@ -38,12 +38,6 @@ STD_ENTRY(kernel_execve)
jrp lr
STD_ENDPROC(kernel_execve)
/* Delay a fixed number of cycles. */
STD_ENTRY(__delay)
{ addi r0, r0, -1; bnzt r0, . }
jrp lr
STD_ENDPROC(__delay)
/*
* We don't run this function directly, but instead copy it to a page
* we map into every user process. See vdso_setup().
@@ -97,23 +91,17 @@ STD_ENTRY(smp_nap)
/*
* Enable interrupts racelessly and then nap until interrupted.
* Architecturally, we are guaranteed that enabling interrupts via
* mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
* This function's _cpu_idle_nap address is special; see intvec.S.
* When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
* as a result return to the function that called _cpu_idle().
*/
STD_ENTRY(_cpu_idle)
{
lnk r0
movei r1, KERNEL_PL
}
{
addli r0, r0, _cpu_idle_nap - .
movei r1, 1
mtspr INTERRUPT_CRITICAL_SECTION, r1
}
IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
mtspr SPR_EX_CONTEXT_K_1, r1 /* Kernel PL, ICS clear */
mtspr SPR_EX_CONTEXT_K_0, r0
iret
mtspr INTERRUPT_CRITICAL_SECTION, zero
.global _cpu_idle_nap
_cpu_idle_nap:
nap
@@ -133,7 +133,7 @@ ENTRY(_start)
}
ENDPROC(_start)
.section ".bss.page_aligned","w"
__PAGE_ALIGNED_BSS
.align PAGE_SIZE
ENTRY(empty_zero_page)
.fill PAGE_SIZE,1,0
@@ -145,10 +145,10 @@ ENTRY(empty_zero_page)
.endif
.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
(HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
.endm
.section ".data.page_aligned","wa"
__PAGE_ALIGNED_DATA
.align PAGE_SIZE
ENTRY(swapper_pg_dir)
/*
@@ -158,12 +158,14 @@ ENTRY(swapper_pg_dir)
*/
.set addr, 0
.rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
PTE addr + PAGE_OFFSET, addr, HV_PTE_READABLE | HV_PTE_WRITABLE
PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_WRITABLE - 32))
.set addr, addr + PGDIR_SIZE
.endr
/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
PTE MEM_SV_INTRPT, 0, HV_PTE_READABLE | HV_PTE_EXECUTABLE
PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_EXECUTABLE - 32))
.org swapper_pg_dir + HV_L1_SIZE
END(swapper_pg_dir)
@@ -176,6 +178,7 @@ ENTRY(swapper_pg_dir)
__INITDATA
.align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot)
PTE 0, 0, HV_PTE_READABLE | HV_PTE_WRITABLE, 1
PTE 0, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_WRITABLE - 32)), 1
.align CHIP_L2_LINE_SIZE()
END(swapper_pgprot)
@@ -32,10 +32,6 @@
# error "No support for kernel preemption currently"
#endif
#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
# error INT_INTCTRL_K coded to set high interrupt mask
#endif
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
@@ -1198,46 +1194,6 @@ STD_ENTRY(interrupt_return)
STD_ENDPROC(interrupt_return)
/*
* This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
* before returning, so we can properly get more downcalls.
*/
.pushsection .text.handle_interrupt_downcall,"ax"
handle_interrupt_downcall:
finish_interrupt_save handle_interrupt_downcall
check_single_stepping normal, .Ldispatch_downcall
.Ldispatch_downcall:
/* Clear INTCTRL_K from the set of interrupts we ever enable. */
GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
{
addi r30, r30, 4
movei r31, INT_MASK(INT_INTCTRL_K)
}
{
lw r20, r30
nor r21, r31, zero
}
and r20, r20, r21
sw r30, r20
{
jalr r0
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
}
FEEDBACK_REENTER(handle_interrupt_downcall)
/* Allow INTCTRL_K to be enabled next time we enable interrupts. */
lw r20, r30
or r20, r20, r31
sw r30, r20
{
movei r30, 0 /* not an NMI */
j interrupt_return
}
STD_ENDPROC(handle_interrupt_downcall)
/*
* Some interrupts don't check for single stepping
*/
@@ -1600,7 +1556,10 @@ STD_ENTRY(_sys_clone)
.align 64
/* Align much later jump on the start of a cache line. */
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
nop; nop
nop
#if PAGE_SIZE >= 0x10000
nop
#endif
#endif
ENTRY(sys_cmpxchg)
@@ -1628,9 +1587,13 @@ ENTRY(sys_cmpxchg)
* about aliasing among multiple mappings of the same physical page,
* and we ignore the low 3 bits so we have one lock that covers
* both a cmpxchg64() and a cmpxchg() on either its low or high word.
* NOTE: this code must match __atomic_hashed_lock() in lib/atomic.c.
* NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
*/
#if (PAGE_OFFSET & 0xffff) != 0
# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
#endif
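For reference, the C-side hash this comment refers to works conceptually like the sketch below (illustrative only; the real __atomic_hashed_lock() in lib/atomic_32.c builds the pointer with tile bit-merge instructions, and the ATOMIC_HASH_SIZE power-of-two is assumed here):

	static inline int *hashed_lock_sketch(volatile void *v)
	{
		/* Drop the low 3 bits so one lock covers a cmpxchg64() and
		 * a cmpxchg() on either of its words, then hash into the
		 * page-aligned lock table. */
		unsigned long i = ((unsigned long)v >> 3) &
				  (ATOMIC_HASH_SIZE - 1);
		return &atomic_locks[i];
	}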
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
{
/* Check for unaligned input. */
@@ -1723,11 +1686,14 @@ ENTRY(sys_cmpxchg)
lw r26, r0
}
{
/* atomic_locks is page aligned so this suffices to get its addr. */
auli r21, zero, hi16(atomic_locks)
auli r21, zero, ha16(atomic_locks)
bbns r23, .Lcmpxchg_badaddr
}
#if PAGE_SIZE < 0x10000
/* atomic_locks is page-aligned so for big pages we don't need this. */
addli r21, r21, lo16(atomic_locks)
#endif
{
/*
* Insert the hash bits into the page-aligned pointer.
@@ -1762,7 +1728,7 @@ ENTRY(sys_cmpxchg)
/*
* Perform the actual cmpxchg or atomic_update.
* Note that __futex_mark_unlocked() in uClibc relies on
* Note that the system <arch/atomic.h> header relies on
* atomic_update() to always perform an "mf", so don't make
* it optional or conditional without modifying that code.
*/
@@ -2014,17 +1980,17 @@ int_unalign:
#endif
int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
hv_message_intr, handle_interrupt_downcall
hv_message_intr
int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
tile_dev_intr, handle_interrupt_downcall
tile_dev_intr
int_hand INT_I_ASID, I_ASID, bad_intr
int_hand INT_D_ASID, D_ASID, bad_intr
int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
do_page_fault, handle_interrupt_downcall
do_page_fault
int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
do_page_fault, handle_interrupt_downcall
do_page_fault
int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
do_page_fault, handle_interrupt_downcall
do_page_fault
int_hand INT_SN_CPL, SN_CPL, bad_intr
int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
#if CHIP_HAS_AUX_PERF_COUNTERS()
@@ -176,43 +176,43 @@ void disable_percpu_irq(unsigned int irq)
EXPORT_SYMBOL(disable_percpu_irq);
/* Mask an interrupt. */
static void tile_irq_chip_mask(unsigned int irq)
static void tile_irq_chip_mask(struct irq_data *d)
{
mask_irqs(1UL << irq);
mask_irqs(1UL << d->irq);
}
/* Unmask an interrupt. */
static void tile_irq_chip_unmask(unsigned int irq)
static void tile_irq_chip_unmask(struct irq_data *d)
{
unmask_irqs(1UL << irq);
unmask_irqs(1UL << d->irq);
}
/*
* Clear an interrupt before processing it so that any new assertions
* will trigger another irq.
*/
static void tile_irq_chip_ack(unsigned int irq)
static void tile_irq_chip_ack(struct irq_data *d)
{
if ((unsigned long)get_irq_chip_data(irq) != IS_HW_CLEARED)
clear_irqs(1UL << irq);
if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
clear_irqs(1UL << d->irq);
}
/*
* For per-cpu interrupts, we need to avoid unmasking any interrupts
* that we disabled via disable_percpu_irq().
*/
static void tile_irq_chip_eoi(unsigned int irq)
static void tile_irq_chip_eoi(struct irq_data *d)
{
if (!(__get_cpu_var(irq_disable_mask) & (1UL << irq)))
unmask_irqs(1UL << irq);
if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
unmask_irqs(1UL << d->irq);
}
static struct irq_chip tile_irq_chip = {
.name = "tile_irq_chip",
.ack = tile_irq_chip_ack,
.eoi = tile_irq_chip_eoi,
.mask = tile_irq_chip_mask,
.unmask = tile_irq_chip_unmask,
.irq_ack = tile_irq_chip_ack,
.irq_eoi = tile_irq_chip_eoi,
.irq_mask = tile_irq_chip_mask,
.irq_unmask = tile_irq_chip_unmask,
};
void __init init_IRQ(void)
@@ -277,8 +277,10 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
struct irq_desc *desc = irq_to_desc(i);
raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
@@ -288,7 +290,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
@@ -296,7 +298,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
}
@@ -240,8 +240,11 @@ static void setup_quasi_va_is_pa(void)
pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
for (i = 0; i < pgd_index(PAGE_OFFSET); i++)
pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte);
for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
if (pfn_valid(pfn))
__set_pte(&pgtable[i], pfn_pte(pfn, pte));
}
}
@@ -86,6 +86,21 @@ EXPORT_SYMBOL(dma_free_coherent);
* can count on nothing having been touched.
*/
/* Flush a PA range from cache page by page. */
static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
{
struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));
while ((ssize_t)size > 0) {
/* Flush the page. */
homecache_flush_cache(page++, 0);
/* Figure out if we need to continue on the next page. */
size -= bytesleft;
bytesleft = PAGE_SIZE;
}
}
/*
* dma_map_single can be passed any memory address, and there appear
@@ -97,26 +112,12 @@ EXPORT_SYMBOL(dma_free_coherent);
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
struct page *page;
dma_addr_t dma_addr;
int thispage;
dma_addr_t dma_addr = __pa(ptr);
BUG_ON(!valid_dma_direction(direction));
WARN_ON(size == 0);
dma_addr = __pa(ptr);
/* We might have been handed a buffer that wraps a page boundary */
while ((int)size > 0) {
/* The amount to flush that's on this page */
thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1));
thispage = min((int)thispage, (int)size);
/* Is this valid for any page we could be handed? */
page = pfn_to_page(kaddr_to_pfn(ptr));
homecache_flush_cache(page, 0);
ptr += thispage;
size -= thispage;
}
__dma_map_pa_range(dma_addr, size);
return dma_addr;
}
@@ -140,10 +141,8 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
WARN_ON(nents == 0 || sglist->length == 0);
for_each_sg(sglist, sg, nents, i) {
struct page *page;
sg->dma_address = sg_phys(sg);
page = pfn_to_page(sg->dma_address >> PAGE_SHIFT);
homecache_flush_cache(page, 0);
__dma_map_pa_range(sg->dma_address, sg->length);
}
return nents;
@@ -163,6 +162,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
{
BUG_ON(!valid_dma_direction(direction));
BUG_ON(offset + size > PAGE_SIZE);
homecache_flush_cache(page, 0);
return page_to_pa(page) + offset;
@@ -165,7 +165,7 @@ void free_thread_info(struct thread_info *info)
kfree(step_state);
}
free_page((unsigned long)info);
free_pages((unsigned long)info, THREAD_SIZE_ORDER);
}
static void save_arch_state(struct thread_struct *t);
@@ -574,6 +574,8 @@ SYSCALL_DEFINE4(execve, const char __user *, path,
goto out;
error = do_execve(filename, argv, envp, regs);
putname(filename);
if (error == 0)
single_step_execve();
out:
return error;
}
@@ -593,6 +595,8 @@ long compat_sys_execve(const char __user *path,
goto out;
error = compat_do_execve(filename, argv, envp, regs);
putname(filename);
if (error == 0)
single_step_execve();
out:
return error;
}
@@ -59,6 +59,8 @@ unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];
static unsigned long __initdata node_percpu[MAX_NUMNODES];
#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
@@ -554,7 +556,6 @@ static void __init setup_bootmem_allocator(void)
reserve_bootmem(crashk_res.start,
crashk_res.end - crashk_res.start + 1, 0);
#endif
}
void *__init alloc_remap(int nid, unsigned long size)
@@ -568,11 +569,13 @@ void *__init alloc_remap(int nid, unsigned long size)
static int __init percpu_size(void)
{
int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
#ifdef CONFIG_MODULES
if (size < PERCPU_ENOUGH_ROOM)
size = PERCPU_ENOUGH_ROOM;
#endif
int size = __per_cpu_end - __per_cpu_start;
size += PERCPU_MODULE_RESERVE;
size += PERCPU_DYNAMIC_EARLY_SIZE;
if (size < PCPU_MIN_UNIT_SIZE)
size = PCPU_MIN_UNIT_SIZE;
size = roundup(size, PAGE_SIZE);
/* In several places we assume the per-cpu data fits on a huge page. */
BUG_ON(kdata_huge && size > HPAGE_SIZE);
return size;
@@ -589,7 +592,6 @@ static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
static void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = { 0 };
unsigned long node_percpu[MAX_NUMNODES] = { 0 };
int size = percpu_size();
int num_cpus = smp_height * smp_width;
int i;
@@ -674,7 +676,7 @@ static void __init zone_sizes_init(void)
NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
free_area_init_node(i, zones_size, start, NULL);
printk(KERN_DEBUG " DMA zone: %ld per-cpu pages\n",
printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
PFN_UP(node_percpu[i]));
/* Track the type of memory on each node */
@@ -1312,6 +1314,8 @@ static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
BUG_ON(size % PAGE_SIZE != 0);
pfn_offset[nid] += size / PAGE_SIZE;
BUG_ON(node_percpu[nid] < size);
node_percpu[nid] -= size;
if (percpu_pfn[cpu] == 0)
percpu_pfn[cpu] = pfn;
return pfn_to_kaddr(pfn);
@@ -56,7 +56,7 @@ enum mem_op {
MEMOP_STORE_POSTINCR
};
static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset)
static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
tile_bundle_bits result;
@@ -254,6 +254,18 @@ P("\n");
return bundle;
}
/*
* Called after execve() has started the new image. This allows us
* to reset the info state. Note that the mmap'ed memory, if there
* was any, has already been unmapped by the exec.
*/
void single_step_execve(void)
{
struct thread_info *ti = current_thread_info();
kfree(ti->step_state);
ti->step_state = NULL;
}
/**
* single_step_once() - entry point when single stepping has been triggered.
* @regs: The machine register state
@@ -373,7 +385,7 @@ void single_step_once(struct pt_regs *regs)
/* branches */
case BRANCH_OPCODE_X1:
{
int32_t offset = signExtend17(get_BrOff_X1(bundle));
s32 offset = signExtend17(get_BrOff_X1(bundle));
/*
* For branches, we use a rewriting trick to let the
@@ -731,4 +743,9 @@ void single_step_once(struct pt_regs *regs)
__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}
void single_step_execve(void)
{
/* Nothing */
}
#endif /* !__tilegx__ */
@@ -36,6 +36,22 @@ static unsigned long __iomem *ipi_mappings[NR_CPUS];
/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;
static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
{
int sent = 0;
while (sent < nrecip) {
int rc = hv_send_message(recip, nrecip,
(HV_VirtAddr)&tag, sizeof(tag));
if (rc < 0) {
if (!stopping_cpus) /* avoid recursive panic */
panic("hv_send_message returned %d", rc);
break;
}
WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
sent += rc;
}
}
void send_IPI_single(int cpu, int tag)
{
HV_Recipient recip = {
@@ -43,14 +59,13 @@ void send_IPI_single(int cpu, int tag)
.x = cpu % smp_width,
.state = HV_TO_BE_SENT
};
int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag));
BUG_ON(rc <= 0);
__send_IPI_many(&recip, 1, tag);
}
void send_IPI_many(const struct cpumask *mask, int tag)
{
HV_Recipient recip[NR_CPUS];
int cpu, sent;
int cpu;
int nrecip = 0;
int my_cpu = smp_processor_id();
for_each_cpu(cpu, mask) {
@@ -61,17 +76,7 @@ void send_IPI_many(const struct cpumask *mask, int tag)
r->x = cpu % smp_width;
r->state = HV_TO_BE_SENT;
}
sent = 0;
while (sent < nrecip) {
int rc = hv_send_message(recip, nrecip,
(HV_VirtAddr)&tag, sizeof(tag));
if (rc <= 0) {
if (!stopping_cpus) /* avoid recursive panic */
panic("hv_send_message returned %d", rc);
break;
}
sent += rc;
}
__send_IPI_many(recip, nrecip, tag);
}
void send_IPI_allbutself(int tag)
@@ -44,13 +44,6 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}
/* Is address in the specified kernel code? */
static int in_kernel_text(VirtualAddress address)
{
return (address >= MEM_SV_INTRPT &&
address < MEM_SV_INTRPT + HPAGE_SIZE);
}
/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
@@ -63,6 +56,23 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
if (l1_pgtable == NULL)
return 0; /* can't read user space in other tasks */
#ifdef CONFIG_64BIT
/* Find the real l1_pgtable by looking in the l0_pgtable. */
pte = l1_pgtable[HV_L0_INDEX(address)];
if (!hv_pte_get_present(pte))
return 0;
pfn = hv_pte_get_pfn(pte);
if (pte_huge(pte)) {
if (!pfn_valid(pfn)) {
pr_err("L0 huge page has bad pfn %#lx\n", pfn);
return 0;
}
return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
page = pfn_to_page(pfn);
BUG_ON(PageHighMem(page)); /* No HIGHMEM on 64-bit. */
l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
#endif
pte = l1_pgtable[HV_L1_INDEX(address)];
if (!hv_pte_get_present(pte))
return 0;
@@ -92,7 +102,7 @@ static bool read_memory_func(void *result, VirtualAddress address,
{
int retval;
struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
if (in_kernel_text(address)) {
if (__kernel_text_address(address)) {
/* OK to read kernel code. */
} else if (address >= PAGE_OFFSET) {
/* We only tolerate kernel-space reads of this task's stack */
@@ -132,7 +142,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
}
}
if (EX1_PL(p->ex1) == KERNEL_PL &&
in_kernel_text(p->pc) &&
__kernel_text_address(p->pc) &&
in_kernel_stack(kbt, p->sp) &&
p->sp >= sp) {
if (kbt->verbose)
@@ -224,3 +224,13 @@ int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
/*
* Use the tile timer to convert nsecs to core clock cycles, relying
* on it having the same frequency as SPR_CYCLE.
*/
cycles_t ns2cycles(unsigned long nsecs)
{
struct clock_event_device *dev = &__get_cpu_var(tile_timer);
return ((u64)nsecs * dev->mult) >> dev->shift;
}
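Worked example (numbers illustrative): at a 700 MHz core clock the timer's (mult, shift) pair encodes about 0.7 cycles per nanosecond, so ns2cycles(10000) returns roughly 7000 cycles; the u64 cast keeps nsecs * dev->mult from overflowing before the right shift.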
@@ -59,10 +59,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(_sinitdata) = .;
.init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
*(.init.page)
} :data =0
INIT_DATA_SECTION(16)
INIT_DATA_SECTION(16) :data =0
PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(_einitdata) = .;
@@ -2,9 +2,8 @@
# Makefile for TILE-specific library files.
#
lib-y = cacheflush.o checksum.o cpumask.o delay.o \
mb_incoherent.o uaccess.o memmove.o \
memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \
memmove.o memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
strchr_$(BITS).o strlen_$(BITS).o
ifeq ($(CONFIG_TILEGX),y)
@@ -46,14 +46,13 @@ struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */]
__attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned")));
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
static inline int *__atomic_hashed_lock(volatile void *v)
{
/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec.S */
/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
unsigned long i =
(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
@@ -14,7 +14,7 @@
* Support routines for atomic operations. Each function takes:
*
* r0: address to manipulate
* r1: pointer to atomic lock guarding this operation (for FUTEX_LOCK_REG)
* r1: pointer to atomic lock guarding this operation (for ATOMIC_LOCK_REG)
* r2: new value to write, or for cmpxchg/add_unless, value to compare against
* r3: (cmpxchg/xchg_add_unless) new value to write or add;
* (atomic64 ops) high word of value to write
@@ -21,3 +21,105 @@ void __flush_icache_range(unsigned long start, unsigned long end)
{
invalidate_icache((const void *)start, end - start, PAGE_SIZE);
}
/* Force a load instruction to issue. */
static inline void force_load(char *p)
{
*(volatile char *)p;
}
/*
* Flush and invalidate a VA range that is homed remotely on a single
* core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting
* until the memory controller holds the flushed values.
*/
void finv_buffer_remote(void *buffer, size_t size, int hfh)
{
char *p, *base;
size_t step_size, load_count;
const unsigned long STRIPE_WIDTH = 8192;
/*
* Flush and invalidate the buffer out of the local L1/L2
* and request the home cache to flush and invalidate as well.
*/
__finv_buffer(buffer, size);
/*
* Wait for the home cache to acknowledge that it has processed
* all the flush-and-invalidate requests. This does not mean
* that the flushed data has reached the memory controller yet,
* but it does mean the home cache is processing the flushes.
*/
__insn_mf();
/*
* Issue a load to the last cache line, which can't complete
* until all the previously-issued flushes to the same memory
* controller have also completed. If we weren't striping
* memory, that one load would be sufficient, but since we may
* be, we also need to back up to the last load issued to
* another memory controller, which would be the point where
* we crossed an 8KB boundary (the granularity of striping
* across memory controllers). Keep backing up and doing this
* until we are before the beginning of the buffer, or have
* hit all the controllers.
*
* If we are flushing a hash-for-home buffer, it's even worse.
* Each line may be homed on a different tile, and each tile
* may have up to four lines that are on different
* controllers. So as we walk backwards, we have to touch
* enough cache lines to satisfy these constraints. In
* practice this ends up being close enough to "load from
* every cache line on a full memory stripe on each
* controller" that we simply do that, to simplify the logic.
*
* FIXME: See bug 9535 for some issues with this code.
*/
if (hfh) {
step_size = L2_CACHE_BYTES;
load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
(1 << CHIP_LOG_NUM_MSHIMS());
} else {
step_size = STRIPE_WIDTH;
load_count = (1 << CHIP_LOG_NUM_MSHIMS());
}
/* Load the last byte of the buffer. */
p = (char *)buffer + size - 1;
force_load(p);
/* Bump down to the end of the previous stripe or cache line. */
p -= step_size;
p = (char *)((unsigned long)p | (step_size - 1));
/* Figure out how far back we need to go. */
base = p - (step_size * (load_count - 2));
if ((long)base < (long)buffer)
base = buffer;
/*
* Fire all the loads we need. The MAF only has eight entries
* so we can have at most eight outstanding loads, so we
* unroll by that amount.
*/
#pragma unroll 8
for (; p >= base; p -= step_size)
force_load(p);
/*
* Repeat, but with inv's instead of loads, to get rid of the
* data we just loaded into our own cache and the old home L3.
* No need to unroll since inv's don't target a register.
*/
p = (char *)buffer + size - 1;
__insn_inv(p);
p -= step_size;
p = (char *)((unsigned long)p | (step_size - 1));
for (; p >= base; p -= step_size)
__insn_inv(p);
/* Wait for the load+inv's (and thus finvs) to have completed. */
__insn_mf();
}
@@ -15,20 +15,31 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/thread_info.h>
#include <asm/fixmap.h>
#include <hv/hypervisor.h>
#include <asm/timex.h>
void __udelay(unsigned long usecs)
{
hv_nanosleep(usecs * 1000);
if (usecs > ULONG_MAX / 1000) {
WARN_ON_ONCE(usecs > ULONG_MAX / 1000);
usecs = ULONG_MAX / 1000;
}
__ndelay(usecs * 1000);
}
EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long nsecs)
{
hv_nanosleep(nsecs);
cycles_t target = get_cycles();
target += ns2cycles(nsecs);
while (get_cycles() < target)
cpu_relax();
}
EXPORT_SYMBOL(__ndelay);
/* FIXME: should be declared in a header somewhere. */
void __delay(unsigned long cycles)
{
cycles_t target = get_cycles() + cycles;
while (get_cycles() < target)
cpu_relax();
}
EXPORT_SYMBOL(__delay);
@@ -29,6 +29,9 @@ EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strnlen_user_asm);
EXPORT_SYMBOL(strncpy_from_user_asm);
EXPORT_SYMBOL(clear_user_asm);
EXPORT_SYMBOL(flush_user_asm);
EXPORT_SYMBOL(inv_user_asm);
EXPORT_SYMBOL(finv_user_asm);
/* arch/tile/kernel/entry.S */
#include <linux/kernel.h>
......@@ -45,9 +48,6 @@ EXPORT_SYMBOL(__copy_from_user_zeroing);
EXPORT_SYMBOL(__copy_in_user_inatomic);
#endif
/* arch/tile/lib/mb_incoherent.S */
EXPORT_SYMBOL(__mb_incoherent);
/* hypervisor glue */
#include <hv/hypervisor.h>
EXPORT_SYMBOL(hv_dev_open);
......@@ -85,4 +85,8 @@ int64_t __muldi3(int64_t, int64_t);
EXPORT_SYMBOL(__muldi3);
uint64_t __lshrdi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__lshrdi3);
uint64_t __ashrdi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashrdi3);
uint64_t __ashldi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashldi3);
#endif
......@@ -96,7 +96,7 @@ static void memcpy_multicache(void *dest, const void *source,
newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc);
ptep = pte_offset_kernel(pmdp, newsrc);
*ptep = src_pte; /* set_pte() would be confused by this */
__set_pte(ptep, src_pte); /* set_pte() would be confused by this */
local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);
/* Actually move the data. */
@@ -109,7 +109,7 @@
*/
src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3);
src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */
*ptep = src_pte; /* set_pte() would be confused by this */
__set_pte(ptep, src_pte); /* set_pte() would be confused by this */
local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);
/*
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <arch/spr_def.h>
#include "spinlock_common.h"
@@ -91,75 +92,75 @@ EXPORT_SYMBOL(arch_spin_unlock_wait);
#define RD_COUNT_MASK ((1 << RD_COUNT_WIDTH) - 1)
/* Lock the word, spinning until there are no tns-ers. */
static inline u32 get_rwlock(arch_rwlock_t *rwlock)
{
u32 iterations = 0;
for (;;) {
u32 val = __insn_tns((int *)&rwlock->lock);
if (unlikely(val & 1)) {
delay_backoff(iterations++);
continue;
}
return val;
}
}
int arch_read_trylock_slow(arch_rwlock_t *rwlock)
{
u32 val = get_rwlock(rwlock);
int locked = (val << RD_COUNT_WIDTH) == 0;
rwlock->lock = val + (locked << RD_COUNT_SHIFT);
return locked;
}
EXPORT_SYMBOL(arch_read_trylock_slow);
void arch_read_unlock_slow(arch_rwlock_t *rwlock)
{
u32 val = get_rwlock(rwlock);
rwlock->lock = val - (1 << RD_COUNT_SHIFT);
}
EXPORT_SYMBOL(arch_read_unlock_slow);
void arch_write_unlock_slow(arch_rwlock_t *rwlock, u32 val)
/*
* We can get the read lock if everything but the reader bits (which
* are in the high part of the word) is zero, i.e. no active or
* waiting writers, no tns.
*
* We guard the tns/store-back with an interrupt critical section to
* preserve the semantic that the same read lock can be acquired in an
* interrupt context.
*/
inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
u32 eq, mask = 1 << WR_CURR_SHIFT;
while (unlikely(val & 1)) {
/* Limited backoff since we are the highest-priority task. */
relax(4);
u32 val;
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
val = __insn_tns((int *)&rwlock->lock);
if (likely((val << _RD_COUNT_WIDTH) == 0)) {
val += 1 << RD_COUNT_SHIFT;
rwlock->lock = val;
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
BUG_ON(val == 0); /* we don't expect wraparound */
return 1;
}
val = __insn_addb(val, mask);
eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
val = __insn_mz(eq & mask, val);
if ((val & 1) == 0)
rwlock->lock = val;
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
return 0;
}
EXPORT_SYMBOL(arch_write_unlock_slow);
EXPORT_SYMBOL(arch_read_trylock);
/*
* We spin until everything but the reader bits (which are in the high
* part of the word) are zero, i.e. no active or waiting writers, no tns.
*
* Spin doing arch_read_trylock() until we acquire the lock.
* ISSUE: This approach can permanently starve readers. A reader who sees
* a writer could instead take a ticket lock (just like a writer would),
* and atomically enter read mode (with 1 reader) when it gets the ticket.
* This way both readers and writers will always make forward progress
* This way both readers and writers would always make forward progress
* in a finite time.
*/
void arch_read_lock_slow(arch_rwlock_t *rwlock, u32 val)
void arch_read_lock(arch_rwlock_t *rwlock)
{
u32 iterations = 0;
do {
if (!(val & 1))
rwlock->lock = val;
while (unlikely(!arch_read_trylock(rwlock)))
delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_read_lock);
void arch_read_unlock(arch_rwlock_t *rwlock)
{
u32 val, iterations = 0;
mb(); /* guarantee anything modified under the lock is visible */
for (;;) {
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
val = __insn_tns((int *)&rwlock->lock);
} while ((val << RD_COUNT_WIDTH) != 0);
rwlock->lock = val + (1 << RD_COUNT_SHIFT);
if (likely((val & 1) == 0)) {
rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
break;
}
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
delay_backoff(iterations++);
}
}
EXPORT_SYMBOL(arch_read_lock_slow);
EXPORT_SYMBOL(arch_read_unlock);
void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
/*
* We don't need an interrupt critical section here (unlike for
* arch_read_lock) since we should never use a bare write lock where
* it could be interrupted by code that could try to re-acquire it.
*/
void arch_write_lock(arch_rwlock_t *rwlock)
{
/*
* The trailing underscore on this variable (and curr_ below)
......@@ -168,6 +169,12 @@ void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
*/
u32 my_ticket_;
u32 iterations = 0;
u32 val = __insn_tns((int *)&rwlock->lock);
if (likely(val == 0)) {
rwlock->lock = 1 << _WR_NEXT_SHIFT;
return;
}
/*
* Wait until there are no readers, then bump up the next
......@@ -206,23 +213,47 @@ void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
relax(4);
}
}
EXPORT_SYMBOL(arch_write_lock_slow);
EXPORT_SYMBOL(arch_write_lock);
int __tns_atomic_acquire(atomic_t *lock)
int arch_write_trylock(arch_rwlock_t *rwlock)
{
int ret;
u32 iterations = 0;
u32 val = __insn_tns((int *)&rwlock->lock);
BUG_ON(__insn_mfspr(SPR_INTERRUPT_CRITICAL_SECTION));
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
/*
* If a tns is in progress, or there's a waiting or active locker,
* or active readers, we can't take the lock, so give up.
*/
if (unlikely(val != 0)) {
if (!(val & 1))
rwlock->lock = val;
return 0;
}
while ((ret = __insn_tns((void *)&lock->counter)) == 1)
delay_backoff(iterations++);
return ret;
/* Set the "next" field to mark it locked. */
rwlock->lock = 1 << _WR_NEXT_SHIFT;
return 1;
}
EXPORT_SYMBOL(arch_write_trylock);
void __tns_atomic_release(atomic_t *p, int v)
void arch_write_unlock(arch_rwlock_t *rwlock)
{
p->counter = v;
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
u32 val, eq, mask;
mb(); /* guarantee anything modified under the lock is visible */
val = __insn_tns((int *)&rwlock->lock);
if (likely(val == (1 << _WR_NEXT_SHIFT))) {
rwlock->lock = 0;
return;
}
while (unlikely(val & 1)) {
/* Limited backoff since we are the highest-priority task. */
relax(4);
val = __insn_tns((int *)&rwlock->lock);
}
mask = 1 << WR_CURR_SHIFT;
val = __insn_addb(val, mask);
eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
val = __insn_mz(eq & mask, val);
rwlock->lock = val;
}
EXPORT_SYMBOL(arch_write_unlock);
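For readers following the tns-based locking above, here is a toy userspace model of the read-trylock path. The field positions are illustrative only (the real layout is defined by RD_COUNT_SHIFT/RD_COUNT_WIDTH in the arch headers), and a real __insn_tns is atomic where this model is not:

#include <stdio.h>

/* Toy lock word: bit 0 models the tns busy bit; bits 24..31 model the
 * reader count (illustrative stand-ins for RD_COUNT_SHIFT/WIDTH). */
#define TOY_RD_SHIFT 24
#define TOY_RD_WIDTH 8

static unsigned int lockword;

/* Model of __insn_tns: fetch the word and store 1 (NOT atomic here). */
static unsigned int toy_tns(unsigned int *p)
{
	unsigned int old = *p;
	*p = 1;
	return old;
}

static int toy_read_trylock(void)
{
	unsigned int val = toy_tns(&lockword);
	if ((val << TOY_RD_WIDTH) == 0) {	/* no writers, no tns */
		lockword = val + (1u << TOY_RD_SHIFT);
		return 1;
	}
	if (!(val & 1))
		lockword = val;			/* put the word back */
	return 0;
}

int main(void)
{
	printf("%d %d\n", toy_read_trylock(), toy_read_trylock());
	printf("readers = %u\n", lockword >> TOY_RD_SHIFT);	/* 2 */
	return 0;
}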
......@@ -654,14 +654,6 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
}
/*
* NOTE: the one other type of access that might bring us here
* are the memory ops in __tns_atomic_acquire/__tns_atomic_release,
* but we don't have to check specially for them since we can
* always safely return to the address of the fault and retry,
* since no separate atomic locks are involved.
*/
/*
* Now that we have released the atomic lock (if necessary),
* it's safe to spin if the PTE that caused the fault was migrating.
......
......@@ -179,23 +179,46 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
panic("Unsafe to continue.");
}
void flush_remote_page(struct page *page, int order)
{
int i, pages = (1 << order);
for (i = 0; i < pages; ++i, ++page) {
void *p = kmap_atomic(page);
int hfh = 0;
int home = page_home(page);
#if CHIP_HAS_CBOX_HOME_MAP()
if (home == PAGE_HOME_HASH)
hfh = 1;
else
#endif
BUG_ON(home < 0 || home >= NR_CPUS);
finv_buffer_remote(p, PAGE_SIZE, hfh);
kunmap_atomic(p);
}
}
void homecache_evict(const struct cpumask *mask)
{
flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}
/* Return a mask of the cpus whose caches currently own these pages. */
static void homecache_mask(struct page *page, int pages,
/*
* Return a mask of the cpus whose caches currently own these pages.
* The return value is whether the pages are all coherently cached
* (i.e. none are immutable, incoherent, or uncached).
*/
static int homecache_mask(struct page *page, int pages,
struct cpumask *home_mask)
{
int i;
int cached_coherently = 1;
cpumask_clear(home_mask);
for (i = 0; i < pages; ++i) {
int home = page_home(&page[i]);
if (home == PAGE_HOME_IMMUTABLE ||
home == PAGE_HOME_INCOHERENT) {
cpumask_copy(home_mask, cpu_possible_mask);
return;
return 0;
}
#if CHIP_HAS_CBOX_HOME_MAP()
if (home == PAGE_HOME_HASH) {
......@@ -203,11 +226,14 @@ static void homecache_mask(struct page *page, int pages,
continue;
}
#endif
if (home == PAGE_HOME_UNCACHED)
if (home == PAGE_HOME_UNCACHED) {
cached_coherently = 0;
continue;
}
BUG_ON(home < 0 || home >= NR_CPUS);
cpumask_set_cpu(home, home_mask);
}
return cached_coherently;
}
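A hypothetical caller showing how the new return value could be consumed (illustrative only; the actual user of this flag is elsewhere in the patch series):

static void evict_pages(struct page *page, int pages)
{
	struct cpumask home_mask;

	/* If not every page was coherently cached, homecache_mask()
	 * has already widened home_mask to cpu_possible_mask. */
	if (!homecache_mask(page, pages, &home_mask))
		pr_debug("pages not all coherently cached\n");
	homecache_evict(&home_mask);
}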
/*
......@@ -386,7 +412,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
pte_t *ptep = virt_to_pte(NULL, kva);
pte_t pteval = *ptep;
BUG_ON(!pte_present(pteval) || pte_huge(pteval));
*ptep = pte_set_home(pteval, home);
__set_pte(ptep, pte_set_home(pteval, home));
}
}
......
......@@ -53,22 +53,11 @@
#include "migrate.h"
/*
* We could set FORCE_MAX_ZONEORDER to "(HPAGE_SHIFT - PAGE_SHIFT + 1)"
* in the Tile Kconfig, but this generates configure warnings.
* Do it here and force people to get it right to compile this file.
* The problem is that with 4KB small pages and 16MB huge pages,
* the default value doesn't allow us to group enough small pages
* together to make up a huge page.
*/
#if CONFIG_FORCE_MAX_ZONEORDER < HPAGE_SHIFT - PAGE_SHIFT + 1
# error "Change FORCE_MAX_ZONEORDER in arch/tile/Kconfig to match page size"
#endif
#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
#ifndef __tilegx__
unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
EXPORT_SYMBOL(VMALLOC_RESERVE);
#endif
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
......@@ -445,7 +434,7 @@ static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
__attribute__((section(".init.page")));
__attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
/*
* This maps the physical memory to kernel virtual address space, a total
......@@ -653,6 +642,17 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
memcpy(pgd_base, pgtables, sizeof(pgtables));
__install_page_table(pgd_base, __get_cpu_var(current_asid),
swapper_pgprot);
/*
* We just read swapper_pgprot and thus brought it into the cache,
* with its new home & caching mode. When we start the other CPUs,
* they're going to reference swapper_pgprot via their initial fake
* VA-is-PA mappings, which cache everything locally. At that
* time, if it's in our cache with a conflicting home, the
* simulator's coherence checker will complain. So, flush it out
* of our cache; we're not going to ever use it again anyway.
*/
__insn_finv(&swapper_pgprot);
}
/*
......@@ -950,11 +950,7 @@ struct kmem_cache *pgd_cache;
void __init pgtable_cache_init(void)
{
pgd_cache = kmem_cache_create("pgd",
PTRS_PER_PGD*sizeof(pgd_t),
PTRS_PER_PGD*sizeof(pgd_t),
0,
NULL);
pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
if (!pgd_cache)
panic("pgtable_cache_init(): Cannot create pgd cache");
}
......@@ -989,7 +985,7 @@ static long __write_once initfree = 1;
static int __init set_initfree(char *str)
{
long val;
if (strict_strtol(str, 0, &val)) {
if (strict_strtol(str, 0, &val) == 0) {
initfree = val;
pr_info("initfree: %s free init pages\n",
initfree ? "will" : "won't");
......
......@@ -18,6 +18,7 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/types.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
......
......@@ -142,6 +142,76 @@ pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
}
#endif
/**
* shatter_huge_page() - ensure a given address is mapped by a small page.
*
* This function converts a huge PTE mapping kernel LOWMEM into a bunch
* of small PTEs with the same caching. No cache flush required, but we
* must do a global TLB flush.
*
* Any caller that wishes to modify a kernel mapping that might
* have been made with a huge page should call this function,
* since doing so properly avoids race conditions with installing the
* newly-shattered page and then flushing all the TLB entries.
*
* @addr: Address at which to shatter any existing huge page.
*/
void shatter_huge_page(unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
unsigned long flags = 0; /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
struct list_head *pos;
#endif
/* Get a pointer to the pmd entry that we need to change. */
addr &= HPAGE_MASK;
BUG_ON(pgd_addr_invalid(addr));
BUG_ON(addr < PAGE_OFFSET); /* only for kernel LOWMEM */
pgd = swapper_pg_dir + pgd_index(addr);
pud = pud_offset(pgd, addr);
BUG_ON(!pud_present(*pud));
pmd = pmd_offset(pud, addr);
BUG_ON(!pmd_present(*pmd));
if (!pmd_huge_page(*pmd))
return;
/*
* Grab the pgd_lock, since we may need it to walk the pgd_list,
* and since we need some kind of lock here to avoid races.
*/
spin_lock_irqsave(&pgd_lock, flags);
if (!pmd_huge_page(*pmd)) {
/* Lost the race to convert the huge page. */
spin_unlock_irqrestore(&pgd_lock, flags);
return;
}
/* Shatter the huge page into the preallocated L2 page table. */
pmd_populate_kernel(&init_mm, pmd,
get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));
#ifdef __PAGETABLE_PMD_FOLDED
/* Walk every pgd on the system and update the pmd there. */
list_for_each(pos, &pgd_list) {
pmd_t *copy_pmd;
pgd = list_to_pgd(pos) + pgd_index(addr);
pud = pud_offset(pgd, addr);
copy_pmd = pmd_offset(pud, addr);
__set_pmd(copy_pmd, *pmd);
}
#endif
/* Tell every cpu to notice the change. */
flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
cpu_possible_mask, NULL, 0);
/* Hold the lock until the TLB flush is finished to avoid races. */
spin_unlock_irqrestore(&pgd_lock, flags);
}
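A hedged sketch of a typical caller (hypothetical helper name; virt_to_pte(), pte_set_home(), __set_pte() and flush_remote() all appear elsewhere in this patch):

/* Rehome one small kernel page that may currently sit under a huge
 * mapping: shattering first guarantees a per-small-page PTE exists. */
static void rehome_kernel_page(unsigned long kva, int home)
{
	pte_t *ptep;
	pte_t pteval;

	shatter_huge_page(kva);		/* no-op if already small */
	ptep = virt_to_pte(NULL, kva);
	pteval = *ptep;
	__set_pte(ptep, pte_set_home(pteval, home));
	flush_remote(0, 0, NULL, kva, PAGE_SIZE, PAGE_SIZE,
		     cpu_possible_mask, NULL, 0);
}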
/*
* List of all pgd's needed so it can invalidate entries in both cached
* and uncached pgd's. This is essentially codepath-based locking
......@@ -184,9 +254,9 @@ static void pgd_ctor(pgd_t *pgd)
BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif
clone_pgd_range(pgd + KERNEL_PGD_INDEX_START,
memcpy(pgd + KERNEL_PGD_INDEX_START,
swapper_pg_dir + KERNEL_PGD_INDEX_START,
KERNEL_PGD_PTRS);
KERNEL_PGD_PTRS * sizeof(pgd_t));
pgd_list_add(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
......@@ -220,8 +290,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
struct page *p;
#if L2_USER_PGTABLE_ORDER > 0
int i;
#endif
#ifdef CONFIG_HIGHPTE
flags |= __GFP_HIGHMEM;
......@@ -231,6 +304,18 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
if (p == NULL)
return NULL;
#if L2_USER_PGTABLE_ORDER > 0
/*
* Make every page have a page_count() of one, not just the first.
* We don't use __GFP_COMP since it doesn't look like it works
* correctly with tlb_remove_page().
*/
for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
init_page_count(p+i);
inc_zone_page_state(p+i, NR_PAGETABLE);
}
#endif
pgtable_page_ctor(p);
return p;
}
......@@ -242,8 +327,15 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
*/
void pte_free(struct mm_struct *mm, struct page *p)
{
int i;
pgtable_page_dtor(p);
__free_pages(p, L2_USER_PGTABLE_ORDER);
__free_page(p);
for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
__free_page(p+i);
dec_zone_page_state(p+i, NR_PAGETABLE);
}
}
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
......@@ -252,18 +344,11 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
int i;
pgtable_page_dtor(pte);
tlb->need_flush = 1;
if (tlb_fast_mode(tlb)) {
struct page *pte_pages[L2_USER_PGTABLE_PAGES];
for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i)
pte_pages[i] = pte + i;
free_pages_and_swap_cache(pte_pages, L2_USER_PGTABLE_PAGES);
return;
}
for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i) {
tlb->pages[tlb->nr++] = pte + i;
if (tlb->nr >= FREE_PTE_NR)
tlb_flush_mmu(tlb, 0, 0);
tlb_remove_page(tlb, pte);
for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
tlb_remove_page(tlb, pte + i);
dec_zone_page_state(pte + i, NR_PAGETABLE);
}
}
......@@ -346,35 +431,51 @@ int get_remote_cache_cpu(pgprot_t prot)
return x + y * smp_width;
}
void set_pte_order(pte_t *ptep, pte_t pte, int order)
/*
* Convert a kernel VA to a PA and homing information.
*/
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
unsigned long pfn = pte_pfn(pte);
struct page *page = pfn_to_page(pfn);
struct page *page = virt_to_page(va);
pte_t null_pte = { 0 };
/* Update the home of a PTE if necessary */
pte = pte_set_home(pte, page_home(page));
*cpa = __pa(va);
/* Note that this is not writing a page table, just returning a pte. */
*pte = pte_set_home(null_pte, page_home(page));
return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);
void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
*ptep = pte;
#else
/*
* When setting a PTE, write the high bits first, then write
* the low bits. This sets the "present" bit only after the
* other bits are in place. If a particular PTE update
* involves transitioning from one valid PTE to another, it
* may be necessary to call set_pte_order() more than once,
* transitioning via a suitable intermediate state.
* Note that this sequence also means that if we are transitioning
* from any migrating PTE to a non-migrating one, we will not
* see a half-updated PTE with the migrating bit off.
*/
#if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
#endif
# endif
if (pte_present(pte)) {
((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
barrier();
((u32 *)ptep)[0] = (u32)(pte_val(pte));
#endif
} else {
((u32 *)ptep)[0] = (u32)(pte_val(pte));
barrier();
((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
}
#endif /* __tilegx__ */
}
void set_pte(pte_t *ptep, pte_t pte)
{
struct page *page = pfn_to_page(pte_pfn(pte));
/* Update the home of a PTE if necessary */
pte = pte_set_home(pte, page_home(page));
__set_pte(ptep, pte);
}
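The ordering comment in __set_pte() implies that replacing one valid PTE with another must pass through a not-present intermediate, so no cpu ever observes a half-written valid entry. A hedged sketch of that sequence (hypothetical helper, reusing hv_pte(0) and local_flush_tlb_page() as seen elsewhere in this patch):

static void replace_valid_pte(pte_t *ptep, unsigned long va, pte_t pte)
{
	__set_pte(ptep, hv_pte(0));			/* 1: clear "present" */
	local_flush_tlb_page(NULL, va, PAGE_SIZE);	/* 2: drop stale entry */
	__set_pte(ptep, pte);				/* 3: install new PTE */
}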
/* Can this mm load a PTE with cached_priority set? */
......
......@@ -7,7 +7,7 @@
menuconfig EDAC
bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
depends on X86 || PPC
depends on X86 || PPC || TILE
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
......@@ -282,4 +282,12 @@ config EDAC_CPC925
a companion chip to the PowerPC 970 family of
processors.
config EDAC_TILE
tristate "Tilera Memory Controller"
depends on EDAC_MM_EDAC && TILE
default y
help
Support for error detection and correction on the
Tilera memory controller.
endif # EDAC
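With the hunks above applied, a minimal .config fragment to build the driver in (assuming a TILEPro kernel; EDAC_TILE defaults to y once its dependencies are satisfied):

CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_TILE=y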
......@@ -54,3 +54,4 @@ obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
obj-$(CONFIG_EDAC_TILE) += tile_edac.o
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
* Tilera-specific EDAC driver.
*
* This source code is derived from the following driver:
*
* Cell MIC driver for ECC counting
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/edac.h>
#include <hv/hypervisor.h>
#include <hv/drv_mshim_intf.h>
#include "edac_core.h"
#define DRV_NAME "tile-edac"
/* Number of cs_rows needed per memory controller on TILEPro. */
#define TILE_EDAC_NR_CSROWS 1
/* Number of channels per memory controller on TILEPro. */
#define TILE_EDAC_NR_CHANS 1
/* Granularity of reported error in bytes on TILEPro. */
#define TILE_EDAC_ERROR_GRAIN 8
/* TILE processor has multiple independent memory controllers. */
static struct platform_device *mshim_pdev[TILE_MAX_MSHIMS];
struct tile_edac_priv {
int hv_devhdl; /* Hypervisor device handle. */
int node; /* Memory controller instance #. */
unsigned int ce_count; /*
* Correctable-error counter
* kept by the driver.
*/
};
static void tile_edac_check(struct mem_ctl_info *mci)
{
struct tile_edac_priv *priv = mci->pvt_info;
struct mshim_mem_error mem_error;
if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error,
sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) !=
sizeof(struct mshim_mem_error)) {
pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n");
return;
}
/* Check if the current error count is different from the saved one. */
if (mem_error.sbe_count != priv->ce_count) {
dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
priv->ce_count = mem_error.sbe_count;
edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
}
}
/*
* Initialize the 'csrows' table within the mci control structure with the
* addressing of memory.
*/
static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = &mci->csrows[0];
struct tile_edac_priv *priv = mci->pvt_info;
struct mshim_mem_info mem_info;
if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
sizeof(struct mshim_mem_info)) {
pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n");
return -1;
}
if (mem_info.mem_ecc)
csrow->edac_mode = EDAC_SECDED;
else
csrow->edac_mode = EDAC_NONE;
switch (mem_info.mem_type) {
case DDR2:
csrow->mtype = MEM_DDR2;
break;
case DDR3:
csrow->mtype = MEM_DDR3;
break;
default:
return -1;
}
csrow->first_page = 0;
csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
csrow->grain = TILE_EDAC_ERROR_GRAIN;
csrow->dtype = DEV_UNKNOWN;
return 0;
}
static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
{
char hv_file[32];
int hv_devhdl;
struct mem_ctl_info *mci;
struct tile_edac_priv *priv;
int rc;
sprintf(hv_file, "mshim/%d", pdev->id);
hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0);
if (hv_devhdl < 0)
return -EINVAL;
/* A TILE MC has a single channel and one chip-select row. */
mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
priv->node = pdev->id;
priv->hv_devhdl = hv_devhdl;
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->mod_name = DRV_NAME;
mci->ctl_name = "TILEPro_Memory_Controller";
mci->dev_name = dev_name(&pdev->dev);
mci->edac_check = tile_edac_check;
/*
* Initialize the MC control structure 'csrows' table
* with the mapping and control information.
*/
if (tile_edac_init_csrows(mci)) {
/* No csrows found. */
mci->edac_cap = EDAC_FLAG_NONE;
} else {
mci->edac_cap = EDAC_FLAG_SECDED;
}
platform_set_drvdata(pdev, mci);
/* Register with EDAC core */
rc = edac_mc_add_mc(mci);
if (rc) {
dev_err(&pdev->dev, "failed to register with EDAC core\n");
edac_mc_free(mci);
return rc;
}
return 0;
}
static int __devexit tile_edac_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
if (mci)
edac_mc_free(mci);
return 0;
}
static struct platform_driver tile_edac_mc_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = tile_edac_mc_probe,
.remove = __devexit_p(tile_edac_mc_remove),
};
/*
* Driver init routine.
*/
static int __init tile_edac_init(void)
{
char hv_file[32];
struct platform_device *pdev;
int i, err, num = 0;
/* Only support POLL mode. */
edac_op_state = EDAC_OPSTATE_POLL;
err = platform_driver_register(&tile_edac_mc_driver);
if (err)
return err;
for (i = 0; i < TILE_MAX_MSHIMS; i++) {
/*
* Not all memory controllers are configured, e.g. when
* running under the simulator, so register only those
* mshims that the hypervisor reports as configured.
*/
sprintf(hv_file, "mshim/%d", i);
if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0)
continue;
pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0);
if (IS_ERR(pdev))
continue;
mshim_pdev[i] = pdev;
num++;
}
if (num == 0) {
platform_driver_unregister(&tile_edac_mc_driver);
return -ENODEV;
}
return 0;
}
/*
* Driver cleanup routine.
*/
static void __exit tile_edac_exit(void)
{
int i;
for (i = 0; i < TILE_MAX_MSHIMS; i++) {
struct platform_device *pdev = mshim_pdev[i];
if (!pdev)
continue;
platform_set_drvdata(pdev, NULL);
platform_device_unregister(pdev);
}
platform_driver_unregister(&tile_edac_mc_driver);
}
module_init(tile_edac_init);
module_exit(tile_edac_exit);