Commit 3ba113d1 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6: (23 commits)
  parisc: move dereference_function_descriptor to process.c
  parisc: Move kernel Elf_Fdesc define to <asm/elf.h>
  parisc: fix build when ARCH_HAS_KMAP
  parisc: fix "make tar-pkg"
  parisc: drivers: fix warnings
  parisc: select BUG always
  parisc: asm/pdc.h should include asm/page.h
  parisc: led: remove proc_dir_entry::owner
  parisc: fix macro expansion in atomic.h
  parisc: iosapic: fix build breakage
  parisc: oops_enter()/oops_exit() in die()
  parisc: document light weight syscall ABI
  parisc: blink all or loadavg LEDs on oops
  parisc: add ftrace (function and graph tracer) functionality
  parisc: simplify sys_clone()
  parisc: add LATENCYTOP_SUPPORT and CONFIG_STACKTRACE_SUPPORT
  parisc: allow to build with 16k default kernel page size
  parisc: expose 32/64-bit capabilities in cpuinfo
  parisc: use constants instead of numbers in assembly
  parisc: fix usage of 32bit PTE page table entries on 32bit kernels
  ...
parents bad6a5c0 b609308e
...@@ -9,9 +9,13 @@ config PARISC ...@@ -9,9 +9,13 @@ config PARISC
def_bool y def_bool y
select HAVE_IDE select HAVE_IDE
select HAVE_OPROFILE select HAVE_OPROFILE
select HAVE_FUNCTION_TRACER if 64BIT
select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
select HAVE_FUNCTION_TRACE_MCOUNT_TEST if 64BIT
select RTC_CLASS select RTC_CLASS
select RTC_DRV_GENERIC select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE select INIT_ALL_POSSIBLE
select BUG
help help
The PA-RISC microprocessor is designed by Hewlett-Packard and used The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series, in many of their workstations & servers (HP9000 700 and 800 series,
...@@ -75,6 +79,9 @@ config GENERIC_HARDIRQS ...@@ -75,6 +79,9 @@ config GENERIC_HARDIRQS
config GENERIC_IRQ_PROBE config GENERIC_IRQ_PROBE
def_bool y def_bool y
config HAVE_LATENCYTOP_SUPPORT
def_bool y
config IRQ_PER_CPU config IRQ_PER_CPU
bool bool
default y default y
...@@ -83,6 +90,9 @@ config IRQ_PER_CPU ...@@ -83,6 +90,9 @@ config IRQ_PER_CPU
config PM config PM
bool bool
config STACKTRACE_SUPPORT
def_bool y
config ISA_DMA_API config ISA_DMA_API
bool bool
......
...@@ -56,7 +56,9 @@ cflags-y += -mdisable-fpregs ...@@ -56,7 +56,9 @@ cflags-y += -mdisable-fpregs
# Without this, "ld -r" results in .text sections that are too big # Without this, "ld -r" results in .text sections that are too big
# (> 0x40000) for branches to reach stubs. # (> 0x40000) for branches to reach stubs.
cflags-y += -ffunction-sections ifndef CONFIG_FUNCTION_TRACER
cflags-y += -ffunction-sections
endif
# select which processor to optimise for # select which processor to optimise for
cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
* Since "a" is usually an address, use one spinlock per cacheline. * Since "a" is usually an address, use one spinlock per cacheline.
*/ */
# define ATOMIC_HASH_SIZE 4 # define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
...@@ -222,13 +222,13 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) ...@@ -222,13 +222,13 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)(i)),(v))))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v)))) #define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)(i)),(v))))
#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v)))) #define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v)))) #define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
#define atomic_add_return(i,v) (__atomic_add_return( ((int)i),(v))) #define atomic_add_return(i,v) (__atomic_add_return( ((int)(i)),(v)))
#define atomic_sub_return(i,v) (__atomic_add_return(-((int)i),(v))) #define atomic_sub_return(i,v) (__atomic_add_return(-((int)(i)),(v)))
#define atomic_inc_return(v) (__atomic_add_return( 1,(v))) #define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
#define atomic_dec_return(v) (__atomic_add_return( -1,(v))) #define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
...@@ -289,13 +289,13 @@ atomic64_read(const atomic64_t *v) ...@@ -289,13 +289,13 @@ atomic64_read(const atomic64_t *v)
return v->counter; return v->counter;
} }
#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)i),(v)))) #define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)i),(v)))) #define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v)))) #define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v))))
#define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v)))) #define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v))))
#define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)i),(v))) #define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)i),(v))) #define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v) (__atomic64_add_return( 1,(v))) #define atomic64_inc_return(v) (__atomic64_add_return( 1,(v)))
#define atomic64_dec_return(v) (__atomic64_add_return( -1,(v))) #define atomic64_dec_return(v) (__atomic64_add_return( -1,(v)))
......
...@@ -97,6 +97,9 @@ void mark_rodata_ro(void); ...@@ -97,6 +97,9 @@ void mark_rodata_ro(void);
#ifdef CONFIG_PA8X00 #ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 needs this */ /* Only pa8800, pa8900 needs this */
#include <asm/kmap_types.h>
#define ARCH_HAS_KMAP #define ARCH_HAS_KMAP
void kunmap_parisc(void *addr); void kunmap_parisc(void *addr);
......
...@@ -168,6 +168,16 @@ typedef struct elf64_fdesc { ...@@ -168,6 +168,16 @@ typedef struct elf64_fdesc {
__u64 gp; __u64 gp;
} Elf64_Fdesc; } Elf64_Fdesc;
#ifdef __KERNEL__
#ifdef CONFIG_64BIT
#define Elf_Fdesc Elf64_Fdesc
#else
#define Elf_Fdesc Elf32_Fdesc
#endif /*CONFIG_64BIT*/
#endif /*__KERNEL__*/
/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */ /* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
#define PT_HP_TLS (PT_LOOS + 0x0) #define PT_HP_TLS (PT_LOOS + 0x0)
......
#ifndef _ASM_PARISC_FTRACE_H
#define _ASM_PARISC_FTRACE_H

#ifndef __ASSEMBLY__
/* Profiling entry stub emitted by gcc -pg at function entry;
 * the actual implementation lives in assembly (see entry.S). */
extern void mcount(void);

/*
 * Stack of return addresses for functions of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;		/* original return address of the caller */
	unsigned long func;		/* address of the traced function */
	unsigned long long calltime;	/* timestamp taken at function entry */
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry.S
 */
extern void return_to_handler(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_PARISC_FTRACE_H */
...@@ -36,16 +36,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg); ...@@ -36,16 +36,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
*/ */
#define STRICT_MM_TYPECHECKS #define STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pte; typedef struct { unsigned long pte; } pte_t; /* either 32 or 64bit */
#if !defined(CONFIG_64BIT)
unsigned long future_flags;
/* XXX: it's possible to remove future_flags and change BITS_PER_PTE_ENTRY
to 2, but then strangely the identical 32bit kernel boots on a
c3000(pa20), but not any longer on a 715(pa11).
Still investigating... HelgeD.
*/
#endif
} pte_t; /* either 32 or 64bit */
/* NOTE: even on 64 bits, these entries are __u32 because we allocate /* NOTE: even on 64 bits, these entries are __u32 because we allocate
* the pmd and pgd in ZONE_DMA (i.e. under 4GB) */ * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
...@@ -111,7 +102,7 @@ extern int npmem_ranges; ...@@ -111,7 +102,7 @@ extern int npmem_ranges;
#define BITS_PER_PMD_ENTRY 2 #define BITS_PER_PMD_ENTRY 2
#define BITS_PER_PGD_ENTRY 2 #define BITS_PER_PGD_ENTRY 2
#else #else
#define BITS_PER_PTE_ENTRY 3 #define BITS_PER_PTE_ENTRY 2
#define BITS_PER_PMD_ENTRY 2 #define BITS_PER_PMD_ENTRY 2
#define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY #define BITS_PER_PGD_ENTRY BITS_PER_PMD_ENTRY
#endif #endif
......
...@@ -49,6 +49,8 @@ ...@@ -49,6 +49,8 @@
#define PDC_MODEL_CPU_ID 6 /* returns cpu-id (only newer machines!) */ #define PDC_MODEL_CPU_ID 6 /* returns cpu-id (only newer machines!) */
#define PDC_MODEL_CAPABILITIES 7 /* returns OS32/OS64-flags */ #define PDC_MODEL_CAPABILITIES 7 /* returns OS32/OS64-flags */
/* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */ /* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */
#define PDC_MODEL_OS64 (1 << 0)
#define PDC_MODEL_OS32 (1 << 1)
#define PDC_MODEL_IOPDIR_FDC (1 << 2) #define PDC_MODEL_IOPDIR_FDC (1 << 2)
#define PDC_MODEL_NVA_MASK (3 << 4) #define PDC_MODEL_NVA_MASK (3 << 4)
#define PDC_MODEL_NVA_SUPPORTED (0 << 4) #define PDC_MODEL_NVA_SUPPORTED (0 << 4)
...@@ -341,6 +343,8 @@ ...@@ -341,6 +343,8 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/page.h> /* for __PAGE_OFFSET */
extern int pdc_type; extern int pdc_type;
/* Values for pdc_type */ /* Values for pdc_type */
......
...@@ -50,11 +50,7 @@ ...@@ -50,11 +50,7 @@
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */ /* This is the size of the initially mapped kernel memory */
#ifdef CONFIG_64BIT
#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ #define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
#else
#define KERNEL_INITIAL_ORDER 23 /* 0 to 1<<23 = 8MB */
#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) #if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
...@@ -91,16 +87,25 @@ ...@@ -91,16 +87,25 @@
/* Definitions for 1st level */ /* Definitions for 1st level */
#define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD) #define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) #define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1UL << BITS_PER_PGD) #define PTRS_PER_PGD (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD PTRS_PER_PGD #define USER_PTRS_PER_PGD PTRS_PER_PGD
#ifdef CONFIG_64BIT
#define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD) #define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS (1UL << MAX_ADDRBITS) #define MAX_ADDRESS (1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT (MAX_ADDRBITS - 32) #define SPACEID_SHIFT (MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS (BITS_PER_LONG)
#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT 0
#endif
/* This calculates the number of initial pages we need for the initial /* This calculates the number of initial pages we need for the initial
* page tables */ * page tables */
......
...@@ -29,7 +29,8 @@ extern void smp_send_reschedule(int cpu); ...@@ -29,7 +29,8 @@ extern void smp_send_reschedule(int cpu);
extern void smp_send_all_nop(void); extern void smp_send_all_nop(void);
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi(cpumask_t mask); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#endif /* !ASSEMBLY */ #endif /* !ASSEMBLY */
......
...@@ -11,10 +11,25 @@ obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ ...@@ -11,10 +11,25 @@ obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \ process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
topology.o topology.o
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_cache.o = -pg
CFLAGS_REMOVE_irq.o = -pg
CFLAGS_REMOVE_pacache.o = -pg
CFLAGS_REMOVE_perf.o = -pg
CFLAGS_REMOVE_traps.o = -pg
CFLAGS_REMOVE_unaligned.o = -pg
CFLAGS_REMOVE_unwind.o = -pg
endif
obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o
obj-$(CONFIG_STACKTRACE)+= stacktrace.o
# only supported for PCX-W/U in 64-bit mode at the moment # only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_64BIT) += perf.o perf_asm.o obj-$(CONFIG_64BIT) += perf.o perf_asm.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
...@@ -505,6 +505,18 @@ ...@@ -505,6 +505,18 @@
STREG \pte,0(\ptep) STREG \pte,0(\ptep)
.endm .endm
/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
.macro convert_for_tlb_insert20 pte
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
.endm
/* Convert the pte and prot to tlb insertion values. How /* Convert the pte and prot to tlb insertion values. How
* this happens is quite subtle, read below */ * this happens is quite subtle, read below */
.macro make_insert_tlb spc,pte,prot .macro make_insert_tlb spc,pte,prot
...@@ -544,8 +556,7 @@ ...@@ -544,8 +556,7 @@
depi 1,12,1,\prot depi 1,12,1,\prot
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte convert_for_tlb_insert20 \pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
.endm .endm
/* Identical macro to make_insert_tlb above, except it /* Identical macro to make_insert_tlb above, except it
...@@ -563,8 +574,8 @@ ...@@ -563,8 +574,8 @@
/* Get rid of prot bits and convert to page addr for iitlba */ /* Get rid of prot bits and convert to page addr for iitlba */
depi _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte depi 0,31,ASM_PFN_PTE_SHIFT,\pte
extru \pte,24,25,\pte SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
.endm .endm
/* This is for ILP32 PA2.0 only. The TLB insertion needs /* This is for ILP32 PA2.0 only. The TLB insertion needs
...@@ -1244,10 +1255,9 @@ nadtlb_check_flush_20w: ...@@ -1244,10 +1255,9 @@ nadtlb_check_flush_20w:
depdi,z 7,7,3,prot depdi,z 7,7,3,prot
depdi 1,10,1,prot depdi 1,10,1,prot
/* Get rid of prot bits and convert to page addr for idtlbt */ /* Drop prot bits from pte and convert to page addr for idtlbt */
convert_for_tlb_insert20 pte
depdi 0,63,12,pte
extrd,u pte,56,52,pte
idtlbt pte,prot idtlbt pte,prot
rfir rfir
...@@ -1337,8 +1347,8 @@ nadtlb_check_flush_11: ...@@ -1337,8 +1347,8 @@ nadtlb_check_flush_11:
/* Get rid of prot bits and convert to page addr for idtlba */ /* Get rid of prot bits and convert to page addr for idtlba */
depi 0,31,12,pte depi 0,31,ASM_PFN_PTE_SHIFT,pte
extru pte,24,25,pte SHRREG pte,(ASM_PFN_PTE_SHIFT-(31-26)),pte
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1 mtsp spc,%sr1
...@@ -1403,10 +1413,9 @@ nadtlb_check_flush_20: ...@@ -1403,10 +1413,9 @@ nadtlb_check_flush_20:
depdi,z 7,7,3,prot depdi,z 7,7,3,prot
depdi 1,10,1,prot depdi 1,10,1,prot
/* Get rid of prot bits and convert to page addr for idtlbt */ /* Drop prot bits from pte and convert to page addr for idtlbt */
convert_for_tlb_insert20 pte
depdi 0,63,12,pte
extrd,u pte,56,32,pte
idtlbt pte,prot idtlbt pte,prot
rfir rfir
...@@ -2176,6 +2185,33 @@ syscall_do_resched: ...@@ -2176,6 +2185,33 @@ syscall_do_resched:
ENDPROC(syscall_exit) ENDPROC(syscall_exit)
#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code

/* gcc -pg entry stub: pass %r3 as the third argument (org_sp_gr3,
 * used by the C trampoline to locate the caller's %rp slot) and
 * hand off to the common C tracing entry point. */
ENTRY(_mcount)
	copy	%r3, %arg2
	b	ftrace_function_trampoline
	nop
ENDPROC(_mcount)

/* Substitute return path installed by prepare_ftrace_return().
 * Calls ftrace_return_to_handler(), which yields the original return
 * address in %ret0 and stashes the traced function's real return
 * values in %r23/%r24 (see the HACK comment in ftrace.c). */
ENTRY(return_to_handler)
	load32	return_trampoline, %rp
	copy	%ret0, %arg0
	copy	%ret1, %arg1
	b	ftrace_return_to_handler
	nop
return_trampoline:
	/* %ret0 now holds the original return address -> use as %rp,
	 * then restore the function's saved return values. */
	copy	%ret0, %rp
	copy	%r23, %ret0
	copy	%r24, %ret1

	.globl ftrace_stub
ftrace_stub:
	bv	%r0(%rp)
	nop
ENDPROC(return_to_handler)

#endif	/* CONFIG_FUNCTION_TRACER */
get_register: get_register:
/* /*
* get_register is used by the non access tlb miss handlers to * get_register is used by the non access tlb miss handlers to
......
...@@ -527,7 +527,11 @@ int pdc_model_capabilities(unsigned long *capabilities) ...@@ -527,7 +527,11 @@ int pdc_model_capabilities(unsigned long *capabilities)
pdc_result[0] = 0; /* preset zero (call may not be implemented!) */ pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0); retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
convert_to_wide(pdc_result); convert_to_wide(pdc_result);
if (retval == PDC_OK) {
*capabilities = pdc_result[0]; *capabilities = pdc_result[0];
} else {
*capabilities = PDC_MODEL_OS32;
}
spin_unlock_irqrestore(&pdc_lock, flags); spin_unlock_irqrestore(&pdc_lock, flags);
return retval; return retval;
......
/*
* Code for tracing calls in Linux kernel.
* Copyright (C) 2009 Helge Deller <deller@gmx.de>
*
* based on code for x86 which is:
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
*
* future possible enhancements:
* - add CONFIG_DYNAMIC_FTRACE
* - add CONFIG_STACK_TRACER
*/
#include <linux/init.h>
#include <linux/ftrace.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Add a function return address to the trace stack on thread info.
 * Returns 0 on success, -EBUSY if tracing is not set up for this task
 * or the per-task return stack is full (overrun is counted).
 * On success *depth receives the new stack index. */
static int push_return_trace(unsigned long ret, unsigned long long time,
				unsigned long func, int *depth)
{
	int index;

	/* ret_stack is allocated per-task by the graph tracer core;
	 * nothing to do if this task is not being traced */
	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	/* claim the slot before filling it in */
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}
/* Retrieve a function return address to the trace stack on thread info.
 * Fills *trace from the top entry and stores the original return
 * address in *ret. If the stack has underflowed, tracing is stopped
 * and *ret is pointed at panic() so the CPU has somewhere sane to go. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)
			dereference_function_descriptor(&panic);
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	/* finish reading the entry before releasing the slot */
	barrier();
	current->curr_ret_stack--;
}
/*
 * Send the trace to the ring-buffer.
 * Called from return_to_handler in entry.S in place of the traced
 * function's real return; pops the saved frame and reports it.
 * @retval0/@retval1: the traced function's actual return values,
 *   which must be preserved across this call (see HACK below).
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long retval0,
				       unsigned long retval1)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)
			dereference_function_descriptor(&panic);
	}

	/* HACK: we hand over the old functions' return values
	   in %r23 and %r24. Assembly in entry.S will take care
	   and move those to their final registers %ret0 and %ret1 */
	asm( "copy %0, %%r23 \n\t"
	     "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );

	return ret;
}
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 * @parent: pointer to the caller's saved return-address slot on the
 *   stack; it is redirected to return_to_handler on success and
 *   restored to the original value on any failure path.
 * @self_addr: address of the traced function being entered.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* redirect the return first, undo below if anything fails */
	old = *parent;
	*parent = (unsigned long)
			dereference_function_descriptor(&return_to_handler);

	if (unlikely(!__kernel_text_address(old))) {
		ftrace_graph_stop();
		*parent = old;
		WARN_ON(1);
		return;
	}

	calltime = cpu_clock(raw_smp_processor_id());

	if (push_return_trace(old, calltime,
				self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Common C entry point reached from the _mcount stub in entry.S for
 * every traced function.
 * @parent: caller's return address; @self_addr: traced function address.
 * @org_sp_gr3: value of %r3 captured in the assembly stub -- used below
 *   to locate the caller's saved %rp slot (assumes %r3 still tracks the
 *   caller's stack frame at this point; see sanity checks).
 */
void ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3)
{
	extern ftrace_func_t ftrace_trace_function;

	if (function_trace_stop)
		return;

	/* plain function tracing takes precedence over graph tracing */
	if (ftrace_trace_function != ftrace_stub) {
		ftrace_trace_function(parent, self_addr);
		return;
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_entry && ftrace_graph_return) {
		unsigned long sp;
		unsigned long *parent_rp;

		/* %r30 is the current stack pointer on parisc */
		asm volatile ("copy %%r30, %0" : "=r"(sp));

		/* sanity check: is stack pointer which we got from
		   assembler function in entry.S in a reasonable
		   range compared to current stack pointer? */
		if ((sp - org_sp_gr3) > 0x400)
			return;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
...@@ -311,12 +311,12 @@ unsigned long txn_alloc_addr(unsigned int virt_irq) ...@@ -311,12 +311,12 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
next_cpu++; /* assign to "next" CPU we want this bugger on */ next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */ /* validate entry */
while ((next_cpu < NR_CPUS) && while ((next_cpu < nr_cpu_ids) &&
(!per_cpu(cpu_data, next_cpu).txn_addr || (!per_cpu(cpu_data, next_cpu).txn_addr ||
!cpu_online(next_cpu))) !cpu_online(next_cpu)))
next_cpu++; next_cpu++;
if (next_cpu >= NR_CPUS) if (next_cpu >= nr_cpu_ids)
next_cpu = 0; /* nothing else, assign monarch */ next_cpu = 0; /* nothing else, assign monarch */
return txn_affinity_addr(virt_irq, next_cpu); return txn_affinity_addr(virt_irq, next_cpu);
......
...@@ -61,9 +61,7 @@ ...@@ -61,9 +61,7 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/unwind.h> #include <asm/unwind.h>
#if 0 #if 0
...@@ -115,8 +113,6 @@ struct got_entry { ...@@ -115,8 +113,6 @@ struct got_entry {
Elf32_Addr addr; Elf32_Addr addr;
}; };
#define Elf_Fdesc Elf32_Fdesc
struct stub_entry { struct stub_entry {
Elf32_Word insns[2]; /* each stub entry has two insns */ Elf32_Word insns[2]; /* each stub entry has two insns */
}; };
...@@ -125,8 +121,6 @@ struct got_entry { ...@@ -125,8 +121,6 @@ struct got_entry {
Elf64_Addr addr; Elf64_Addr addr;
}; };
#define Elf_Fdesc Elf64_Fdesc
struct stub_entry { struct stub_entry {
Elf64_Word insns[4]; /* each stub entry has four insns */ Elf64_Word insns[4]; /* each stub entry has four insns */
}; };
...@@ -916,15 +910,3 @@ void module_arch_cleanup(struct module *mod) ...@@ -916,15 +910,3 @@ void module_arch_cleanup(struct module *mod)
deregister_unwind_table(mod); deregister_unwind_table(mod);
module_bug_cleanup(mod); module_bug_cleanup(mod);
} }
#ifdef CONFIG_64BIT
void *dereference_function_descriptor(void *ptr)
{
Elf64_Fdesc *desc = ptr;
void *p;
if (!probe_kernel_address(&desc->addr, p))
ptr = p;
return ptr;
}
#endif
...@@ -153,5 +153,10 @@ EXPORT_SYMBOL(node_data); ...@@ -153,5 +153,10 @@ EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(pfnnid_map); EXPORT_SYMBOL(pfnnid_map);
#endif #endif
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif
/* from pacache.S -- needed for copy_page */ /* from pacache.S -- needed for copy_page */
EXPORT_SYMBOL(copy_user_page_asm); EXPORT_SYMBOL(copy_user_page_asm);
...@@ -46,14 +46,15 @@ ...@@ -46,14 +46,15 @@
#include <linux/stddef.h> #include <linux/stddef.h>
#include <linux/unistd.h> #include <linux/unistd.h>
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/pdc.h> #include <asm/pdc.h>
#include <asm/pdc_chassis.h> #include <asm/pdc_chassis.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/unwind.h> #include <asm/unwind.h>
#include <asm/sections.h>
/* /*
* The idle thread. There's no useful work to be * The idle thread. There's no useful work to be
...@@ -231,8 +232,8 @@ sys_clone(unsigned long clone_flags, unsigned long usp, ...@@ -231,8 +232,8 @@ sys_clone(unsigned long clone_flags, unsigned long usp,
However, these last 3 args are only examined However, these last 3 args are only examined
if the proper flags are set. */ if the proper flags are set. */
int __user *child_tidptr; int __user *parent_tidptr = (int __user *)regs->gr[24];
int __user *parent_tidptr; int __user *child_tidptr = (int __user *)regs->gr[22];
/* usp must be word aligned. This also prevents users from /* usp must be word aligned. This also prevents users from
* passing in the value 1 (which is the signal for a special * passing in the value 1 (which is the signal for a special
...@@ -243,16 +244,6 @@ sys_clone(unsigned long clone_flags, unsigned long usp, ...@@ -243,16 +244,6 @@ sys_clone(unsigned long clone_flags, unsigned long usp,
if (usp == 0) if (usp == 0)
usp = regs->gr[30]; usp = regs->gr[30];
if (clone_flags & CLONE_PARENT_SETTID)
parent_tidptr = (int __user *)regs->gr[24];
else
parent_tidptr = NULL;
if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID))
child_tidptr = (int __user *)regs->gr[22];
else
child_tidptr = NULL;
return do_fork(clone_flags, usp, regs, 0, parent_tidptr, child_tidptr); return do_fork(clone_flags, usp, regs, 0, parent_tidptr, child_tidptr);
} }
...@@ -400,3 +391,15 @@ get_wchan(struct task_struct *p) ...@@ -400,3 +391,15 @@ get_wchan(struct task_struct *p)
} while (count++ < 16); } while (count++ < 16);
return 0; return 0;
} }
#ifdef CONFIG_64BIT
void *dereference_function_descriptor(void *ptr)
{
Elf64_Fdesc *desc = ptr;
void *p;
if (!probe_kernel_address(&desc->addr, p))
ptr = p;
return ptr;
}
#endif
...@@ -100,8 +100,8 @@ static int __cpuinit processor_probe(struct parisc_device *dev) ...@@ -100,8 +100,8 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
struct cpuinfo_parisc *p; struct cpuinfo_parisc *p;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (num_online_cpus() >= NR_CPUS) { if (num_online_cpus() >= nr_cpu_ids) {
printk(KERN_INFO "num_online_cpus() >= NR_CPUS\n"); printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
return 1; return 1;
} }
#else #else
...@@ -214,7 +214,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev) ...@@ -214,7 +214,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
*/ */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (cpuid) { if (cpuid) {
cpu_set(cpuid, cpu_present_map); set_cpu_present(cpuid, true);
cpu_up(cpuid); cpu_up(cpuid);
} }
#endif #endif
...@@ -364,6 +364,13 @@ show_cpuinfo (struct seq_file *m, void *v) ...@@ -364,6 +364,13 @@ show_cpuinfo (struct seq_file *m, void *v)
boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 ); boot_cpu_data.cpu_hz % 1000000 );
seq_printf(m, "capabilities\t:");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
seq_printf(m, " os32");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
seq_printf(m, " os64");
seq_printf(m, "\n");
seq_printf(m, "model\t\t: %s\n" seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n", "model name\t: %s\n",
boot_cpu_data.pdc.sys_model_name, boot_cpu_data.pdc.sys_model_name,
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/ftrace.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/atomic.h> #include <asm/atomic.h>
...@@ -113,14 +114,14 @@ halt_processor(void) ...@@ -113,14 +114,14 @@ halt_processor(void)
{ {
/* REVISIT : redirect I/O Interrupts to another CPU? */ /* REVISIT : redirect I/O Interrupts to another CPU? */
/* REVISIT : does PM *know* this CPU isn't available? */ /* REVISIT : does PM *know* this CPU isn't available? */
cpu_clear(smp_processor_id(), cpu_online_map); set_cpu_online(smp_processor_id(), false);
local_irq_disable(); local_irq_disable();
for (;;) for (;;)
; ;
} }
irqreturn_t irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id) ipi_interrupt(int irq, void *dev_id)
{ {
int this_cpu = smp_processor_id(); int this_cpu = smp_processor_id();
...@@ -214,11 +215,11 @@ ipi_send(int cpu, enum ipi_message_type op) ...@@ -214,11 +215,11 @@ ipi_send(int cpu, enum ipi_message_type op)
} }
static void static void
send_IPI_mask(cpumask_t mask, enum ipi_message_type op) send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{ {
int cpu; int cpu;
for_each_cpu_mask(cpu, mask) for_each_cpu(cpu, mask)
ipi_send(cpu, op); ipi_send(cpu, op);
} }
...@@ -257,7 +258,7 @@ smp_send_all_nop(void) ...@@ -257,7 +258,7 @@ smp_send_all_nop(void)
send_IPI_allbutself(IPI_NOP); send_IPI_allbutself(IPI_NOP);
} }
void arch_send_call_function_ipi(cpumask_t mask) void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{ {
send_IPI_mask(mask, IPI_CALL_FUNC); send_IPI_mask(mask, IPI_CALL_FUNC);
} }
...@@ -296,13 +297,14 @@ smp_cpu_init(int cpunum) ...@@ -296,13 +297,14 @@ smp_cpu_init(int cpunum)
mb(); mb();
/* Well, support 2.4 linux scheme as well. */ /* Well, support 2.4 linux scheme as well. */
if (cpu_test_and_set(cpunum, cpu_online_map)) if (cpu_isset(cpunum, cpu_online_map))
{ {
extern void machine_halt(void); /* arch/parisc.../process.c */ extern void machine_halt(void); /* arch/parisc.../process.c */
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
machine_halt(); machine_halt();
} }
set_cpu_online(cpunum, true);
/* Initialise the idle task for this CPU */ /* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count); atomic_inc(&init_mm.mm_count);
...@@ -424,8 +426,8 @@ void __init smp_prepare_boot_cpu(void) ...@@ -424,8 +426,8 @@ void __init smp_prepare_boot_cpu(void)
/* Setup BSP mappings */ /* Setup BSP mappings */
printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor); printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
cpu_set(bootstrap_processor, cpu_online_map); set_cpu_online(bootstrap_processor, true);
cpu_set(bootstrap_processor, cpu_present_map); set_cpu_present(bootstrap_processor, true);
} }
...@@ -436,8 +438,7 @@ void __init smp_prepare_boot_cpu(void) ...@@ -436,8 +438,7 @@ void __init smp_prepare_boot_cpu(void)
*/ */
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
cpus_clear(cpu_present_map); init_cpu_present(cpumask_of(0));
cpu_set(0, cpu_present_map);
parisc_max_cpus = max_cpus; parisc_max_cpus = max_cpus;
if (!max_cpus) if (!max_cpus)
......
/*
* Stack trace management functions
*
* Copyright (C) 2009 Helge Deller <deller@gmx.de>
* based on arch/x86/kernel/stacktrace.c by Ingo Molnar <mingo@redhat.com>
* and parisc unwind functions by Randolph Chung <tausq@debian.org>
*
* TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT)
*/
#include <linux/module.h>
#include <linux/stacktrace.h>
#include <asm/unwind.h>
/*
 * Walk the kernel stack of @task and store kernel-text return
 * addresses into @trace->entries (at most @trace->max_entries).
 *
 * For the currently running task a pt_regs snapshot is faked from the
 * live stack pointer and return address; for any other task the
 * unwinder is seeded from that task's blocked (switched-out) context.
 */
static void dump_trace(struct task_struct *task, struct stack_trace *trace)
{
	struct unwind_frame_info info;

	/* initialize unwind info */
	if (task == current) {
		unsigned long sp;
		struct pt_regs r;
HERE:
		/* read the current stack pointer out of %r30 */
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		memset(&r, 0, sizeof(struct pt_regs));
		/*
		 * Fake an interruption state at this point: instruction
		 * address from the HERE label (GCC labels-as-values),
		 * return pointer from __builtin_return_address(0),
		 * stack pointer from the asm above.  The label must stay
		 * directly adjacent to the asm statement.
		 */
		r.iaoq[0] = (unsigned long)&&HERE;
		r.gr[2] = (unsigned long)__builtin_return_address(0);
		r.gr[30] = sp;
		unwind_frame_init(&info, task, &r);
	} else {
		unwind_frame_init_from_blocked_task(&info, task);
	}

	/* unwind stack and save entries in stack_trace struct */
	trace->nr_entries = 0;
	while (trace->nr_entries < trace->max_entries) {
		/* stop at the end of the unwind chain or a NULL ip */
		if (unwind_once(&info) < 0 || info.ip == 0)
			break;

		/* record only addresses that lie in kernel text */
		if (__kernel_text_address(info.ip))
			trace->entries[trace->nr_entries++] = info.ip;
	}
}
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */

/*
 * Append the ULONG_MAX end-of-trace marker if there is still room in
 * the entry buffer.  Shared tail of both save_stack_trace() variants
 * below (previously duplicated in each).
 */
static void stack_trace_terminate(struct stack_trace *trace)
{
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

/*
 * Save the stack backtrace of the current task into @trace.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, trace);
	stack_trace_terminate(trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

/*
 * Save the stack backtrace of the given task @tsk into @trace.
 * @tsk may be any task; non-current tasks are unwound from their
 * blocked context (see dump_trace()).
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, trace);
	stack_trace_terminate(trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
...@@ -365,17 +365,51 @@ tracesys_sigexit: ...@@ -365,17 +365,51 @@ tracesys_sigexit:
/********************************************************* /*********************************************************
Light-weight-syscall code 32/64-bit Light-Weight-Syscall ABI
r20 - lws number * - Indicates a hint for userspace inline asm
r26,r25,r24,r23,r22 - Input registers implementations.
r28 - Function return register
r21 - Error code.
Scratch: Any of the above that aren't being Syscall number (caller-saves)
currently used, including r1. - %r20
* In asm clobber.
Return pointer: r31 (Not usable) Argument registers (caller-saves)
- %r26, %r25, %r24, %r23, %r22
* In asm input.
Return registers (caller-saves)
- %r28 (return), %r21 (errno)
* In asm output.
Caller-saves registers
- %r1, %r27, %r29
- %r2 (return pointer)
- %r31 (ble link register)
* In asm clobber.
Callee-saves registers
- %r3-%r18
- %r30 (stack pointer)
* Not in asm clobber.
If userspace is 32-bit:
Callee-saves registers
- %r19 (32-bit PIC register)
Differences from 32-bit calling convention:
- Syscall number in %r20
- Additional argument register %r22 (arg4)
- Callee-saves %r19.
If userspace is 64-bit:
Callee-saves registers
- %r27 (64-bit PIC register)
Differences from 64-bit calling convention:
- Syscall number in %r20
- Additional argument register %r22 (arg4)
- Callee-saves %r27.
Error codes returned by entry path: Error codes returned by entry path:
...@@ -473,7 +507,8 @@ lws_compare_and_swap64: ...@@ -473,7 +507,8 @@ lws_compare_and_swap64:
b,n lws_compare_and_swap b,n lws_compare_and_swap
#else #else
/* If we are not a 64-bit kernel, then we don't /* If we are not a 64-bit kernel, then we don't
* implement having 64-bit input registers * have 64-bit input registers, and calling
* the 64-bit LWS CAS returns ENOSYS.
*/ */
b,n lws_exit_nosys b,n lws_exit_nosys
#endif #endif
...@@ -635,12 +670,15 @@ END(sys_call_table64) ...@@ -635,12 +670,15 @@ END(sys_call_table64)
/* /*
All light-weight-syscall atomic operations All light-weight-syscall atomic operations
will use this set of locks will use this set of locks
NOTE: The lws_lock_start symbol must be
at least 16-byte aligned for safe use
with ldcw.
*/ */
.section .data .section .data
.align PAGE_SIZE .align PAGE_SIZE
ENTRY(lws_lock_start) ENTRY(lws_lock_start)
/* lws locks */ /* lws locks */
.align 16
.rept 16 .rept 16
/* Keep locks aligned at 16-bytes */ /* Keep locks aligned at 16-bytes */
.word 1 .word 1
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/profile.h> #include <linux/profile.h>
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/ftrace.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/io.h> #include <asm/io.h>
...@@ -53,7 +54,7 @@ static unsigned long clocktick __read_mostly; /* timer cycles per tick */ ...@@ -53,7 +54,7 @@ static unsigned long clocktick __read_mostly; /* timer cycles per tick */
* held off for an arbitrarily long period of time by interrupts being * held off for an arbitrarily long period of time by interrupts being
* disabled, so we may miss one or more ticks. * disabled, so we may miss one or more ticks.
*/ */
irqreturn_t timer_interrupt(int irq, void *dev_id) irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{ {
unsigned long now; unsigned long now;
unsigned long next_tick; unsigned long next_tick;
......
...@@ -247,6 +247,8 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) ...@@ -247,6 +247,8 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
oops_in_progress = 1; oops_in_progress = 1;
oops_enter();
/* Amuse the user in a SPARC fashion */ /* Amuse the user in a SPARC fashion */
if (err) printk( if (err) printk(
KERN_CRIT " _______________________________ \n" KERN_CRIT " _______________________________ \n"
...@@ -293,6 +295,7 @@ KERN_CRIT " || ||\n"); ...@@ -293,6 +295,7 @@ KERN_CRIT " || ||\n");
panic("Fatal exception"); panic("Fatal exception");
} }
oops_exit();
do_exit(SIGSEGV); do_exit(SIGSEGV);
} }
...@@ -494,7 +497,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o ...@@ -494,7 +497,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
panic(msg); panic(msg);
} }
void handle_interruption(int code, struct pt_regs *regs) void notrace handle_interruption(int code, struct pt_regs *regs)
{ {
unsigned long fault_address = 0; unsigned long fault_address = 0;
unsigned long fault_space = 0; unsigned long fault_space = 0;
......
...@@ -54,6 +54,8 @@ SECTIONS ...@@ -54,6 +54,8 @@ SECTIONS
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
*(.text.do_softirq) *(.text.do_softirq)
*(.text.sys_exit) *(.text.sys_exit)
*(.text.do_sigaltstack) *(.text.do_sigaltstack)
......
...@@ -456,6 +456,13 @@ void __init mem_init(void) ...@@ -456,6 +456,13 @@ void __init mem_init(void)
{ {
int codesize, reservedpages, datasize, initsize; int codesize, reservedpages, datasize, initsize;
/* Do sanity checks on page table constants */
BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
> BITS_PER_LONG);
high_memory = __va((max_pfn << PAGE_SHIFT)); high_memory = __va((max_pfn << PAGE_SHIFT));
#ifndef CONFIG_DISCONTIGMEM #ifndef CONFIG_DISCONTIGMEM
......
...@@ -81,7 +81,7 @@ static int __init asp_init_chip(struct parisc_device *dev) ...@@ -81,7 +81,7 @@ static int __init asp_init_chip(struct parisc_device *dev)
asp.hpa = ASP_INTERRUPT_ADDR; asp.hpa = ASP_INTERRUPT_ADDR;
printk(KERN_INFO "%s version %d at 0x%lx found.\n", printk(KERN_INFO "%s version %d at 0x%lx found.\n",
asp.name, asp.version, dev->hpa.start); asp.name, asp.version, (unsigned long)dev->hpa.start);
/* the IRQ ASP should use */ /* the IRQ ASP should use */
ret = -EBUSY; ret = -EBUSY;
......
...@@ -406,8 +406,6 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) ...@@ -406,8 +406,6 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
} }
ioc->avg_search[ioc->avg_idx++] = cr_start; ioc->avg_search[ioc->avg_idx++] = cr_start;
ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1; ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
#endif
#ifdef CCIO_COLLECT_STATS
ioc->used_pages += pages_needed; ioc->used_pages += pages_needed;
#endif #endif
/* /*
...@@ -453,10 +451,10 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped) ...@@ -453,10 +451,10 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
unsigned long mask = ~(~0UL >> pages_mapped); unsigned long mask = ~(~0UL >> pages_mapped);
CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8); CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else #else
CCIO_FREE_MAPPINGS(ioc, res_idx, 0xff, 8); CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif #endif
} else if(pages_mapped <= 16) { } else if(pages_mapped <= 16) {
CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffff, 16); CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
} else if(pages_mapped <= 32) { } else if(pages_mapped <= 32) {
CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32); CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__ #ifdef __LP64__
...@@ -1028,8 +1026,10 @@ static int ccio_proc_info(struct seq_file *m, void *p) ...@@ -1028,8 +1026,10 @@ static int ccio_proc_info(struct seq_file *m, void *p)
while (ioc != NULL) { while (ioc != NULL) {
unsigned int total_pages = ioc->res_size << 3; unsigned int total_pages = ioc->res_size << 3;
#ifdef CCIO_COLLECT_STATS
unsigned long avg = 0, min, max; unsigned long avg = 0, min, max;
int j; int j;
#endif
len += seq_printf(m, "%s\n", ioc->name); len += seq_printf(m, "%s\n", ioc->name);
...@@ -1060,8 +1060,7 @@ static int ccio_proc_info(struct seq_file *m, void *p) ...@@ -1060,8 +1060,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
avg /= CCIO_SEARCH_SAMPLE; avg /= CCIO_SEARCH_SAMPLE;
len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
min, avg, max); min, avg, max);
#endif
#ifdef CCIO_COLLECT_STATS
len += seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n", len += seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
ioc->msingle_calls, ioc->msingle_pages, ioc->msingle_calls, ioc->msingle_pages,
(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
...@@ -1400,7 +1399,7 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr) ...@@ -1400,7 +1399,7 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
result = insert_resource(&iomem_resource, res); result = insert_resource(&iomem_resource, res);
if (result < 0) { if (result < 0) {
printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n", printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
__func__, res->start, res->end); __func__, (unsigned long)res->start, (unsigned long)res->end);
} }
} }
...@@ -1551,7 +1550,8 @@ static int __init ccio_probe(struct parisc_device *dev) ...@@ -1551,7 +1550,8 @@ static int __init ccio_probe(struct parisc_device *dev)
ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn"; ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name, dev->hpa.start); printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
(unsigned long)dev->hpa.start);
for (i = 0; i < ioc_count; i++) { for (i = 0; i < ioc_count; i++) {
ioc_p = &(*ioc_p)->next; ioc_p = &(*ioc_p)->next;
......
...@@ -819,7 +819,9 @@ dino_bridge_init(struct dino_device *dino_dev, const char *name) ...@@ -819,7 +819,9 @@ dino_bridge_init(struct dino_device *dino_dev, const char *name)
result = ccio_request_resource(dino_dev->hba.dev, &res[i]); result = ccio_request_resource(dino_dev->hba.dev, &res[i]);
if (result < 0) { if (result < 0) {
printk(KERN_ERR "%s: failed to claim PCI Bus address space %d (0x%lx-0x%lx)!\n", name, i, res[i].start, res[i].end); printk(KERN_ERR "%s: failed to claim PCI Bus address "
"space %d (0x%lx-0x%lx)!\n", name, i,
(unsigned long)res[i].start, (unsigned long)res[i].end);
return result; return result;
} }
} }
...@@ -899,7 +901,8 @@ static int __init dino_common_init(struct parisc_device *dev, ...@@ -899,7 +901,8 @@ static int __init dino_common_init(struct parisc_device *dev,
if (request_resource(&ioport_resource, res) < 0) { if (request_resource(&ioport_resource, res) < 0) {
printk(KERN_ERR "%s: request I/O Port region failed " printk(KERN_ERR "%s: request I/O Port region failed "
"0x%lx/%lx (hpa 0x%p)\n", "0x%lx/%lx (hpa 0x%p)\n",
name, res->start, res->end, dino_dev->hba.base_addr); name, (unsigned long)res->start, (unsigned long)res->end,
dino_dev->hba.base_addr);
return 1; return 1;
} }
......
...@@ -314,7 +314,7 @@ static int __init eisa_probe(struct parisc_device *dev) ...@@ -314,7 +314,7 @@ static int __init eisa_probe(struct parisc_device *dev)
char *name = is_mongoose(dev) ? "Mongoose" : "Wax"; char *name = is_mongoose(dev) ? "Mongoose" : "Wax";
printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n", printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n",
name, dev->hpa.start); name, (unsigned long)dev->hpa.start);
eisa_dev.hba.dev = dev; eisa_dev.hba.dev = dev;
eisa_dev.hba.iommu = ccio_get_iommu(dev); eisa_dev.hba.iommu = ccio_get_iommu(dev);
......
...@@ -98,7 +98,7 @@ static int configure_memory(const unsigned char *buf, ...@@ -98,7 +98,7 @@ static int configure_memory(const unsigned char *buf,
res->start = mem_parent->start + get_24(buf+len+2); res->start = mem_parent->start + get_24(buf+len+2);
res->end = res->start + get_16(buf+len+5)*1024; res->end = res->start + get_16(buf+len+5)*1024;
res->flags = IORESOURCE_MEM; res->flags = IORESOURCE_MEM;
printk("memory %lx-%lx ", res->start, res->end); printk("memory %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end);
result = request_resource(mem_parent, res); result = request_resource(mem_parent, res);
if (result < 0) { if (result < 0) {
printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
...@@ -188,7 +188,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent, ...@@ -188,7 +188,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent,
res->start = get_16(buf+len+1); res->start = get_16(buf+len+1);
res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1; res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1;
res->flags = IORESOURCE_IO; res->flags = IORESOURCE_IO;
printk("ioports %lx-%lx ", res->start, res->end); printk("ioports %lx-%lx ", (unsigned long)res->start, (unsigned long)res->end);
result = request_resource(io_parent, res); result = request_resource(io_parent, res);
if (result < 0) { if (result < 0) {
printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n"); printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
......
...@@ -714,7 +714,7 @@ static void iosapic_set_affinity_irq(unsigned int irq, ...@@ -714,7 +714,7 @@ static void iosapic_set_affinity_irq(unsigned int irq,
if (dest_cpu < 0) if (dest_cpu < 0)
return; return;
irq_desc[irq].affinity = cpumask_of_cpu(dest_cpu); cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(irq, dest_cpu); vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
spin_lock_irqsave(&iosapic_lock, flags); spin_lock_irqsave(&iosapic_lock, flags);
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* *
* (c) Copyright 2000 Red Hat Software * (c) Copyright 2000 Red Hat Software
* (c) Copyright 2000 Helge Deller <hdeller@redhat.com> * (c) Copyright 2000 Helge Deller <hdeller@redhat.com>
* (c) Copyright 2001-2005 Helge Deller <deller@gmx.de> * (c) Copyright 2001-2009 Helge Deller <deller@gmx.de>
* (c) Copyright 2001 Randolph Chung <tausq@debian.org> * (c) Copyright 2001 Randolph Chung <tausq@debian.org>
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
...@@ -243,13 +243,11 @@ static int __init led_create_procfs(void) ...@@ -243,13 +243,11 @@ static int __init led_create_procfs(void)
proc_pdc_root = proc_mkdir("pdc", 0); proc_pdc_root = proc_mkdir("pdc", 0);
if (!proc_pdc_root) return -1; if (!proc_pdc_root) return -1;
proc_pdc_root->owner = THIS_MODULE;
ent = create_proc_entry("led", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root); ent = create_proc_entry("led", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root);
if (!ent) return -1; if (!ent) return -1;
ent->data = (void *)LED_NOLCD; /* LED */ ent->data = (void *)LED_NOLCD; /* LED */
ent->read_proc = led_proc_read; ent->read_proc = led_proc_read;
ent->write_proc = led_proc_write; ent->write_proc = led_proc_write;
ent->owner = THIS_MODULE;
if (led_type == LED_HASLCD) if (led_type == LED_HASLCD)
{ {
...@@ -258,7 +256,6 @@ static int __init led_create_procfs(void) ...@@ -258,7 +256,6 @@ static int __init led_create_procfs(void)
ent->data = (void *)LED_HASLCD; /* LCD */ ent->data = (void *)LED_HASLCD; /* LCD */
ent->read_proc = led_proc_read; ent->read_proc = led_proc_read;
ent->write_proc = led_proc_write; ent->write_proc = led_proc_write;
ent->owner = THIS_MODULE;
} }
return 0; return 0;
...@@ -463,9 +460,20 @@ static void led_work_func (struct work_struct *unused) ...@@ -463,9 +460,20 @@ static void led_work_func (struct work_struct *unused)
if (likely(led_lanrxtx)) currentleds |= led_get_net_activity(); if (likely(led_lanrxtx)) currentleds |= led_get_net_activity();
if (likely(led_diskio)) currentleds |= led_get_diskio_activity(); if (likely(led_diskio)) currentleds |= led_get_diskio_activity();
/* blink all LEDs twice a second if we got an Oops (HPMC) */ /* blink LEDs if we got an Oops (HPMC) */
if (unlikely(oops_in_progress)) if (unlikely(oops_in_progress)) {
currentleds = (count_HZ<=(HZ/2)) ? 0 : 0xff; if (boot_cpu_data.cpu_type >= pcxl2) {
/* newer machines don't have loadavg. LEDs, so we
* let all LEDs blink twice per second instead */
currentleds = (count_HZ <= (HZ/2)) ? 0 : 0xff;
} else {
/* old machines: blink loadavg. LEDs twice per second */
if (count_HZ <= (HZ/2))
currentleds &= ~(LED4|LED5|LED6|LED7);
else
currentleds |= (LED4|LED5|LED6|LED7);
}
}
if (currentleds != lastleds) if (currentleds != lastleds)
{ {
...@@ -511,7 +519,7 @@ static int led_halt(struct notifier_block *nb, unsigned long event, void *buf) ...@@ -511,7 +519,7 @@ static int led_halt(struct notifier_block *nb, unsigned long event, void *buf)
/* Cancel the work item and delete the queue */ /* Cancel the work item and delete the queue */
if (led_wq) { if (led_wq) {
cancel_rearming_delayed_workqueue(led_wq, &led_task); cancel_delayed_work_sync(&led_task);
destroy_workqueue(led_wq); destroy_workqueue(led_wq);
led_wq = NULL; led_wq = NULL;
} }
...@@ -630,7 +638,7 @@ int lcd_print( const char *str ) ...@@ -630,7 +638,7 @@ int lcd_print( const char *str )
/* temporarily disable the led work task */ /* temporarily disable the led work task */
if (led_wq) if (led_wq)
cancel_rearming_delayed_workqueue(led_wq, &led_task); cancel_delayed_work_sync(&led_task);
/* copy display string to buffer for procfs */ /* copy display string to buffer for procfs */
strlcpy(lcd_text, str, sizeof(lcd_text)); strlcpy(lcd_text, str, sizeof(lcd_text));
......
...@@ -75,6 +75,10 @@ case "${ARCH}" in ...@@ -75,6 +75,10 @@ case "${ARCH}" in
alpha) alpha)
[ -f "${objtree}/arch/alpha/boot/vmlinux.gz" ] && cp -v -- "${objtree}/arch/alpha/boot/vmlinux.gz" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}" [ -f "${objtree}/arch/alpha/boot/vmlinux.gz" ] && cp -v -- "${objtree}/arch/alpha/boot/vmlinux.gz" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}"
;; ;;
parisc*)
[ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
[ -f "${objtree}/lifimage" ] && cp -v -- "${objtree}/lifimage" "${tmpdir}/boot/lifimage-${KERNELRELEASE}"
;;
vax) vax)
[ -f "${objtree}/vmlinux.SYS" ] && cp -v -- "${objtree}/vmlinux.SYS" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}.SYS" [ -f "${objtree}/vmlinux.SYS" ] && cp -v -- "${objtree}/vmlinux.SYS" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}.SYS"
[ -f "${objtree}/vmlinux.dsk" ] && cp -v -- "${objtree}/vmlinux.dsk" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}.dsk" [ -f "${objtree}/vmlinux.dsk" ] && cp -v -- "${objtree}/vmlinux.dsk" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}.dsk"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment