Commit 74b6eb6b authored by Ingo Molnar


Merge branches 'x86/asm', 'x86/cleanups', 'x86/cpudetect', 'x86/debug', 'x86/doc', 'x86/header-fixes', 'x86/mm', 'x86/paravirt', 'x86/pat', 'x86/setup-v2', 'x86/subarch', 'x86/uaccess' and 'x86/urgent' into x86/core
@@ -158,7 +158,7 @@ Offset	Proto	Name		Meaning
 0202/4	2.00+	header		Magic signature "HdrS"
 0206/2	2.00+	version		Boot protocol version supported
 0208/4	2.00+	realmode_swtch	Boot loader hook (see below)
-020C/2	2.00+	start_sys	The load-low segment (0x1000) (obsolete)
+020C/2	2.00+	start_sys_seg	The load-low segment (0x1000) (obsolete)
 020E/2	2.00+	kernel_version	Pointer to kernel version string
 0210/1	2.00+	type_of_loader	Boot loader identifier
 0211/1	2.00+	loadflags	Boot protocol option flags
@@ -170,10 +170,11 @@ Offset	Proto	Name		Meaning
 0224/2	2.01+	heap_end_ptr	Free memory after setup end
 0226/2	N/A	pad1		Unused
 0228/4	2.02+	cmd_line_ptr	32-bit pointer to the kernel command line
-022C/4	2.03+	initrd_addr_max	Highest legal initrd address
+022C/4	2.03+	ramdisk_max	Highest legal initrd address
 0230/4	2.05+	kernel_alignment Physical addr alignment required for kernel
 0234/1	2.05+	relocatable_kernel Whether kernel is relocatable or not
-0235/3	N/A	pad2		Unused
+0235/1	N/A	pad2		Unused
+0236/2	N/A	pad3		Unused
 0238/4	2.06+	cmdline_size	Maximum size of the kernel command line
 023C/4	2.07+	hardware_subarch Hardware subarchitecture
 0240/8	2.07+	hardware_subarch_data Subarchitecture-specific data
@@ -299,14 +300,14 @@ Protocol:	2.00+
 	e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
 	10.17.

-Field name:	readmode_swtch
+Field name:	realmode_swtch
 Type:		modify (optional)
 Offset/size:	0x208/4
 Protocol:	2.00+

 	Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)

-Field name:	start_sys
+Field name:	start_sys_seg
 Type:		read
 Offset/size:	0x20c/2
 Protocol:	2.00+
@@ -468,7 +469,7 @@ Protocol:	2.02+
 	zero, the kernel will assume that your boot loader does not support
 	the 2.02+ protocol.

-Field name:	initrd_addr_max
+Field name:	ramdisk_max
 Type:		read
 Offset/size:	0x22c/4
 Protocol:	2.03+
...
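The renames in this file (start_sys to start_sys_seg, initrd_addr_max to ramdisk_max) only bring the documentation in line with the field names used by the setup code; offsets and semantics are unchanged. As a hedged illustration of how a boot loader consumes this header table (a sketch, not code from the patch; the buffer is assumed to hold the start of the real-mode kernel image):

#include <stdint.h>
#include <string.h>

/* Sketch: validate the setup header before trusting fields like
 * ramdisk_max, which only exists from protocol 2.03 on. */
static int boot_header_ok(const uint8_t *setup)
{
	uint32_t magic;
	uint16_t version;

	memcpy(&magic, setup + 0x202, 4);	/* "header": magic signature */
	memcpy(&version, setup + 0x206, 2);	/* boot protocol version */

	if (magic != 0x53726448)		/* "HdrS", little endian */
		return 0;
	return version >= 0x0203;
}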
@@ -269,9 +269,8 @@ void vesa_store_edid(void)
 	   we genuinely have to assume all registers are destroyed here. */
 	asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es"
-	    : "+a" (ax), "+b" (bx)
-	    : "c" (cx), "D" (di)
-	    : "esi");
+	    : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di)
+	    : : "esi", "edx");

 	if (ax != 0x004f)
 		return;		/* No EDID */
@@ -285,9 +284,9 @@ void vesa_store_edid(void)
 	dx = 0;			/* EDID block number */
 	di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */
 	asm(INT10
-	    : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info)
-	    : "c" (cx), "D" (di)
-	    : "esi");
+	    : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info),
+	      "+c" (cx), "+D" (di)
+	    : : "esi");
 #endif /* CONFIG_FIRMWARE_EDID */
 }
...
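Both hunks fix the same class of inline-assembly bug: registers that the INT 0x10 BIOS call can modify (cx, di, and dx) were declared as plain inputs, so GCC was entitled to assume they kept their values across the asm. Promoting them to "+" (read-write) operands, or adding them to the clobber list, tells the compiler the truth. A toy illustration of the pattern (not code from the patch):

/* An asm that modifies a register operand must not declare it as a
 * plain input. */
static inline unsigned int bump(unsigned int v)
{
	/* Wrong:  asm("incl %0" : : "r" (v));
	 *         the register holding v changes behind GCC's back. */
	asm("incl %0" : "+r" (v));	/* Right: read-write operand. */
	return v;
}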
@@ -91,7 +91,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 				unsigned long prot_val);
-extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);

 /*
  * early_ioremap() and early_iounmap() are for temporary early boot-time
...
#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
#define _ASM_X86_MACH_RDC321X_GPIO_H
#include <linux/kernel.h>
extern int rdc_gpio_get_value(unsigned gpio);
extern void rdc_gpio_set_value(unsigned gpio, int value);
extern int rdc_gpio_direction_input(unsigned gpio);
extern int rdc_gpio_direction_output(unsigned gpio, int value);
extern int rdc_gpio_request(unsigned gpio, const char *label);
extern void rdc_gpio_free(unsigned gpio);
extern void __init rdc321x_gpio_setup(void);
/* Wrappers for the arch-neutral GPIO API */
static inline int gpio_request(unsigned gpio, const char *label)
{
return rdc_gpio_request(gpio, label);
}
static inline void gpio_free(unsigned gpio)
{
might_sleep();
rdc_gpio_free(gpio);
}
static inline int gpio_direction_input(unsigned gpio)
{
return rdc_gpio_direction_input(gpio);
}
static inline int gpio_direction_output(unsigned gpio, int value)
{
return rdc_gpio_direction_output(gpio, value);
}
static inline int gpio_get_value(unsigned gpio)
{
return rdc_gpio_get_value(gpio);
}
static inline void gpio_set_value(unsigned gpio, int value)
{
rdc_gpio_set_value(gpio, value);
}
static inline int gpio_to_irq(unsigned gpio)
{
return gpio;
}
static inline int irq_to_gpio(unsigned irq)
{
return irq;
}
/* For cansleep */
#include <asm-generic/gpio.h>
#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
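These wrappers expose the RDC321x GPIO block through the arch-neutral GPIO calls. A hedged usage sketch of the API above (the pin number and label are invented for illustration):

static int __init demo_led_init(void)
{
	int err;

	err = gpio_request(1, "demo-led");	/* routes to rdc_gpio_request() */
	if (err)
		return err;

	err = gpio_direction_output(1, 0);	/* pin becomes an output, driven low */
	if (err) {
		gpio_free(1);
		return err;
	}

	gpio_set_value(1, 1);			/* drive the pin high */
	return 0;
}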
@@ -147,7 +147,7 @@ static inline pteval_t native_pte_val(pte_t pte)
 	return pte.pte;
 }

-static inline pteval_t native_pte_flags(pte_t pte)
+static inline pteval_t pte_flags(pte_t pte)
 {
 	return native_pte_val(pte) & PTE_FLAGS_MASK;
 }
@@ -173,7 +173,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
 #endif

 #define pte_val(x)	native_pte_val(x)
-#define pte_flags(x)	native_pte_flags(x)
 #define __pte(x)	native_make_pte(x)

 #endif	/* CONFIG_PARAVIRT */
...
@@ -280,7 +280,6 @@ struct pv_mmu_ops {
 					pte_t *ptep, pte_t pte);

 	pteval_t (*pte_val)(pte_t);
-	pteval_t (*pte_flags)(pte_t);
 	pte_t (*make_pte)(pteval_t pte);

 	pgdval_t (*pgd_val)(pgd_t);
@@ -1086,23 +1085,6 @@ static inline pteval_t pte_val(pte_t pte)
 	return ret;
 }

-static inline pteval_t pte_flags(pte_t pte)
-{
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
-				 pte.pte, (u64)pte.pte >> 32);
-	else
-		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
-				 pte.pte);
-
-#ifdef CONFIG_PARAVIRT_DEBUG
-	BUG_ON(ret & PTE_PFN_MASK);
-#endif
-	return ret;
-}
-
 static inline pgd_t __pgd(pgdval_t val)
 {
 	pgdval_t ret;
@@ -1391,8 +1373,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 void _paravirt_nop(void);
 #define paravirt_nop	((void *)_paravirt_nop)

-void paravirt_use_bytelocks(void);
-
 #ifdef CONFIG_SMP
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
...
@@ -5,10 +5,8 @@
 #ifdef CONFIG_X86_PAT
 extern int pat_enabled;
-extern void validate_pat_support(struct cpuinfo_x86 *c);
 #else
 static const int pat_enabled;
-static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
 #endif

 extern void pat_init(void);

@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
 			unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);

-extern void pat_disable(char *reason);
-
 #endif /* _ASM_X86_PAT_H */
@@ -240,64 +240,78 @@ static inline int pmd_large(pmd_t pte)
 		 (_PAGE_PSE | _PAGE_PRESENT);
 }

+static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+{
+	pteval_t v = native_pte_val(pte);
+
+	return native_make_pte(v | set);
+}
+
+static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
+{
+	pteval_t v = native_pte_val(pte);
+
+	return native_make_pte(v & ~clear);
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+	return pte_clear_flags(pte, _PAGE_DIRTY);
 }

 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+	return pte_clear_flags(pte, _PAGE_ACCESSED);
 }

 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_RW);
+	return pte_clear_flags(pte, _PAGE_RW);
 }

 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_NX);
+	return pte_clear_flags(pte, _PAGE_NX);
 }

 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_DIRTY);
+	return pte_set_flags(pte, _PAGE_DIRTY);
 }

 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+	return pte_set_flags(pte, _PAGE_ACCESSED);
 }

 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_RW);
+	return pte_set_flags(pte, _PAGE_RW);
 }

 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_PSE);
+	return pte_set_flags(pte, _PAGE_PSE);
 }

 static inline pte_t pte_clrhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_PSE);
+	return pte_clear_flags(pte, _PAGE_PSE);
 }

 static inline pte_t pte_mkglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_GLOBAL);
+	return pte_set_flags(pte, _PAGE_GLOBAL);
 }

 static inline pte_t pte_clrglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
+	return pte_clear_flags(pte, _PAGE_GLOBAL);
 }

 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+	return pte_set_flags(pte, _PAGE_SPECIAL);
 }

 extern pteval_t __supported_pte_mask;
...
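The point of the two new helpers is that every flag modifier now stays in the native pte representation instead of bouncing through pte_val()/__pte(), which under CONFIG_PARAVIRT are indirect pvop calls. A sketch (not from the patch) of how the helpers compose for a combined modifier:

/* Sketch: clear two flags in one pass over the native bits. */
static inline pte_t pte_wrprotect_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW | _PAGE_DIRTY);
}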
@@ -6,8 +6,4 @@
 #define ARCH_GET_FS 0x1003
 #define ARCH_GET_GS 0x1004

-#ifdef CONFIG_X86_64
-extern long sys_arch_prctl(int, unsigned long);
-#endif /* CONFIG_X86_64 */
-
 #endif /* _ASM_X86_PRCTL_H */
@@ -73,7 +73,7 @@ struct cpuinfo_x86 {
 	char			pad0;
 #else
 	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int			x86_tlbsize;
 	__u8			x86_virt_bits;
 	__u8			x86_phys_bits;
 #endif
...
 #ifndef _ASM_X86_SETUP_H
 #define _ASM_X86_SETUP_H

+#ifdef __KERNEL__
+
 #define COMMAND_LINE_SIZE 2048

 #ifndef __ASSEMBLY__
@@ -8,10 +10,8 @@
 /* Interrupt control for vSMPowered x86_64 systems */
 void vsmp_init(void);

 void setup_bios_corruption_check(void);

 #ifdef CONFIG_X86_VISWS
 extern void visws_early_detect(void);
 extern int is_visws_box(void);
@@ -43,7 +43,7 @@ struct x86_quirks {
 	void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
 	void (*mpc_oem_pci_bus)(struct mpc_bus *m);
 	void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
 				unsigned short oemsize);
 	int (*setup_ioapic_ids)(void);
+	int (*update_genapic)(void);
 };
@@ -56,8 +56,6 @@ extern unsigned long saved_video_mode;
 #endif
 #endif /* __ASSEMBLY__ */

-#ifdef __KERNEL__
-
 #ifdef __i386__

 #include <linux/pfn.h>
...
 #ifndef _ASM_X86_SIGCONTEXT32_H
 #define _ASM_X86_SIGCONTEXT32_H

+#include <linux/types.h>
+
 /* signal context for 32bit programs. */

 #define X86_FXSR_MAGIC	0x0000
...
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }

-#ifdef CONFIG_PARAVIRT
-/*
- * Define virtualization-friendly old-style lock byte lock, for use in
- * pv_lock_ops if desired.
- *
- * This differs from the pre-2.6.24 spinlock by always using xchgb
- * rather than decb to take the lock; this allows it to use a
- * zero-initialized lock structure.  It also maintains a 1-byte
- * contention counter, so that we can implement
- * __byte_spin_is_contended.
- */
-struct __byte_spinlock {
-	s8 lock;
-	s8 spinners;
-};
-
-static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->lock != 0;
-}
-
-static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->spinners != 0;
-}
-
-static inline void __byte_spin_lock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	s8 val = 1;
-
-	asm("1: xchgb %1, %0\n"
-	    "   test %1,%1\n"
-	    "   jz 3f\n"
-	    "   " LOCK_PREFIX "incb %2\n"
-	    "2: rep;nop\n"
-	    "   cmpb $1, %0\n"
-	    "   je 2b\n"
-	    "   " LOCK_PREFIX "decb %2\n"
-	    "   jmp 1b\n"
-	    "3:"
-	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
-}
-
-static inline int __byte_spin_trylock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	u8 old = 1;
-
-	asm("xchgb %1,%0"
-	    : "+m" (bl->lock), "+q" (old) : : "memory");
-
-	return old == 0;
-}
-
-static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	smp_wmb();
-	bl->lock = 0;
-}
-
-#else  /* !CONFIG_PARAVIRT */
+#ifndef CONFIG_PARAVIRT
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
@@ -267,7 +205,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }

-#endif	/* CONFIG_PARAVIRT */
+#endif

 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
@@ -329,8 +267,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;

-	atomic_dec(count);
-	if (atomic_read(count) >= 0)
+	if (atomic_dec_return(count) >= 0)
 		return 1;
 	atomic_inc(count);
 	return 0;
...
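The old sequence performed two independent atomic operations, so the value seen by atomic_read() could already include another CPU's concurrent update rather than the result of our own decrement; atomic_dec_return() makes the test one indivisible read-modify-write. A hedged side-by-side sketch, not patch code:

static int trylock_two_step(atomic_t *count)
{
	atomic_dec(count);
	/* another CPU may modify *count right here, so the test below is
	 * not necessarily about the value our own decrement produced */
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static int trylock_one_step(atomic_t *count)
{
	if (atomic_dec_return(count) >= 0)	/* one atomic RMW decides */
		return 1;
	atomic_inc(count);
	return 0;
}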
@@ -111,16 +111,16 @@ do {									\
"movq "__percpu_arg([current_task])",%%rsi\n\t" \ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
__switch_canary \ __switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \ "movq %P[thread_info](%%rsi),%%r8\n\t" \
LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
"movq %%rax,%%rdi\n\t" \ "movq %%rax,%%rdi\n\t" \
"jc ret_from_fork\n\t" \ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
"jnz ret_from_fork\n\t" \
RESTORE_CONTEXT \ RESTORE_CONTEXT \
: "=a" (last) \ : "=a" (last) \
__switch_canary_oparam \ __switch_canary_oparam \
: [next] "S" (next), [prev] "D" (prev), \ : [next] "S" (next), [prev] "D" (prev), \
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \ [ti_flags] "i" (offsetof(struct thread_info, flags)), \
[tif_fork] "i" (TIF_FORK), \ [_tif_fork] "i" (_TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \ [thread_info] "i" (offsetof(struct task_struct, stack)), \
[current_task] "m" (per_cpu_var(current_task)) \ [current_task] "m" (per_cpu_var(current_task)) \
__switch_canary_iparam \ __switch_canary_iparam \
......
@@ -40,6 +40,7 @@ struct thread_info {
 	 */
 	__u8			supervisor_stack[0];
 #endif
+	int			uaccess_err;
 };

 #define INIT_THREAD_INFO(tsk)			\
...
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
 #define __get_user_x(size, ret, x, ptr)		      \
 	asm volatile("call __get_user_" #size	      \
-		     : "=a" (ret),"=d" (x)	      \
+		     : "=a" (ret), "=d" (x)	      \
 		     : "0" (ptr))		      \

 /* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
 #define __put_user_x(size, x, ptr, __ret_pu)			\
 	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
-		     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

 #ifdef CONFIG_X86_32
-#define __put_user_u64(x, addr, err)					\
+#define __put_user_asm_u64(x, addr, err)				\
 	asm volatile("1:	movl %%eax,0(%2)\n"			\
 		     "2:	movl %%edx,4(%2)\n"			\
 		     "3:\n"						\
@@ -199,12 +199,22 @@ extern int __get_user_bad(void);
: "=r" (err) \ : "=r" (err) \
: "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
asm volatile("1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
"3:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
_ASM_EXTABLE(2b, 3b - 2b) \
: : "A" (x), "r" (addr))
#define __put_user_x8(x, ptr, __ret_pu) \ #define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \ asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else #else
#define __put_user_u64(x, ptr, retval) \ #define __put_user_asm_u64(x, ptr, retval) \
__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "Zr")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif #endif
@@ -276,10 +286,31 @@ do {									\
__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
break; \ break; \
case 4: \ case 4: \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\ __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \ break; \
case 8: \ case 8: \
__put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
#define __put_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
break; \
case 2: \
__put_user_asm_ex(x, ptr, "w", "w", "ir"); \
break; \
case 4: \
__put_user_asm_ex(x, ptr, "l", "k", "ir"); \
break; \
case 8: \
__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
break; \ break; \
default: \ default: \
__put_user_bad(); \ __put_user_bad(); \
@@ -311,9 +342,12 @@ do {									\
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
 	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+	 __get_user_asm_ex(x, ptr, "q", "", "=r")
 #endif
 #define __get_user_size(x, ptr, size, retval, errret)			\
@@ -350,6 +384,33 @@ do {									\
: "=r" (err), ltype(x) \ : "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err)) : "m" (__m(addr)), "i" (errret), "0" (err))
#define __get_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
break; \
case 2: \
__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
break; \
case 4: \
__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
break; \
case 8: \
__get_user_asm_ex_u64(x, ptr); \
break; \
default: \
(x) = __get_user_bad(); \
} \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
: ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size) \ #define __put_user_nocheck(x, ptr, size) \
({ \ ({ \
int __pu_err; \ int __pu_err; \
@@ -385,6 +446,26 @@ struct __large_struct { unsigned long buf[100]; };
 		     _ASM_EXTABLE(1b, 3b)				\
 		     : "=r"(err)					\
 		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

+#define __put_user_asm_ex(x, addr, itype, rtype, ltype)		\
+	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
+		     "2:\n"						\
+		     _ASM_EXTABLE(1b, 2b - 1b)				\
+		     : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try	do {						\
+	int prev_err = current_thread_info()->uaccess_err;		\
+	current_thread_info()->uaccess_err = 0;				\
+	barrier();
+
+#define uaccess_catch(err)						\
+	(err) |= current_thread_info()->uaccess_err;			\
+	current_thread_info()->uaccess_err = prev_err;			\
+} while (0)
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x:   Variable to store result.
@@ -408,6 +489,7 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user(x, ptr)						\
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
 /**
  * __put_user: - Write a simple value into user space, with less checking.
  * @x:   Value to copy to user space.
@@ -434,6 +516,27 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user

+/*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ *	get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try		uaccess_try
+#define get_user_catch(err)	uaccess_catch(err)
+#define put_user_try		uaccess_try
+#define put_user_catch(err)	uaccess_catch(err)
+
+#define get_user_ex(x, ptr)	do {					\
+	unsigned long __gue_val;					\
+	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
+	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
+} while (0)
+
+#define put_user_ex(x, ptr)						\
+	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
...
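A hedged sketch of how the new try/catch API is meant to be used to batch several user-space accesses behind one error check, modeled on the usage comment in the hunk above (the struct and function names are invented for illustration):

struct demo_frame {
	unsigned long ax;
	unsigned long ip;
};

static int restore_demo_frame(struct demo_frame __user *frame,
			      unsigned long *ax, unsigned long *ip)
{
	int err = 0;

	get_user_try {
		/* a fault here does not branch out of line; it is
		 * recorded in thread_info->uaccess_err by the fixup */
		get_user_ex(*ax, &frame->ax);
		get_user_ex(*ip, &frame->ip);
	} get_user_catch(err);

	return err;		/* nonzero if any access faulted */
}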
@@ -143,37 +143,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has_pat)
-		pat_disable("PAT not supported by CPU.");
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * There is a known erratum on Pentium III and Core Solo
-		 * and Core Duo CPUs.
-		 * " Page with PAT set to WC while associated MTRR is UC
-		 *   may consolidate to UC "
-		 * Because of this erratum, it is better to stick with
-		 * setting WC in MTRR rather than using PAT on these CPUs.
-		 *
-		 * Enable PAT WC only on P4, Core 2 or later CPUs.
-		 */
-		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-			return;
-
-		pat_disable("PAT WC disabled due to known CPU erratum.");
-		return;
-
-	case X86_VENDOR_AMD:
-	case X86_VENDOR_CENTAUR:
-	case X86_VENDOR_TRANSMETA:
-		return;
-	}
-
-	pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
@@ -223,6 +223,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif

+/*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_cpuid_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
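The sign games in the level comparison deserve a worked example. Standard CPUID levels are small positive numbers, while extended levels start at 0x80000000 and are therefore negative as s32; cpuid_level is -1 when CPUID is unavailable and extended_cpuid_level is 0. A hedged restatement of the test (a sketch, not patch code):

/* Returns true when the required CPUID level is absent. */
static int level_missing(u32 level, s32 cpuid_level, u32 extended_cpuid_level)
{
	if ((s32)level < 0)				/* 0x8000xxxx leaf */
		return (u32)level > extended_cpuid_level; /* 0 fails all */
	return (s32)level > cpuid_level;		/* -1 fails all */
}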
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
@@ -586,11 +629,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);

-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 void __init early_cpu_init(void)
@@ -724,6 +766,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
* we do "generic changes." * we do "generic changes."
*/ */
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
/* If the model name is still unset, do table lookup. */ /* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) { if (!c->x86_model_id[0]) {
char *p; char *p;
@@ -1053,22 +1098,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-	/*
-	 * Clear all 6 debug registers:
-	 */
-	set_debugreg(0UL, 0);
-	set_debugreg(0UL, 1);
-	set_debugreg(0UL, 2);
-	set_debugreg(0UL, 3);
-	set_debugreg(0UL, 6);
-	set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
-	}
-#endif
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
+		set_debugreg(0UL, 0);
+		set_debugreg(0UL, 1);
+		set_debugreg(0UL, 2);
+		set_debugreg(0UL, 3);
+		set_debugreg(0UL, 6);
+		set_debugreg(0UL, 7);
+	}

 	fpu_init();
...
@@ -30,7 +30,7 @@
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
 	/* Unmask CPUID levels if masked: */
-	if (c->x86 == 6 && c->x86_model >= 15) {
+	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 		u64 misc_enable;

 		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
@@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 	}

+	/*
+	 * There is a known erratum on Pentium III and Core Solo
+	 * and Core Duo CPUs.
+	 * " Page with PAT set to WC while associated MTRR is UC
+	 *   may consolidate to UC "
+	 * Because of this erratum, it is better to stick with
+	 * setting WC in MTRR rather than using PAT on these CPUs.
+	 *
+	 * Enable PAT WC only on P4, Core 2 or later CPUs.
+	 */
+	if (c->x86 == 6 && c->x86_model < 15)
+		clear_cpu_cap(c, X86_FEATURE_PAT);
 }

 #ifdef CONFIG_X86_32
...
@@ -409,6 +409,8 @@ END(save_paranoid)
 ENTRY(ret_from_fork)
 	DEFAULT_FRAME

+	LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
 	push kernel_eflags(%rip)
 	CFI_ADJUST_CFA_OFFSET 8
 	popf	# reset kernel eflags
...
@@ -548,11 +548,7 @@ early_fault:
 	pushl %eax
 	pushl %edx		/* trapno */
 	pushl $fault_msg
-#ifdef CONFIG_EARLY_PRINTK
-	call early_printk
-#else
 	call printk
-#endif
 #endif
 	call dump_stack
 hlt_loop:
@@ -580,11 +576,10 @@ ignore_int:
 	pushl 32(%esp)
 	pushl 40(%esp)
 	pushl $int_msg
-#ifdef CONFIG_EARLY_PRINTK
-	call early_printk
-#else
 	call printk
-#endif
+
+	call dump_stack
+
 	addl $(5*4),%esp
 	popl %ds
 	popl %es
@@ -660,7 +655,7 @@ early_recursion_flag:
 	.long 0

 int_msg:
-	.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+	.asciz "Unknown interrupt or fault at: %p %p %p\n"

 fault_msg:
 /* fault info: */
...
@@ -3465,40 +3465,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 	return 0;
 }

-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
-{
-	unsigned int irq;
-	int ret;
-	unsigned int irq_want;
-
-	irq_want = nr_irqs_gsi;
-	irq = create_irq_nr(irq_want);
-	if (irq == 0)
-		return -1;
-
-#ifdef CONFIG_INTR_REMAP
-	if (!intr_remapping_enabled)
-		goto no_ir;
-
-	ret = msi_alloc_irte(dev, irq, 1);
-	if (ret < 0)
-		goto error;
-no_ir:
-#endif
-	ret = setup_msi_irq(dev, msidesc, irq);
-	if (ret < 0) {
-		destroy_irq(irq);
-		return ret;
-	}
-	return 0;
-
-#ifdef CONFIG_INTR_REMAP
-error:
-	destroy_irq(irq);
-	return ret;
-#endif
-}
-
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
 	unsigned int irq;
...
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
 };
 EXPORT_SYMBOL(pv_lock_ops);
-
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-	pv_lock_ops.spin_lock = __byte_spin_lock;
-	pv_lock_ops.spin_trylock = __byte_spin_trylock;
-	pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
@@ -435,7 +435,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 #endif /* PAGETABLE_LEVELS >= 3 */

 	.pte_val = native_pte_val,
-	.pte_flags = native_pte_flags,
 	.pgd_val = native_pgd_val,

 	.make_pte = native_make_pte,
...
#
# Makefile for the RDC321x specific parts of the kernel
#
obj-$(CONFIG_X86_RDC321X) := gpio.o platform.o
/*
* GPIO support for RDC SoC R3210/R8610
*
* Copyright (C) 2007, Florian Fainelli <florian@openwrt.org>
* Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/module.h>
#include <asm/gpio.h>
#include <asm/mach-rdc321x/rdc321x_defs.h>
/* spin lock to protect our private copy of the GPIO data registers
   plus the access to the PCI config registers */
static DEFINE_SPINLOCK(gpio_lock);
/* copy of GPIO data registers */
static u32 gpio_data_reg1;
static u32 gpio_data_reg2;
static u32 gpio_request_data[2];
static inline void rdc321x_conf_write(unsigned addr, u32 value)
{
outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
outl(value, RDC3210_CFGREG_DATA);
}
static inline void rdc321x_conf_or(unsigned addr, u32 value)
{
outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
value |= inl(RDC3210_CFGREG_DATA);
outl(value, RDC3210_CFGREG_DATA);
}
static inline u32 rdc321x_conf_read(unsigned addr)
{
outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
return inl(RDC3210_CFGREG_DATA);
}
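These three helpers are PCI configuration mechanism #1: an address word is written to port 0xCF8 (RDC3210_CFGREG_ADDR), then the data is read or written through port 0xCFC (RDC3210_CFGREG_DATA). In the magic word, bit 31 is the enable bit and (7 << 11) selects device 7, function 0 on bus 0, presumably the SoC's southbridge, which exposes the GPIO registers there. A hedged sketch of the full encoding (not from the patch):

/* Sketch: the mechanism-#1 configuration address word layout. */
static inline u32 pci_conf1_addr(u32 bus, u32 dev, u32 fn, u32 reg)
{
	return (1u << 31)	/* enable configuration cycle */
	     | (bus << 16)	/* bits 23:16  bus number */
	     | (dev << 11)	/* bits 15:11  device number */
	     | (fn  <<  8)	/* bits 10:8   function number */
	     | (reg & 0xfc);	/* bits  7:2   dword-aligned register */
}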
/* configure pin as GPIO */
static void rdc321x_configure_gpio(unsigned gpio)
{
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
rdc321x_conf_or(gpio < 32
? RDC321X_GPIO_CTRL_REG1 : RDC321X_GPIO_CTRL_REG2,
1 << (gpio & 0x1f));
spin_unlock_irqrestore(&gpio_lock, flags);
}
/* Initially set up the two copies of the GPIO data registers.
   This function must be called by the platform setup code. */
void __init rdc321x_gpio_setup(void)
{
	/* This might not match what others (BIOS, bootloader, etc.) wrote
	   to these registers before, but it's a good guess, and still
	   better than just using 0xffffffff. */
	gpio_data_reg1 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG1);
	gpio_data_reg2 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG2);
}
/* determine if the GPIO number is valid */
static inline int rdc321x_is_gpio(unsigned gpio)
{
return gpio <= RDC321X_MAX_GPIO;
}
/* request GPIO */
int rdc_gpio_request(unsigned gpio, const char *label)
{
unsigned long flags;
if (!rdc321x_is_gpio(gpio))
return -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
if (gpio_request_data[(gpio & 0x20) ? 1 : 0] & (1 << (gpio & 0x1f)))
goto inuse;
gpio_request_data[(gpio & 0x20) ? 1 : 0] |= (1 << (gpio & 0x1f));
spin_unlock_irqrestore(&gpio_lock, flags);
return 0;
inuse:
spin_unlock_irqrestore(&gpio_lock, flags);
return -EINVAL;
}
EXPORT_SYMBOL(rdc_gpio_request);
/* release previously-claimed GPIO */
void rdc_gpio_free(unsigned gpio)
{
unsigned long flags;
if (!rdc321x_is_gpio(gpio))
return;
spin_lock_irqsave(&gpio_lock, flags);
gpio_request_data[(gpio & 0x20) ? 1 : 0] &= ~(1 << (gpio & 0x1f));
spin_unlock_irqrestore(&gpio_lock, flags);
}
EXPORT_SYMBOL(rdc_gpio_free);
/* read GPIO pin */
int rdc_gpio_get_value(unsigned gpio)
{
u32 reg;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
reg = rdc321x_conf_read(gpio < 32
? RDC321X_GPIO_DATA_REG1 : RDC321X_GPIO_DATA_REG2);
spin_unlock_irqrestore(&gpio_lock, flags);
return (1 << (gpio & 0x1f)) & reg ? 1 : 0;
}
EXPORT_SYMBOL(rdc_gpio_get_value);
/* set GPIO pin to value */
void rdc_gpio_set_value(unsigned gpio, int value)
{
unsigned long flags;
u32 reg;
reg = 1 << (gpio & 0x1f);
if (gpio < 32) {
spin_lock_irqsave(&gpio_lock, flags);
if (value)
gpio_data_reg1 |= reg;
else
gpio_data_reg1 &= ~reg;
rdc321x_conf_write(RDC321X_GPIO_DATA_REG1, gpio_data_reg1);
spin_unlock_irqrestore(&gpio_lock, flags);
} else {
spin_lock_irqsave(&gpio_lock, flags);
if (value)
gpio_data_reg2 |= reg;
else
gpio_data_reg2 &= ~reg;
rdc321x_conf_write(RDC321X_GPIO_DATA_REG2, gpio_data_reg2);
spin_unlock_irqrestore(&gpio_lock, flags);
}
}
EXPORT_SYMBOL(rdc_gpio_set_value);
/* configure GPIO pin as input */
int rdc_gpio_direction_input(unsigned gpio)
{
if (!rdc321x_is_gpio(gpio))
return -EINVAL;
rdc321x_configure_gpio(gpio);
return 0;
}
EXPORT_SYMBOL(rdc_gpio_direction_input);
/* configure GPIO pin as output and set value */
int rdc_gpio_direction_output(unsigned gpio, int value)
{
if (!rdc321x_is_gpio(gpio))
return -EINVAL;
gpio_set_value(gpio, value);
rdc321x_configure_gpio(gpio);
return 0;
}
EXPORT_SYMBOL(rdc_gpio_direction_output);
/*
* Generic RDC321x platform devices
*
* Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <asm/gpio.h>
/* LEDS */
static struct gpio_led default_leds[] = {
{ .name = "rdc:dmz", .gpio = 1, },
};
static struct gpio_led_platform_data rdc321x_led_data = {
.num_leds = ARRAY_SIZE(default_leds),
.leds = default_leds,
};
static struct platform_device rdc321x_leds = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &rdc321x_led_data,
}
};
/* Watchdog */
static struct platform_device rdc321x_wdt = {
.name = "rdc321x-wdt",
.id = -1,
.num_resources = 0,
};
static struct platform_device *rdc321x_devs[] = {
&rdc321x_leds,
&rdc321x_wdt
};
static int __init rdc_board_setup(void)
{
rdc321x_gpio_setup();
return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
}
arch_initcall(rdc_board_setup);
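platform_add_devices() only registers the devices; a driver binds later by matching the .name string. A hedged sketch of the consuming side for the watchdog (the probe body and driver/function names are invented for illustration):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit demo_wdt_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound to rdc321x-wdt\n");
	return 0;
}

static struct platform_driver demo_wdt_driver = {
	.probe	= demo_wdt_probe,
	.driver	= {
		.name	= "rdc321x-wdt",	/* must match the device above */
		.owner	= THIS_MODULE,
	},
};

static int __init demo_wdt_init(void)
{
	return platform_driver_register(&demo_wdt_driver);
}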
@@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)
 	fixup = search_exception_tables(regs->ip);
 	if (fixup) {
+		/* If fixup is less than 16, it means uaccess error */
+		if (fixup->fixup < 16) {
+			current_thread_info()->uaccess_err = -EFAULT;
+			regs->ip += fixup->fixup;
+			return 1;
+		}
 		regs->ip = fixup->fixup;
 		return 1;
 	}
...
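This is the counterpart of the _ASM_EXTABLE(1b, 2b - 1b) entries in the new uaccess macros: instead of a full landing address, those entries store the byte length of the faulting instruction, which is always well under 16, so fixup_exception() can tell the two encodings apart, step over the instruction, and latch -EFAULT into thread_info->uaccess_err for get_user_catch() to collect. A hedged sketch of the dispatch logic (not patch code; names invented):

struct extable_entry_sketch {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* >= 16: jump target; < 16: bytes to skip */
};

static int apply_fixup_sketch(struct extable_entry_sketch *e,
			      unsigned long *ip, int *uaccess_err)
{
	if (e->fixup < 16) {		/* uaccess-style entry */
		*uaccess_err = -EFAULT;
		*ip += e->fixup;	/* step over the faulting mov */
	} else {
		*ip = e->fixup;		/* classic: branch to handler */
	}
	return 1;
}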
@@ -420,7 +420,6 @@ static noinline void pgtable_bad(struct pt_regs *regs,
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
tsk->comm, address); tsk->comm, address);
dump_pagetable(address); dump_pagetable(address);
tsk = current;
tsk->thread.cr2 = address; tsk->thread.cr2 = address;
tsk->thread.trap_no = 14; tsk->thread.trap_no = 14;
tsk->thread.error_code = error_code; tsk->thread.error_code = error_code;
......
@@ -367,7 +367,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
 	if (pat_enabled)
 		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
...
@@ -30,7 +30,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;

-void __cpuinit pat_disable(char *reason)
+void __cpuinit pat_disable(const char *reason)
 {
 	pat_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
@@ -42,6 +42,11 @@ static int __init nopat(char *str)
 	return 0;
 }
 early_param("nopat", nopat);
+#else
+static inline void pat_disable(const char *reason)
+{
+	(void)reason;
+}
 #endif
@@ -78,16 +83,20 @@ void pat_init(void)
 	if (!pat_enabled)
 		return;

-	/* Paranoia check. */
-	if (!cpu_has_pat && boot_pat_state) {
-		/*
-		 * If this happens we are on a secondary CPU, but
-		 * switched to PAT on the boot CPU. We have no way to
-		 * undo PAT.
-		 */
-		printk(KERN_ERR "PAT enabled, "
-		       "but not supported by secondary CPU\n");
-		BUG();
+	if (!cpu_has_pat) {
+		if (!boot_pat_state) {
+			pat_disable("PAT not supported by CPU.");
+			return;
+		} else {
+			/*
+			 * If this happens we are on a secondary CPU, but
+			 * switched to PAT on the boot CPU. We have no way to
+			 * undo PAT.
+			 */
+			printk(KERN_ERR "PAT enabled, "
+			       "but not supported by secondary CPU\n");
+			BUG();
+		}
 	}

 	/* Set PWT to Write-Combining. All other bits stay the same */
...
@@ -1307,7 +1307,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

 	.pte_val = xen_pte_val,
-	.pte_flags = native_pte_flags,
 	.pgd_val = xen_pgd_val,

 	.make_pte = xen_make_pte,
...
@@ -37,7 +37,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>

-#include <asm/mach-rdc321x/rdc321x_defs.h>
+#include <asm/rdc321x_defs.h>

 #define RDC_WDT_MASK	0x80000000	/* Mask */
 #define RDC_WDT_EN	0x00800000	/* Enable bit */
...