Commit a8745105 authored by Linus Torvalds

Merge tag 'please-pull-misc-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 fixes from Tony Luck:
 "Bundle of miscellaneous ia64 fixes for 3.10 merge window."

* tag 'please-pull-misc-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  Add size restriction to the kdump documentation
  Fix example error_injection_tool
  Fix build error for numa_clear_node() under IA64
  Fix initialization of CMCI/CMCP interrupts
  Change "select DMAR" to "select INTEL_IOMMU"
  Wrong asm register constraints in the kvm implementation
  Wrong asm register constraints in the futex implementation
  Remove cast for kmalloc return value
  Fix kexec oops when iosapic was removed
  iosapic: fix a minor typo in comments
  Add WB/UC check for early_ioremap
  Fix broken fsys_getppid()
  tiocx: check retval from bus_register()
parents 916bb6d7 797f6a68
@@ -882,7 +882,7 @@ int err_inj()
 cpu=parameters[i].cpu;
 k = cpu%64;
 j = cpu/64;
-mask[j]=1<<k;
+mask[j] = 1UL << k;
 if (sched_setaffinity(0, MASK_SIZE*8, mask)==-1) {
 perror("Error sched_setaffinity:");
...
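The one-line change above fixes the example tool's CPU mask computation: mask[] holds 64-bit words, and shifting the int constant 1 left by k is undefined once k reaches 32, so CPUs in the upper half of each word could never be selected. A stand-alone sketch of the corrected arithmetic (not part of the patch; the variable names merely mirror the example tool):

    #include <stdio.h>

    int main(void)
    {
            int cpu = 100;               /* any CPU whose bit index within a word is >= 32 */
            int k = cpu % 64;            /* bit position inside one 64-bit mask word (36) */
            int j = cpu / 64;            /* which mask word (1) */
            unsigned long mask[4] = { 0 };

            /* 64-bit shift; a plain "1 << k" would be a 32-bit shift,
             * which is undefined for k >= 32 */
            mask[j] = 1UL << k;

            printf("word %d = %#lx\n", j, mask[j]);
            return 0;
    }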
@@ -297,6 +297,7 @@ Boot into System Kernel
 On ia64, 256M@256M is a generous value that typically works.
 The region may be automatically placed on ia64, see the
 dump-capture kernel config option notes above.
+If sparse memory is used, the size should be rounded to GRANULE boundaries.
 On s390x, typically use "crashkernel=xxM". The value of xx is dependent
 on the memory consumption of the kdump system. In general this is not
...
@@ -187,7 +187,7 @@ config IA64_DIG
 config IA64_DIG_VTD
 bool "DIG+Intel+IOMMU"
-select DMAR
+select INTEL_IOMMU
 select PCI_MSI
 config IA64_HP_ZX1
...
@@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 return -EFAULT;
 {
-register unsigned long r8 __asm ("r8");
+register unsigned long r8 __asm ("r8") = 0;
 unsigned long prev;
 __asm__ __volatile__(
 " mf;; \n"
-" mov %0=r0 \n"
 " mov ar.ccv=%4;; \n"
 "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
 " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
 "[2:]"
-: "=r" (r8), "=r" (prev)
+: "+r" (r8), "=&r" (prev)
 : "r" (uaddr), "r" (newval),
 "rO" ((long) (unsigned) oldval)
 : "memory");
...
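This hunk and the kvm/vtlb.c hunk further down fix the same class of bug: with a plain "=r" output, GCC assumes the operand is written only after every input has been consumed, so it may reuse an input's register for the output, and an asm body that writes the output early then silently corrupts that input. "+r" additionally tells the compiler that the preset value (r8 = 0) must be fed into the asm, which is why the explicit "mov %0=r0" instruction can be dropped. A hedged illustration of the earlyclobber rule, written for x86-64 rather than ia64 purely so it is easy to compile and try; it is not the kernel code:

    #include <stdio.h>

    /* "=&r" (earlyclobber): GCC must give 'out' a register distinct from every
     * input, because the asm writes %0 before it has finished reading %2/%3.
     * With a plain "=r" the compiler may reuse an input register, and the
     * first movq would silently clobber that input. */
    static long sum3(long a, long b, long c)
    {
            long out;

            asm ("movq %1, %0\n\t"   /* out = a   (early write to %0) */
                 "addq %2, %0\n\t"   /* out += b */
                 "addq %3, %0"       /* out += c */
                 : "=&r" (out)
                 : "r" (a), "r" (b), "r" (c));
            return out;
    }

    int main(void)
    {
            printf("%ld\n", sum3(1, 2, 3));   /* prints 6 */
            return 0;
    }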
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
 extern int cpe_vector;
 extern int ia64_cpe_irq;
 extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
...
@@ -67,14 +67,13 @@ extern int paddr_to_nid(unsigned long paddr);
 extern void map_cpu_to_node(int cpu, int nid);
 extern void unmap_cpu_from_node(int cpu, int nid);
+extern void numa_clear_node(int cpu);
 #else /* !CONFIG_NUMA */
 #define map_cpu_to_node(cpu, nid) do{}while(0)
 #define unmap_cpu_from_node(cpu, nid) do{}while(0)
 #define paddr_to_nid(addr) 0
+#define numa_clear_node(cpu) do { } while (0)
 #endif /* CONFIG_NUMA */
 #endif /* _ASM_IA64_NUMA_H */
@@ -90,53 +90,6 @@ ENTRY(fsys_getpid)
 FSYS_RETURN
 END(fsys_getpid)
-ENTRY(fsys_getppid)
-.prologue
-.altrp b6
-.body
-add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
-;;
-ld8 r17=[r17] // r17 = current->group_leader
-add r9=TI_FLAGS+IA64_TASK_SIZE,r16
-;;
-ld4 r9=[r9]
-add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
-;;
-and r9=TIF_ALLWORK_MASK,r9
-1: ld8 r18=[r17] // r18 = current->group_leader->real_parent
-;;
-cmp.ne p8,p0=0,r9
-add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = &current->group_leader->real_parent->tgid
-;;
-/*
-* The .acq is needed to ensure that the read of tgid has returned its data before
-* we re-check "real_parent".
-*/
-ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid
-#ifdef CONFIG_SMP
-/*
-* Re-read current->group_leader->real_parent.
-*/
-ld8 r19=[r17] // r19 = current->group_leader->real_parent
-(p8) br.spnt.many fsys_fallback_syscall
-;;
-cmp.ne p6,p0=r18,r19 // did real_parent change?
-mov r19=0 // i must not leak kernel bits...
-(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check
-;;
-mov r17=0 // i must not leak kernel bits...
-mov r18=0 // i must not leak kernel bits...
-#else
-mov r17=0 // i must not leak kernel bits...
-mov r18=0 // i must not leak kernel bits...
-mov r19=0 // i must not leak kernel bits...
-#endif
-FSYS_RETURN
-END(fsys_getppid)
 ENTRY(fsys_set_tid_address)
 .prologue
 .altrp b6
@@ -614,7 +567,7 @@ paravirt_fsyscall_table:
 data8 0 // chown
 data8 0 // lseek // 1040
 data8 fsys_getpid // getpid
-data8 fsys_getppid // getppid
+data8 0 // getppid
 data8 0 // mount
 data8 0 // umount
 data8 0 // setuid // 1045
...
@@ -76,7 +76,7 @@
 * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
 *
 * Note: The term "IRQ" is loosely used everywhere in Linux kernel to
-* describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
+* describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
 * (isa_irq) is the only exception in this source code.
 */
@@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
 return 0;
 }
+static int
+iosapic_delete_rte(unsigned int irq, unsigned int gsi)
+{
+struct iosapic_rte_info *rte, *temp;
+list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes,
+rte_list) {
+if (rte->iosapic->gsi_base + rte->rte_index == gsi) {
+if (rte->refcnt)
+return -EBUSY;
+list_del(&rte->rte_list);
+kfree(rte);
+return 0;
+}
+}
+return -EINVAL;
+}
 int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
 {
 int num_rte, err, index;
@@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
 int iosapic_remove(unsigned int gsi_base)
 {
-int index, err = 0;
+int i, irq, index, err = 0;
 unsigned long flags;
 spin_lock_irqsave(&iosapic_lock, flags);
@@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base)
 goto out;
 }
+for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) {
+irq = __gsi_to_irq(i);
+if (irq < 0)
+continue;
+err = iosapic_delete_rte(irq, i);
+if (err)
+goto out;
+}
 iounmap(iosapic_lists[index].addr);
 iosapic_free(index);
 out:
...
@@ -23,6 +23,8 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <asm/mca.h>
 /*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
 #endif /* CONFIG_SMP */
+int __init arch_early_irq_init(void)
+{
+ia64_mca_irq_init();
+return 0;
+}
 #ifdef CONFIG_HOTPLUG_CPU
 unsigned int vectors_in_migration[NR_IRQS];
...
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
 printk(KERN_INFO "MCA related initialization done\n");
 }
 /*
-* ia64_mca_late_init
-*
-* Opportunity to setup things that require initialization later
-* than ia64_mca_init. Setup a timer to poll for CPEs if the
-* platform doesn't support an interrupt driven mechanism.
-*
-* Inputs : None
-* Outputs : Status
+* These pieces cannot be done in ia64_mca_init() because it is called before
+* early_irq_init() which would wipe out our percpu irq registrations. But we
+* cannot leave them until ia64_mca_late_init() because by then all the other
+* processors have been brought online and have set their own CMC vectors to
+* point at a non-existent action. Called from arch_early_irq_init().
 */
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
 {
-if (!mca_init)
-return 0;
 /*
 * Configure the CMCI/P vector and handler. Interrupts for CMC are
 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
 /* Setup the CPEI/P handler */
 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
+}
+/*
+* ia64_mca_late_init
+*
+* Opportunity to setup things that require initialization later
+* than ia64_mca_init. Setup a timer to poll for CPEs if the
+* platform doesn't support an interrupt driven mechanism.
+*
+* Inputs : None
+* Outputs : Status
+*/
+static int __init
+ia64_mca_late_init(void)
+{
+if (!mca_init)
+return 0;
 register_hotcpu_notifier(&mca_cpu_notifier);
...
@@ -349,7 +349,7 @@ init_record_index_pools(void)
 /* - 3 - */
 slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
-slidx_pool.buffer = (slidx_list_t *)
+slidx_pool.buffer =
 kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
 return slidx_pool.buffer ? 0 : -ENOMEM;
...
@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
 "srlz.d;;"
 "ssm psr.i;;"
 "srlz.d;;"
-: "=r"(ret) : "r"(iha), "r"(pte):"memory");
+: "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
 return ret;
 }
...
@@ -16,7 +16,7 @@
 #include <asm/meminit.h>
 static inline void __iomem *
-__ioremap (unsigned long phys_addr)
+__ioremap_uc(unsigned long phys_addr)
 {
 return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
 }
@@ -24,7 +24,11 @@ __ioremap (unsigned long phys_addr)
 void __iomem *
 early_ioremap (unsigned long phys_addr, unsigned long size)
 {
-return __ioremap(phys_addr);
+u64 attr;
+attr = kern_mem_attribute(phys_addr, size);
+if (attr & EFI_MEMORY_WB)
+return (void __iomem *) phys_to_virt(phys_addr);
+return __ioremap_uc(phys_addr);
 }
 void __iomem *
@@ -47,7 +51,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
 if (attr & EFI_MEMORY_WB)
 return (void __iomem *) phys_to_virt(phys_addr);
 else if (attr & EFI_MEMORY_UC)
-return __ioremap(phys_addr);
+return __ioremap_uc(phys_addr);
 /*
 * Some chipsets don't support UC access to memory. If
@@ -93,7 +97,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
 return (void __iomem *) (offset + (char __iomem *)addr);
 }
-return __ioremap(phys_addr);
+return __ioremap_uc(phys_addr);
 }
 EXPORT_SYMBOL(ioremap);
@@ -103,7 +107,7 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
 if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
 return NULL;
-return __ioremap(phys_addr);
+return __ioremap_uc(phys_addr);
 }
 EXPORT_SYMBOL(ioremap_nocache);
...
@@ -73,6 +73,11 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
 return -1;
 }
+void __cpuinit numa_clear_node(int cpu)
+{
+unmap_cpu_from_node(cpu, NUMA_NO_NODE);
+}
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
 * SRAT information is stored in node_memblk[], then we can use SRAT
...
@@ -490,11 +490,14 @@ static int __init tiocx_init(void)
 {
 cnodeid_t cnodeid;
 int found_tiocx_device = 0;
+int err;
 if (!ia64_platform_is("sn2"))
 return 0;
-bus_register(&tiocx_bus_type);
+err = bus_register(&tiocx_bus_type);
+if (err)
+return err;
 for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
 nasid_t nasid;
...