Commit 7cb7beb3 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/fyu/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/fyu/linux-2.6:
  arch/ia64/kernel/iosapic: missing test after ioremap()
  ia64/topology.c: exit cache_add_dev when kobject_init_and_add fails
  arch/ia64/Makefile: Remove -mtune=merced in IA64 kernel build
  IA64: includecheck fix: ia64, pgtable.h
  IA64: includecheck fix: ia64, ia64_ksyms.c
  ia64: boolean __test_and_clear_bit
  Bug Fix arch/ia64/kernel/pci-dma.c: fix recursive dma_supported() call in iommu_dma_supported()
parents 4d9c73f6 e7369e01
arch/ia64/Makefile
@@ -41,11 +41,6 @@ $(error Sorry, you need a newer version of the assembler, one that is built from
 		ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
 endif
 
-ifeq ($(call cc-version),0304)
-	cflags-$(CONFIG_ITANIUM)	+= -mtune=merced
-	cflags-$(CONFIG_MCKINLEY)	+= -mtune=mckinley
-endif
-
 KBUILD_CFLAGS += $(cflags-y)
 
 head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
arch/ia64/include/asm/bitops.h
@@ -286,7 +286,7 @@ __test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
-	int oldbitset = *p & m;
+	int oldbitset = (*p & m) != 0;
 
 	*p &= ~m;
 	return oldbitset;
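The bitops change matters because the old __test_and_clear_bit() returned the raw masked word rather than 0 or 1, so any caller that compares the result against 1, or stores it in a narrower type, misreads a set bit. A minimal user-space sketch of the hazard (hypothetical function name, not the kernel code path):

#include <stdio.h>

typedef unsigned int __u32;

/* old behaviour: returns the raw masked word, e.g. 0x100 for bit 8 */
static int old_test_and_clear_bit(int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = *p & m;

	*p &= ~m;
	return oldbitset;
}

int main(void)
{
	__u32 word = 1u << 8;
	int ret = old_test_and_clear_bit(8, &word);

	printf("ret == 1?   %d\n", ret == 1);   /* 0: bit was set, yet the test fails */
	printf("(char)ret:  %d\n", (char)ret);  /* 0: narrowing loses the bit entirely */
	return 0;
}

With the fix, oldbitset is normalised to 0 or 1, matching the generic bitops, so both reads above report the bit correctly.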
arch/ia64/include/asm/pgtable.h
@@ -155,7 +155,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
-#include <asm/processor.h>
 
 /*
  * Next come the mappings that determine how mmap() protection bits
arch/ia64/kernel/ia64_ksyms.c
@@ -21,6 +21,7 @@ EXPORT_SYMBOL(csum_ipv6_magic);
 
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 #include <linux/bootmem.h>
@@ -60,9 +61,6 @@ EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__moddi3);
 EXPORT_SYMBOL(__umoddi3);
 
-#include <asm/page.h>
-EXPORT_SYMBOL(copy_page);
-
 #if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
 extern void xor_ia64_2(void);
 extern void xor_ia64_3(void);
arch/ia64/kernel/iosapic.c
@@ -1072,6 +1072,10 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
 	}
 
 	addr = ioremap(phys_addr, 0);
+	if (addr == NULL) {
+		spin_unlock_irqrestore(&iosapic_lock, flags);
+		return -ENOMEM;
+	}
 	ver = iosapic_version(addr);
 	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
 		iounmap(addr);
arch/ia64/kernel/pci-dma.c
@@ -69,11 +69,6 @@ iommu_dma_init(void)
 
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-
-	if (ops->dma_supported)
-		return ops->dma_supported(dev, mask);
-
 	/* Copied from i386. Doesn't make much sense, because it will
 	   only work for pci_alloc_coherent.
 	   The caller just has to use GFP_DMA in this case. */
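The pci-dma.c hunk removes an indirect call that recursed: on the affected configuration iommu_dma_supported() is itself the dma_supported operation handed back by platform_dma_get_ops(), so dispatching through ops->dma_supported() simply re-entered the same function until the stack blew. A stripped-down sketch of that self-dispatch pattern (hypothetical types and names, not the kernel's):

#include <stdio.h>

/* Hypothetical stand-ins, only to show the recursion the fix removes. */
struct dma_ops {
	int (*dma_supported)(int dev, unsigned long long mask);
};

static struct dma_ops registered_ops;

static struct dma_ops *get_ops(int dev)
{
	return &registered_ops;          /* always hands back the same ops table */
}

static int iommu_dma_supported_like(int dev, unsigned long long mask)
{
	struct dma_ops *ops = get_ops(dev);

	/* This function is the registered .dma_supported hook, so the
	 * indirect call below re-enters it and never terminates. */
	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	return 1;
}

int main(void)
{
	registered_ops.dma_supported = iommu_dma_supported_like;
	puts("calling iommu_dma_supported_like() here would recurse until the stack overflows");
	return 0;
}

The fix drops the dispatch and falls through to the mask check that follows the quoted comment.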
arch/ia64/kernel/topology.c
@@ -372,6 +372,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
 				      &cache_ktype_percpu_entry, &sys_dev->kobj,
 				      "%s", "cache");
+	if (unlikely(retval < 0)) {
+		cpu_cache_sysfs_exit(cpu);
+		return retval;
+	}
 
 	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
 		this_object = LEAF_KOBJECT_PTR(cpu,i);
@@ -385,7 +389,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 			}
 			kobject_put(&all_cpu_cache_info[cpu].kobj);
 			cpu_cache_sysfs_exit(cpu);
-			break;
+			return retval;
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
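The topology.c change gives cache_add_dev() a real error path: if registering the per-CPU "cache" kobject fails, the sysfs state is torn down and the error is returned, instead of falling through and attaching leaf objects under a parent that was never added; the later break in the leaf loop likewise becomes an explicit return retval. A compact sketch of that control flow with hypothetical helpers (not the kernel API):

#include <errno.h>
#include <stdio.h>

/* Hypothetical registration helper: pretend the 3rd call fails. */
static int register_obj(void)
{
	static int calls;
	return (++calls == 3) ? -ENOMEM : 0;
}

static void teardown(void)
{
	puts("teardown");
}

static int cache_add_dev_like(int nleaves)
{
	int i, retval;

	retval = register_obj();         /* parent kobject */
	if (retval < 0) {
		teardown();              /* undo the sysfs setup */
		return retval;           /* previously: fell through regardless */
	}

	for (i = 0; i < nleaves; i++) {  /* per-leaf kobjects, as in the loop above */
		retval = register_obj();
		if (retval) {
			teardown();
			return retval;   /* previously: break */
		}
	}
	return 0;
}

int main(void)
{
	printf("%d\n", cache_add_dev_like(4));  /* -ENOMEM from the 3rd registration */
	return 0;
}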