Commit 0d8762c9 authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: fix irqs on/off ip tracing
  lockdep: minor fix for debug_show_all_locks()
  x86: restore the old swiotlb alloc_coherent behavior
  x86: use GFP_DMA for 24bit coherent_dma_mask
  swiotlb: remove panic for alloc_coherent failure
  xen: compilation fix of drivers/xen/events.c on IA64
  xen: portability clean up and some minor clean up for xencomm.c
  xen: don't reload cr3 on suspend
  kernel/resource: fix reserve_region_with_split() section mismatch
  printk: remove unused code from kernel/printk.c
parents cf76dddb 6afe40b4
@@ -255,9 +255,11 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-#ifdef CONFIG_X86_64
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
+	if (dma_mask <= DMA_24BIT_MASK)
+		gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
 	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
...
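Note on the hunk above: the GFP_DMA check now runs unconditionally, so a device with a 24-bit coherent_dma_mask (ISA-era hardware) gets ZONE_DMA memory on 32-bit builds too, while the GFP_DMA32 upgrade stays x86-64 only. A minimal userspace sketch of that flag selection; the GFP bit values are stand-ins and CONFIG_64BIT_SKETCH stands in for CONFIG_X86_64:

/* Illustrative userspace sketch of the flag selection above; not kernel code. */
#include <stdio.h>

#define DMA_24BIT_MASK 0x0000000000ffffffULL
#define DMA_32BIT_MASK 0x00000000ffffffffULL

#define GFP_DMA_SKETCH   0x01u	/* stand-in values for the real gfp bits */
#define GFP_DMA32_SKETCH 0x04u

static unsigned int gfp_for_mask(unsigned long long dma_mask, unsigned int gfp)
{
	if (dma_mask <= DMA_24BIT_MASK)		/* now applies on 32-bit too */
		gfp |= GFP_DMA_SKETCH;
#ifdef CONFIG_64BIT_SKETCH
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA_SKETCH))
		gfp |= GFP_DMA32_SKETCH;
#endif
	return gfp;
}

int main(void)
{
	printf("24-bit mask -> gfp %#x\n", gfp_for_mask(DMA_24BIT_MASK, 0));
	printf("32-bit mask -> gfp %#x\n", gfp_for_mask(DMA_32BIT_MASK, 0));
	return 0;
}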
@@ -18,9 +18,21 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
 	return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flags)
+{
+	void *vaddr;
+
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	if (vaddr)
+		return vaddr;
+
+	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
 struct dma_mapping_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = swiotlb_alloc_coherent,
+	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
 	.map_single = swiotlb_map_single_phys,
 	.unmap_single = swiotlb_unmap_single,
...
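The new x86_swiotlb_alloc_coherent() is a try-then-fall-back allocator: the cheap generic path (dma_generic_alloc_coherent) is attempted first, and the swiotlb bounce-buffer pool is touched only when that fails, restoring the old behavior of not consuming swiotlb memory for ordinary allocations. A self-contained sketch of the same pattern, with malloc/calloc standing in for the two kernel allocators:

/* Self-contained sketch of the try-then-fall-back pattern; malloc/calloc
 * stand in for dma_generic_alloc_coherent()/swiotlb_alloc_coherent(). */
#include <stdlib.h>

static void *generic_alloc(size_t size)     { return malloc(size); }	/* fast path */
static void *bounce_pool_alloc(size_t size) { return calloc(1, size); }	/* reserved pool */

static void *alloc_with_fallback(size_t size)
{
	void *p = generic_alloc(size);

	if (p)
		return p;	/* common case: never touches the reserved pool */

	return bounce_pool_alloc(size);
}

int main(void)
{
	void *buf = alloc_with_fallback(4096);

	free(buf);
	return 0;
}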
@@ -774,7 +774,7 @@ void xen_poll_irq(int irq)
 		poll.nr_ports = 1;
 		poll.timeout = 0;
-		poll.ports = &evtchn;
+		set_xen_guest_handle(poll.ports, &evtchn);
 
 		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
 			BUG();
...
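The IA64 compile fix works because a Xen guest handle is not a bare pointer on every architecture; set_xen_guest_handle() hides the per-arch representation, so the plain pointer assignment only compiled on x86. A simplified, hypothetical sketch of the idea (the real macros and handle layouts live in the per-arch Xen interface headers):

/* Hypothetical sketch of why set_xen_guest_handle() is needed; the real
 * definitions live in the per-architecture Xen interface headers. */
#include <stdio.h>

#ifdef SKETCH_STRUCT_HANDLE
/* Some ports wrap the handle in a structure... */
typedef struct { unsigned int *p; } port_handle_t;
#define set_guest_handle(h, ptr)	((h).p = (ptr))
#else
/* ...while on x86 it really is a bare pointer. */
typedef unsigned int *port_handle_t;
#define set_guest_handle(h, ptr)	((h) = (ptr))
#endif

struct sched_poll_sketch {
	port_handle_t ports;
	unsigned int nr_ports;
	unsigned long timeout;
};

int main(void)
{
	unsigned int evtchn = 42;
	struct sched_poll_sketch poll;

	poll.nr_ports = 1;
	poll.timeout = 0;
	set_guest_handle(poll.ports, &evtchn);	/* compiles for both layouts */

	printf("polling event channel %u\n", evtchn);
	return 0;
}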
@@ -39,8 +39,6 @@ static int xen_suspend(void *data)
 
 	BUG_ON(!irqs_disabled());
 
-	load_cr3(swapper_pg_dir);
-
 	err = device_power_down(PMSG_SUSPEND);
 	if (err) {
 		printk(KERN_ERR "xen_suspend: device_power_down failed: %d\n",
...
@@ -23,13 +23,7 @@
 #include <asm/page.h>
 #include <xen/xencomm.h>
 #include <xen/interface/xen.h>
-#ifdef __ia64__
-#include <asm/xen/xencomm.h>	/* for is_kern_addr() */
-#endif
-
-#ifdef HAVE_XEN_PLATFORM_COMPAT_H
-#include <xen/platform-compat.h>
-#endif
+#include <asm/xen/xencomm.h>	/* for xencomm_is_phys_contiguous() */
 
 static int xencomm_init(struct xencomm_desc *desc,
 			void *buffer, unsigned long bytes)
@@ -157,20 +151,11 @@ static int xencomm_create(void *buffer, unsigned long bytes,
 	return 0;
 }
 
-/* check if memory address is within VMALLOC region */
-static int is_phys_contiguous(unsigned long addr)
-{
-	if (!is_kernel_addr(addr))
-		return 0;
-
-	return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
-}
-
 static struct xencomm_handle *xencomm_create_inline(void *ptr)
 {
 	unsigned long paddr;
 
-	BUG_ON(!is_phys_contiguous((unsigned long)ptr));
+	BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
 
 	paddr = (unsigned long)xencomm_pa(ptr);
 	BUG_ON(paddr & XENCOMM_INLINE_FLAG);
...
@@ -202,7 +187,7 @@ struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
 	int rc;
 	struct xencomm_desc *desc;
 
-	if (is_phys_contiguous((unsigned long)ptr))
+	if (xencomm_is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);
 
 	rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
@@ -219,7 +204,7 @@ struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
 	int rc;
 	struct xencomm_desc *desc = NULL;
 
-	if (is_phys_contiguous((unsigned long)ptr))
+	if (xencomm_is_phys_contiguous((unsigned long)ptr))
 		return xencomm_create_inline(ptr);
 
 	rc = xencomm_create_mini(ptr, bytes, xc_desc,
...
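The xencomm cleanup replaces the file-local is_phys_contiguous() with an arch-supplied xencomm_is_phys_contiguous(), since what counts as physically contiguous kernel memory is architecture-specific. A sketch of what such an implementation might look like, mirroring the removed generic helper; the address constants are illustrative stand-ins, not real layout values:

/* Sketch mirroring the removed generic helper; a real architecture's
 * xencomm_is_phys_contiguous() can apply stricter, arch-specific tests. */
#include <stdio.h>

#define PAGE_OFFSET_SKETCH   0xc0000000UL
#define VMALLOC_START_SKETCH 0xe0000000UL
#define VMALLOC_END_SKETCH   0xf0000000UL

static int is_kernel_addr_sketch(unsigned long addr)
{
	return addr >= PAGE_OFFSET_SKETCH;
}

/* Lowmem is direct-mapped and hence physically contiguous; the vmalloc
 * window is only virtually contiguous, so it must be rejected. */
static int xencomm_is_phys_contiguous_sketch(unsigned long addr)
{
	if (!is_kernel_addr_sketch(addr))
		return 0;

	return addr < VMALLOC_START_SKETCH || addr >= VMALLOC_END_SKETCH;
}

int main(void)
{
	printf("%d\n", xencomm_is_phys_contiguous_sketch(0xc0100000UL));	/* 1: lowmem */
	printf("%d\n", xencomm_is_phys_contiguous_sketch(0xe0100000UL));	/* 0: vmalloc */
	return 0;
}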
@@ -2169,12 +2169,11 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
-	unsigned long ip;
 
-	time_hardirqs_on(CALLER_ADDR0, a0);
+	time_hardirqs_on(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2188,7 +2187,6 @@ void trace_hardirqs_on_caller(unsigned long a0)
 	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
-	ip = (unsigned long) __builtin_return_address(0);
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
@@ -2224,11 +2222,11 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_off(CALLER_ADDR0, a0);
+	time_hardirqs_off(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2241,7 +2239,7 @@ void trace_hardirqs_off_caller(unsigned long a0)
 		 * We have done an ON -> OFF transition:
 		 */
 		curr->hardirqs_enabled = 0;
-		curr->hardirq_disable_ip = _RET_IP_;
+		curr->hardirq_disable_ip = ip;
 		curr->hardirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(&hardirqs_off_events);
 	} else
@@ -3417,9 +3415,10 @@ void debug_show_all_locks(void)
 		}
 		printk(" ignoring it.\n");
 		unlock = 0;
+	} else {
+		if (count != 10)
+			printk(KERN_CONT " locked it.\n");
 	}
-	if (count != 10)
-		printk(" locked it.\n");
 
 	do_each_thread(g, p) {
 		/*
...
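The first four lockdep hunks make the _caller variants trust the instruction pointer passed in by their wrappers instead of recomputing it with __builtin_return_address(0) or _RET_IP_ inside the callee, which could record the wrapper rather than the real irqs-on/off site. A stand-alone sketch of that wrapper/callee split; names are illustrative, not kernel code:

/* Stand-alone sketch of the wrapper/callee split used above. */
#include <stdio.h>

#define RET_IP_SKETCH ((unsigned long)__builtin_return_address(0))

static unsigned long last_enable_ip;

/* The _caller variant now trusts the ip it is handed... */
static void trace_on_caller_sketch(unsigned long ip)
{
	last_enable_ip = ip;	/* no __builtin_return_address() here anymore */
}

/* ...and only the outermost wrapper captures its return address, so the
 * recorded ip is the true call site whichever entry point is used. */
static void trace_on_sketch(void)
{
	trace_on_caller_sketch(RET_IP_SKETCH);
}

int main(void)
{
	trace_on_sketch();
	printf("irqs enabled at ip %#lx\n", last_enable_ip);
	return 0;
}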
@@ -232,45 +232,6 @@ static inline void boot_delay_msec(void)
 }
 #endif
 
-/*
- * Return the number of unread characters in the log buffer.
- */
-static int log_buf_get_len(void)
-{
-	return logged_chars;
-}
-
-/*
- * Copy a range of characters from the log buffer.
- */
-int log_buf_copy(char *dest, int idx, int len)
-{
-	int ret, max;
-	bool took_lock = false;
-
-	if (!oops_in_progress) {
-		spin_lock_irq(&logbuf_lock);
-		took_lock = true;
-	}
-
-	max = log_buf_get_len();
-	if (idx < 0 || idx >= max) {
-		ret = -1;
-	} else {
-		if (len > max)
-			len = max;
-		ret = len;
-		idx += (log_end - max);
-		while (len-- > 0)
-			dest[len] = LOG_BUF(idx + len);
-	}
-
-	if (took_lock)
-		spin_unlock_irq(&logbuf_lock);
-
-	return ret;
-}
-
 /*
  * Commands to do_syslog:
  *
...
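For context on the removed code: log_buf_copy() indexed the printk ring buffer by converting a logical offset into the ever-increasing log_end counter, letting LOG_BUF() mask the absolute index down to a buffer position. A minimal stand-alone sketch of that indexing scheme; buffer size and names are illustrative:

/* Minimal sketch of the ring-buffer indexing that log_buf_copy() used. */
#include <stdio.h>

#define BUF_LEN 16				/* power of two, like the real log buffer */
static char log_buf[BUF_LEN];
static unsigned long log_end;			/* ever-increasing character count */
#define LOG_BUF(idx) (log_buf[(idx) & (BUF_LEN - 1)])

static void log_putc(char c)
{
	LOG_BUF(log_end) = c;
	log_end++;
}

/* Copy the last `len` logged characters, oldest first. */
static void log_tail(char *dest, int len)
{
	unsigned long idx = log_end - len;	/* absolute index of the span start */
	int i;

	for (i = 0; i < len; i++)
		dest[i] = LOG_BUF(idx + i);
}

int main(void)
{
	const char *msg = "hello, ring buffer";
	char out[8] = { 0 };

	while (*msg)
		log_putc(*msg++);

	log_tail(out, 6);
	printf("%.6s\n", out);			/* prints "buffer" */
	return 0;
}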
@@ -571,7 +571,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 	}
 }
 
-void reserve_region_with_split(struct resource *root,
+void __init reserve_region_with_split(struct resource *root,
 		resource_size_t start, resource_size_t end,
 		const char *name)
 {
...
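The section mismatch arises because __reserve_region_with_split() lives in .init.text, which is discarded after boot; its caller must therefore be __init as well, or it would reference freed init code. A simplified sketch of what the annotation amounts to; the real __init macro in <linux/init.h> adds further attributes beyond the section placement:

/* Simplified sketch of the __init annotation; section name is illustrative. */
#define INIT_SKETCH __attribute__((__section__(".init.text.sketch")))

static void INIT_SKETCH boot_only_helper(void)
{
	/* one-time setup, discarded after boot in the kernel case */
}

/* A caller of init-only code must itself be init-only; otherwise it would
 * keep a reference into a discarded section, which is the mismatch the
 * build warning flags. */
void INIT_SKETCH boot_only_caller(void)
{
	boot_only_helper();
}

int main(void)
{
	boot_only_caller();
	return 0;
}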
@@ -497,8 +497,10 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
-		panic("swiotlb_alloc_coherent: allocated memory is out of "
-		      "range for device");
+
+		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+		unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+		return NULL;
 	}
 	*dma_handle = dev_addr;
 	return ret;
...
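With the panic gone, an out-of-range allocation is unwound (unmap_single() with DMA_TO_DEVICE releases the bounce buffer without a copy-back) and reported as NULL, which DMA API callers must handle anyway. A hypothetical caller-side sketch of that contract, with malloc standing in for the real allocator:

/* Hypothetical caller-side sketch: dma_alloc_coherent() users must already
 * cope with NULL, which is what makes the panic removable. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *dma_alloc_coherent_sketch(size_t size, unsigned long long *dma_handle)
{
	void *cpu_addr = malloc(size);

	if (!cpu_addr)
		return NULL;	/* failure is now reported, not fatal */

	memset(cpu_addr, 0, size);
	*dma_handle = (unsigned long long)(unsigned long)cpu_addr;
	return cpu_addr;
}

int main(void)
{
	unsigned long long dma_handle;
	void *buf = dma_alloc_coherent_sketch(4096, &dma_handle);

	if (!buf) {
		fprintf(stderr, "coherent DMA allocation failed\n");
		return 1;	/* degrade gracefully instead of panicking */
	}

	free(buf);
	return 0;
}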