Commit a25b9316 authored by Dave Hansen, committed by H. Peter Anvin

x86, mm: Make DEBUG_VIRTUAL work earlier in boot

The KVM code has some repeated bugs in it around the use of __pa() on
per-cpu data.  That data is not in an area where using __pa() is
valid.  However, the offending calls also happen early enough in
boot that __vmalloc_start_set is not yet set, and thus the
CONFIG_DEBUG_VIRTUAL checks do not catch them.
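
To illustrate the class of bug being caught, the hypothetical snippet
below hands the address of a per-cpu object to __pa() during early CPU
setup.  The structure, variable, and function names here are invented
for illustration; only the __pa()-on-percpu pattern mirrors the actual
KVM bugs.

#include <linux/types.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <asm/page.h>

struct my_msr_data {
	u64 val;
};

/* Hypothetical per-cpu data, analogous to KVM's per-cpu MSR state. */
static DEFINE_PER_CPU(struct my_msr_data, my_msr_data);

static void __init my_early_cpu_init(void)
{
	struct my_msr_data *md = this_cpu_ptr(&my_msr_data);
	u64 pa;

	/*
	 * Buggy: per-cpu data is not in an area where __pa() is valid.
	 * Before this patch, CONFIG_DEBUG_VIRTUAL stayed silent here
	 * because __vmalloc_start_set is still 0 this early in boot.
	 */
	pa = __pa(md);

	/* ... hand 'pa' to the (hypothetical) hypervisor interface ... */
	(void)pa;
}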

This adds a check to also verify __pa() calls against max_low_pfn,
which we can use earlier in boot than is_vmalloc_addr().  However,
if we are super-early in boot, max_low_pfn=0 and this will trip
on every call, so also make sure that max_low_pfn is set before
we try to use it.

With this patch applied, CONFIG_DEBUG_VIRTUAL will actually
catch the bug I was chasing (and which I fix later in this series).

I'd love to find a generic way so that any __pa() call on percpu
areas could do a BUG_ON(), but there don't appear to be any nice
and easy ways to check if an address is a percpu one.  Anybody
have ideas on a way to do this?
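
One partial, untested idea (not part of this patch): the existing
is_kernel_percpu_address() helper can tell whether an address falls
inside one of the static per-cpu areas, so a check along the lines of
the sketch below could be wired into the DEBUG_VIRTUAL path.  It would
not cover dynamically allocated per-cpu memory, and its cost has not
been measured.

#include <linux/percpu.h>
#include <linux/mmdebug.h>

/*
 * Sketch only: trip DEBUG_VIRTUAL when __pa() is handed an address
 * inside a static per-cpu area.  is_kernel_percpu_address() only
 * knows about the static areas, so dynamically allocated per-cpu
 * memory would still slip through.
 */
static inline void virt_check_not_percpu(unsigned long x)
{
	VIRTUAL_BUG_ON(is_kernel_percpu_address(x));
}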
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20130122212430.F46F8159@kernel.stglabs.ibm.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 7b5c4a65
@@ -219,7 +219,7 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
 	 */
 	nd = alloc_remap(nid, nd_size);
 	if (nd) {
-		nd_pa = __pa(nd);
+		nd_pa = __phys_addr_nodebug(nd);
 		remapped = true;
 	} else {
 		nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
@@ -560,10 +560,10 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 {
 	unsigned long id_sz;
 
-	if (base >= __pa(high_memory))
+	if (base > __pa(high_memory-1))
 		return 0;
 
-	id_sz = (__pa(high_memory) < base + size) ?
+	id_sz = (__pa(high_memory-1) <= base + size) ?
 				__pa(high_memory) - base :
 				size;
@@ -1,3 +1,4 @@
+#include <linux/bootmem.h>
 #include <linux/mmdebug.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -68,10 +69,16 @@ EXPORT_SYMBOL(__virt_addr_valid);
 #ifdef CONFIG_DEBUG_VIRTUAL
 unsigned long __phys_addr(unsigned long x)
 {
+	unsigned long phys_addr = x - PAGE_OFFSET;
 	/* VMALLOC_* aren't constants  */
 	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
 	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
-	return x - PAGE_OFFSET;
+	/* max_low_pfn is set early, but not _that_ early */
+	if (max_low_pfn) {
+		VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn);
+		BUG_ON(slow_virt_to_phys((void *)x) != phys_addr);
+	}
+	return phys_addr;
 }
 EXPORT_SYMBOL(__phys_addr);
 #endif