Commit 0182a438 authored by Linus Torvalds's avatar Linus Torvalds

Revert the move of ptrinfo - it may make NOMMU compile, but it
breaks everybody else.

Instead, fix the NOMMU case by avoiding the part that walks page
tables.
parent f37c1ddd
......@@ -1701,76 +1701,3 @@ struct page * vmalloc_to_page(void * vmalloc_addr)
}
EXPORT_SYMBOL(vmalloc_to_page);
/*
 * ptrinfo - dump debugging information about a kernel virtual address.
 *
 * Prints (via printk) the page-table entries mapping @addr, the backing
 * struct page, and - if the page belongs to a slab cache - the cache,
 * slab, and object containing the address.  Debug aid only; no return
 * value and no side effects beyond log output.
 */
void ptrinfo(unsigned long addr)
{
struct page *page;
printk("Dumping data about address %p.\n", (void*)addr);
/* Reject addresses outside the directly-mapped kernel range. */
if (!virt_addr_valid((void*)addr)) {
printk("virt addr invalid.\n");
return;
}
/* do { } while(0) gives the page-table walk a breakable scope. */
do {
pgd_t *pgd = pgd_offset_k(addr);
pmd_t *pmd;
if (pgd_none(*pgd)) {
printk("No pgd.\n");
break;
}
pmd = pmd_offset(pgd, addr);
if (pmd_none(*pmd)) {
printk("No pmd.\n");
break;
}
#ifdef CONFIG_X86
/* x86 may map this region with a large (2M/4M) page: no pte level. */
if (pmd_large(*pmd)) {
printk("Large page.\n");
break;
}
#endif
printk("normal page, pte_val 0x%llx\n",
(unsigned long long)pte_val(*pte_offset_kernel(pmd, addr)));
} while(0);
page = virt_to_page((void*)addr);
printk("struct page at %p, flags %lxh.\n", page, page->flags);
if (PageSlab(page)) {
kmem_cache_t *c;
struct slab *s;
unsigned long flags;
int objnr;
void *objp;
/* Cache and slab pointers are stashed in the struct page by the
 * slab allocator; these macros recover them. */
c = GET_PAGE_CACHE(page);
printk("belongs to cache %s.\n",c->name);
/* Hold the cache lock so the slab's object bookkeeping is stable
 * while we read and sanity-check it. */
spin_lock_irqsave(&c->spinlock, flags);
s = GET_PAGE_SLAB(page);
printk("slabp %p with %d inuse objects (from %d).\n",
s, s->inuse, c->num);
check_slabp(c,s);
/* Object index = offset of addr within the slab's object area. */
objnr = (addr-(unsigned long)s->s_mem)/c->objsize;
objp = s->s_mem+c->objsize*objnr;
/* NOTE(review): objnr/objp are printed before the range check
 * below, so a bad address prints a bogus object pointer first. */
printk("points into object no %d, starting at %p, len %d.\n",
objnr, objp, c->objsize);
if (objnr >= c->num) {
printk("Bad obj number.\n");
} else {
/* Re-map the object's pages in case debug page-unmapping
 * (DEBUG_PAGEALLOC) left them inaccessible. */
kernel_map_pages(virt_to_page(objp),
c->objsize/PAGE_SIZE, 1);
if (c->flags & SLAB_RED_ZONE)
printk("redzone: 0x%lx/0x%lx.\n",
*dbg_redzone1(c, objp),
*dbg_redzone2(c, objp));
if (c->flags & SLAB_STORE_USER)
printk("Last user: %p.\n",
*dbg_userword(c, objp));
}
spin_unlock_irqrestore(&c->spinlock, flags);
}
}
......@@ -2763,3 +2763,78 @@ unsigned int ksize(const void *objp)
return size;
}
/*
 * ptrinfo - dump debugging information about a kernel virtual address.
 *
 * Prints (via printk) the page-table entries mapping @addr (MMU builds
 * only), the backing struct page, and - if the page belongs to a slab
 * cache - the cache, slab, and object containing the address.  Debug
 * aid only; no return value and no side effects beyond log output.
 */
void ptrinfo(unsigned long addr)
{
struct page *page;
printk("Dumping data about address %p.\n", (void*)addr);
/* Reject addresses outside the directly-mapped kernel range. */
if (!virt_addr_valid((void*)addr)) {
printk("virt addr invalid.\n");
return;
}
/* NOMMU kernels have no kernel page tables to walk, so the whole
 * walk is compiled out there; this is what lets NOMMU build. */
#ifdef CONFIG_MMU
/* do { } while(0) gives the page-table walk a breakable scope. */
do {
pgd_t *pgd = pgd_offset_k(addr);
pmd_t *pmd;
if (pgd_none(*pgd)) {
printk("No pgd.\n");
break;
}
pmd = pmd_offset(pgd, addr);
if (pmd_none(*pmd)) {
printk("No pmd.\n");
break;
}
#ifdef CONFIG_X86
/* x86 may map this region with a large (2M/4M) page: no pte level. */
if (pmd_large(*pmd)) {
printk("Large page.\n");
break;
}
#endif
printk("normal page, pte_val 0x%llx\n",
(unsigned long long)pte_val(*pte_offset_kernel(pmd, addr)));
} while(0);
#endif
page = virt_to_page((void*)addr);
printk("struct page at %p, flags %lxh.\n", page, page->flags);
if (PageSlab(page)) {
kmem_cache_t *c;
struct slab *s;
unsigned long flags;
int objnr;
void *objp;
/* Cache and slab pointers are stashed in the struct page by the
 * slab allocator; these macros recover them. */
c = GET_PAGE_CACHE(page);
printk("belongs to cache %s.\n",c->name);
/* Hold the cache lock so the slab's object bookkeeping is stable
 * while we read and sanity-check it. */
spin_lock_irqsave(&c->spinlock, flags);
s = GET_PAGE_SLAB(page);
printk("slabp %p with %d inuse objects (from %d).\n",
s, s->inuse, c->num);
check_slabp(c,s);
/* Object index = offset of addr within the slab's object area. */
objnr = (addr-(unsigned long)s->s_mem)/c->objsize;
objp = s->s_mem+c->objsize*objnr;
/* NOTE(review): objnr/objp are printed before the range check
 * below, so a bad address prints a bogus object pointer first. */
printk("points into object no %d, starting at %p, len %d.\n",
objnr, objp, c->objsize);
if (objnr >= c->num) {
printk("Bad obj number.\n");
} else {
/* Re-map the object's pages in case debug page-unmapping
 * (DEBUG_PAGEALLOC) left them inaccessible. */
kernel_map_pages(virt_to_page(objp),
c->objsize/PAGE_SIZE, 1);
if (c->flags & SLAB_RED_ZONE)
printk("redzone: 0x%lx/0x%lx.\n",
*dbg_redzone1(c, objp),
*dbg_redzone2(c, objp));
if (c->flags & SLAB_STORE_USER)
printk("Last user: %p.\n",
*dbg_userword(c, objp));
}
spin_unlock_irqrestore(&c->spinlock, flags);
}
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment