Commit 6d60ce38 authored by Karol Herbst, committed by Ingo Molnar

x86/mm/kmmio: Fix mmiotrace for page unaligned addresses

If something calls ioremap() with an address not aligned to PAGE_SIZE, the
returned address might not be aligned either. This led to a probe being
registered on exactly the returned, unaligned address, while the entire page
was armed for mmiotracing.

On calling iounmap(), the address passed to unregister_kmmio_probe() was
PAGE_SIZE aligned by the caller, leading to a complete freeze of the
machine.
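
For illustration only, here is a minimal userspace sketch of the address
arithmetic behind the bug. The 4 KiB page size, the addresses and the single
lookup key are made up and merely stand in for the kmmio probe bookkeeping:

  #include <stdio.h>

  #define PAGE_SIZE 4096UL
  #define PAGE_MASK (~(PAGE_SIZE - 1))

  int main(void)
  {
          /* ioremap() of an unaligned address may return an unaligned
           * virtual address as well (made-up example value). */
          unsigned long mapped = 0xffffc90000001a30UL;

          /* The probe used to be keyed on the exact, unaligned address... */
          unsigned long probe_key = mapped;

          /* ...but iounmap() page aligned the address before the probe was
           * unregistered, so the lookup key no longer matched. */
          unsigned long unregister_key = mapped & PAGE_MASK;

          printf("probe registered at 0x%lx\n", probe_key);
          printf("unregister looks up 0x%lx -> %s\n", unregister_key,
                 probe_key == unregister_key ? "found" : "not found");
          return 0;
  }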

We should always page align addresses while (un)registering mappings,
because the mmiotracer works on top of pages, not mappings. We still keep
track of the probes based on their real addresses and lengths though,
because mmiotrace still needs to know which memory regions are actually
mapped.
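
A rough userspace sketch of that arithmetic (again with an assumed 4 KiB
page size and made-up addresses; the kernel code in the diff below steps by
page_level_size() instead): a page-aligned base plus a limit extended by the
in-page offset still covers every page of the real, unaligned mapping:

  #include <stdio.h>

  #define PAGE_SIZE 4096UL
  #define PAGE_MASK (~(PAGE_SIZE - 1))

  int main(void)
  {
          /* Hypothetical unaligned mapping: real address and real length. */
          unsigned long addr = 0xffffc90000001a30UL;
          unsigned long len  = 0x2000UL;

          /* Page align the base, but grow the limit by the in-page offset
           * so the loop still reaches the last byte of the mapping. */
          unsigned long base     = addr & PAGE_MASK;
          unsigned long size_lim = len + (addr & ~PAGE_MASK);
          unsigned long size;

          for (size = 0; size < size_lim; size += PAGE_SIZE)
                  printf("arm page at 0x%lx\n", base + size);

          return 0;
  }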

Also move the call to mmiotrace_iounmap() prior to page aligning the
address, so that all probes are unregistered properly; otherwise the kernel
ends up failing memory allocations randomly after disabling the mmiotracer.
Tested-by: Lyude <lyude@redhat.com>
Signed-off-by: Karol Herbst <kherbst@redhat.com>
Acked-by: Pekka Paalanen <ppaalanen@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: nouveau@lists.freedesktop.org
Link: http://lkml.kernel.org/r/20171127075139.4928-1-kherbst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6d7e0ba2
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -404,11 +404,11 @@ void iounmap(volatile void __iomem *addr)
                 return;
         }
 
+        mmiotrace_iounmap(addr);
+
         addr = (volatile void __iomem *)
                 (PAGE_MASK & (unsigned long __force)addr);
 
-        mmiotrace_iounmap(addr);
-
         /* Use the vm area unlocked, assuming the caller
            ensures there isn't another iounmap for the same address
            in parallel. Reuse of the virtual address is prevented by
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -435,17 +435,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
         unsigned long flags;
         int ret = 0;
         unsigned long size = 0;
+        unsigned long addr = p->addr & PAGE_MASK;
         const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
         unsigned int l;
         pte_t *pte;
 
         spin_lock_irqsave(&kmmio_lock, flags);
-        if (get_kmmio_probe(p->addr)) {
+        if (get_kmmio_probe(addr)) {
                 ret = -EEXIST;
                 goto out;
         }
 
-        pte = lookup_address(p->addr, &l);
+        pte = lookup_address(addr, &l);
         if (!pte) {
                 ret = -EINVAL;
                 goto out;
@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
         kmmio_count++;
         list_add_rcu(&p->list, &kmmio_probes);
         while (size < size_lim) {
-                if (add_kmmio_fault_page(p->addr + size))
+                if (add_kmmio_fault_page(addr + size))
                         pr_err("Unable to set page fault.\n");
                 size += page_level_size(l);
         }
@@ -528,19 +529,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
 {
         unsigned long flags;
         unsigned long size = 0;
+        unsigned long addr = p->addr & PAGE_MASK;
         const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
         struct kmmio_fault_page *release_list = NULL;
         struct kmmio_delayed_release *drelease;
         unsigned int l;
         pte_t *pte;
 
-        pte = lookup_address(p->addr, &l);
+        pte = lookup_address(addr, &l);
         if (!pte)
                 return;
 
         spin_lock_irqsave(&kmmio_lock, flags);
         while (size < size_lim) {
-                release_kmmio_fault_page(p->addr + size, &release_list);
+                release_kmmio_fault_page(addr + size, &release_list);
                 size += page_level_size(l);
         }
         list_del_rcu(&p->list);