Commit 89c29def authored by Alex Williamson

Revert "vfio/type1: Improve memory pinning process for raw PFN mapping"

Bisection by Amadeusz Sławiński implicates this commit as the cause of bad
page state issues after VM shutdown, likely due to unbalanced page
references.  The original commit was intended only as a performance
improvement, so revert it for offline rework.

Link: https://lkml.org/lkml/2018/6/2/97
Fixes: 356e88eb ("vfio/type1: Improve memory pinning process for raw PFN mapping")
Cc: Jason Cai (Xiang Feng) <jason.cai@linux.alibaba.com>
Reported-by: Amadeusz Sławiński <amade@asmblr.net>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 0512e013
@@ -404,6 +404,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 {
 	unsigned long pfn = 0;
 	long ret, pinned = 0, lock_acct = 0;
+	bool rsvd;
 	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
 
 	/* This code path is only user initiated */
@@ -414,23 +415,14 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 	if (ret)
 		return ret;
 
-	if (is_invalid_reserved_pfn(*pfn_base)) {
-		struct vm_area_struct *vma;
-
-		down_read(&current->mm->mmap_sem);
-		vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
-		pinned = min_t(long, npage, vma_pages(vma));
-		up_read(&current->mm->mmap_sem);
-		return pinned;
-	}
-
 	pinned++;
+	rsvd = is_invalid_reserved_pfn(*pfn_base);
 
 	/*
 	 * Reserved pages aren't counted against the user, externally pinned
 	 * pages are already counted against the user.
 	 */
-	if (!vfio_find_vpfn(dma, iova)) {
+	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
 		if (!lock_cap && current->mm->locked_vm + 1 > limit) {
 			put_pfn(*pfn_base, dma->prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
@@ -450,12 +442,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 		if (ret)
 			break;
 
-		if (pfn != *pfn_base + pinned) {
+		if (pfn != *pfn_base + pinned ||
+		    rsvd != is_invalid_reserved_pfn(pfn)) {
 			put_pfn(pfn, dma->prot);
 			break;
 		}
 
-		if (!vfio_find_vpfn(dma, iova)) {
+		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
 			if (!lock_cap &&
 			    current->mm->locked_vm + lock_acct + 1 > limit) {
 				put_pfn(pfn, dma->prot);
@@ -473,8 +466,10 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 
 unpin_out:
 	if (ret) {
-		for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
-			put_pfn(pfn, dma->prot);
+		if (!rsvd) {
+			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
+				put_pfn(pfn, dma->prot);
+		}
 
 		return ret;
 	}
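
For illustration only, below is a minimal userspace sketch of the invariant the revert restores in vfio_pin_pages_remote(): a single pinned range never mixes reserved and non-reserved PFNs, and only non-reserved pages are charged toward the user's locked-memory accounting. This is not the kernel code; pfn_is_reserved(), its threshold, and pin_range() are hypothetical stand-ins for is_invalid_reserved_pfn() and the real pinning loop.

/* Sketch only: mirrors the restored "stop at a reserved/non-reserved
 * boundary" check from the diff above, with hypothetical helpers. */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for is_invalid_reserved_pfn(): pretend PFNs at or
 * above a fixed threshold are raw/reserved mappings (e.g. device MMIO). */
static bool pfn_is_reserved(unsigned long pfn)
{
	return pfn >= 0x100000;
}

/* Walk a contiguous candidate range starting at base, stopping when the
 * reserved state differs from that of the base PFN; charge only normal
 * pages to the caller's locked-memory count. */
static long pin_range(unsigned long base, long npage, long *lock_acct)
{
	bool rsvd = pfn_is_reserved(base);
	long pinned;

	for (pinned = 0; pinned < npage; pinned++) {
		unsigned long pfn = base + pinned;

		if (pfn_is_reserved(pfn) != rsvd)
			break;		/* boundary: end this range here */

		if (!rsvd)
			(*lock_acct)++;	/* only normal pages are charged */
	}

	return pinned;
}

int main(void)
{
	long lock_acct = 0;
	/* A range that crosses from normal pages into the "reserved" region. */
	long pinned = pin_range(0xffffe, 8, &lock_acct);

	printf("pinned %ld pages, charged %ld to locked_vm\n", pinned, lock_acct);
	return 0;
}

By contrast, the removed fast path in the diff above assumed that a range starting on a reserved PFN could be reported as pinned up to the size of the whole VMA, without this per-page check.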