Commit 4f1063f6 authored by Nadav Amit, committed by Kleber Sacilotto de Souza

vmw_balloon: fix inflation of 64-bit GFNs

BugLink: https://bugs.launchpad.net/bugs/1792419

commit 09755690 upstream.

When balloon batching is not supported by the hypervisor, the guest
frame number (GFN) must fit in 32 bits. However, due to a bug, the
result of this check was mistakenly ignored. In practice the balloon
currently does not work when total RAM exceeds 16TB, so the bug is
unlikely to be hit.

Fixes: ef0f8f11 ("VMware balloon: partially inline vmballoon_reserve_page.")
Cc: stable@vger.kernel.org
Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com>
Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
parent 4d7cefae
drivers/misc/vmw_balloon.c
@@ -450,7 +450,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
         pfn32 = (u32)pfn;
         if (pfn32 != pfn)
-                return -1;
+                return -EINVAL;
         STATS_INC(b->stats.lock[false]);
@@ -460,7 +460,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
         pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
         STATS_INC(b->stats.lock_fail[false]);
-        return 1;
+        return -EIO;
 }
 static int vmballoon_send_batched_lock(struct vmballoon *b,
@@ -597,11 +597,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
         locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
                                                                 target);
-        if (locked > 0) {
+        if (locked) {
                 STATS_INC(b->stats.refused_alloc[false]);
-                if (hv_status == VMW_BALLOON_ERROR_RESET ||
-                                hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+                if (locked == -EIO &&
+                    (hv_status == VMW_BALLOON_ERROR_RESET ||
+                     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
                         vmballoon_free_page(page, false);
                         return -EIO;
                 }
@@ -617,7 +618,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
                 } else {
                         vmballoon_free_page(page, false);
                 }
-                return -EIO;
+                return locked;
         }
         /* track allocated page */