Commit 578c1bb9 authored by Juergen Gross, committed by Boris Ostrovsky

xen/xenbus: let xenbus_map_ring_valloc() return errno values only

Today xenbus_map_ring_valloc() can return either a negative errno
value (-ENOMEM or -EINVAL) or a grant status value. This is a mess, as
e.g. -ENOMEM and GNTST_eagain have the same numeric value (-12).

Fix that by turning all grant mapping errors into -ENOENT. This is
no problem, as all callers of xenbus_map_ring_valloc() use the
return value only to print an error message, and in case of a mapping
error the grant status value has already been printed by
__xenbus_map_ring() beforehand.
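
To illustrate the collision, a minimal stand-alone sketch (not part of
the patch); the GNTST_eagain value is copied from
xen/include/interface/grant_table.h, and ENOMEM follows the Linux errno
numbering:

    #include <errno.h>
    #include <stdio.h>

    /* GNTST_eagain is defined as -12 in
     * xen/include/interface/grant_table.h. */
    #define GNTST_eagain (-12)

    int main(void)
    {
            /* Both lines print -12 on Linux, so a caller that gets
             * -12 back cannot tell an allocation failure apart from
             * a "try again" grant status. */
            printf("-ENOMEM      = %d\n", -ENOMEM);
            printf("GNTST_eagain = %d\n", GNTST_eagain);
            return 0;
    }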
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: https://lore.kernel.org/r/20200701121638.19840-3-jgross@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent 3848e4e0
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -456,8 +456,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  * Map @nr_grefs pages of memory into this domain from another
  * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
  * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address. Returns 0 on success, and -errno on
  * error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
@@ -477,18 +476,11 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
 		return -ENOMEM;
 
 	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
-	if (!info->node) {
+	if (!info->node)
 		err = -ENOMEM;
-		goto out;
-	}
-
-	err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
-
-	/* Some hypervisors are buggy and can return 1. */
-	if (err > 0)
-		err = GNTST_general_error;
+	else
+		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
 
- out:
 	kfree(info->node);
 	kfree(info);
 	return err;
@@ -507,7 +499,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
 			      bool *leaked)
 {
 	int i, j;
-	int err = GNTST_okay;
 
 	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
 		return -EINVAL;
@@ -522,7 +513,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
 
 	for (i = 0; i < nr_grefs; i++) {
 		if (info->map[i].status != GNTST_okay) {
-			err = info->map[i].status;
 			xenbus_dev_fatal(dev, info->map[i].status,
 				       "mapping in shared page %d from domain %d",
 				       gnt_refs[i], dev->otherend_id);
@@ -531,7 +521,7 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
 		handles[i] = info->map[i].handle;
 	}
 
-	return GNTST_okay;
+	return 0;
 
  fail:
 	for (i = j = 0; i < nr_grefs; i++) {
@@ -554,7 +544,7 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
 		}
 	}
 
-	return err;
+	return -ENOENT;
 }
 
 /**
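
With this change in place, a caller can treat any non-zero return value
uniformly as a negative errno. A minimal sketch of the resulting calling
pattern; the function name hypothetical_connect and the driver state
around it are hypothetical, not taken from this patch:

    #include <xen/grant_table.h>   /* grant_ref_t */
    #include <xen/xenbus.h>        /* xenbus_map_ring_valloc() */

    static int hypothetical_connect(struct xenbus_device *dev,
                                    grant_ref_t ring_ref)
    {
            void *ring;
            int err;

            /* Map one granted page from the other end into our
             * address space. */
            err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &ring);
            if (err < 0) {
                    /* err is now always a negative errno: -ENOMEM,
                     * -EINVAL, or -ENOENT for a failed grant mapping;
                     * the precise grant status has already been logged
                     * by __xenbus_map_ring() via xenbus_dev_fatal(). */
                    return err;
            }

            /* ... use ring ... */
            return 0;
    }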