Commit 3848e4e0 authored by Juergen Gross, committed by Boris Ostrovsky

xen/xenbus: avoid large structs and arrays on the stack

xenbus_map_ring_valloc() and its sub-functions are putting quite large
structs and arrays on the stack. This is problematic at runtime, but
might also result in build failures (e.g. with clang due to the option
-Werror,-Wframe-larger-than=... used).

Fix that by moving most of the data from the stack into a dynamically
allocated struct. Performance is no issue here, as
xenbus_map_ring_valloc() is used only when adding a new PV device to
a backend driver.

While at it move some duplicated code from pv/hvm specific mapping
functions to the single caller.
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: https://lore.kernel.org/r/20200701121638.19840-2-jgross@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent caef73cf
...@@ -69,11 +69,27 @@ struct xenbus_map_node { ...@@ -69,11 +69,27 @@ struct xenbus_map_node {
unsigned int nr_handles; unsigned int nr_handles;
}; };
/*
 * Per-call working state for xenbus_map_ring_valloc(), heap-allocated so
 * the large grant-table arrays no longer live on the stack (this struct is
 * kzalloc'ed once per mapping request; performance is not a concern here).
 */
struct map_ring_valloc {
	struct xenbus_map_node *node;	/* set to NULL on success once ownership passes to xenbus_valloc_pages */

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	union {
		unsigned long addrs[XENBUS_MAX_RING_GRANTS];	/* HVM: virtual addrs, used for unmap on failure */
		pte_t *ptes[XENBUS_MAX_RING_GRANTS];		/* PV: ptes filled in by alloc_vm_area() */
	};
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];	/* machine/phys addrs handed to gnttab_set_map_op() */

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];	/* batch for gnttab_batch_map() */
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];	/* batch for failure-path unmap */

	unsigned int idx;	/* HVM only. */
};
static DEFINE_SPINLOCK(xenbus_valloc_lock); static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages); static LIST_HEAD(xenbus_valloc_pages);
struct xenbus_ring_ops { struct xenbus_ring_ops {
int (*map)(struct xenbus_device *dev, int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
grant_ref_t *gnt_refs, unsigned int nr_grefs, grant_ref_t *gnt_refs, unsigned int nr_grefs,
void **vaddr); void **vaddr);
int (*unmap)(struct xenbus_device *dev, void *vaddr); int (*unmap)(struct xenbus_device *dev, void *vaddr);
...@@ -449,12 +465,32 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, ...@@ -449,12 +465,32 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr) unsigned int nr_grefs, void **vaddr)
{ {
int err; int err;
struct map_ring_valloc *info;
*vaddr = NULL;
if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
if (!info->node) {
err = -ENOMEM;
goto out;
}
err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
/* Some hypervisors are buggy and can return 1. */ /* Some hypervisors are buggy and can return 1. */
if (err > 0) if (err > 0)
err = GNTST_general_error; err = GNTST_general_error;
out:
kfree(info->node);
kfree(info);
return err; return err;
} }
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
...@@ -466,12 +502,10 @@ static int __xenbus_map_ring(struct xenbus_device *dev, ...@@ -466,12 +502,10 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
grant_ref_t *gnt_refs, grant_ref_t *gnt_refs,
unsigned int nr_grefs, unsigned int nr_grefs,
grant_handle_t *handles, grant_handle_t *handles,
phys_addr_t *addrs, struct map_ring_valloc *info,
unsigned int flags, unsigned int flags,
bool *leaked) bool *leaked)
{ {
struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
int i, j; int i, j;
int err = GNTST_okay; int err = GNTST_okay;
...@@ -479,23 +513,22 @@ static int __xenbus_map_ring(struct xenbus_device *dev, ...@@ -479,23 +513,22 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
return -EINVAL; return -EINVAL;
for (i = 0; i < nr_grefs; i++) { for (i = 0; i < nr_grefs; i++) {
memset(&map[i], 0, sizeof(map[i])); gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i], gnt_refs[i], dev->otherend_id);
dev->otherend_id);
handles[i] = INVALID_GRANT_HANDLE; handles[i] = INVALID_GRANT_HANDLE;
} }
gnttab_batch_map(map, i); gnttab_batch_map(info->map, i);
for (i = 0; i < nr_grefs; i++) { for (i = 0; i < nr_grefs; i++) {
if (map[i].status != GNTST_okay) { if (info->map[i].status != GNTST_okay) {
err = map[i].status; err = info->map[i].status;
xenbus_dev_fatal(dev, map[i].status, xenbus_dev_fatal(dev, info->map[i].status,
"mapping in shared page %d from domain %d", "mapping in shared page %d from domain %d",
gnt_refs[i], dev->otherend_id); gnt_refs[i], dev->otherend_id);
goto fail; goto fail;
} else } else
handles[i] = map[i].handle; handles[i] = info->map[i].handle;
} }
return GNTST_okay; return GNTST_okay;
...@@ -503,19 +536,19 @@ static int __xenbus_map_ring(struct xenbus_device *dev, ...@@ -503,19 +536,19 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
fail: fail:
for (i = j = 0; i < nr_grefs; i++) { for (i = j = 0; i < nr_grefs; i++) {
if (handles[i] != INVALID_GRANT_HANDLE) { if (handles[i] != INVALID_GRANT_HANDLE) {
memset(&unmap[j], 0, sizeof(unmap[j])); gnttab_set_unmap_op(&info->unmap[j],
gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i], info->phys_addrs[i],
GNTMAP_host_map, handles[i]); GNTMAP_host_map, handles[i]);
j++; j++;
} }
} }
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j)) if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
BUG(); BUG();
*leaked = false; *leaked = false;
for (i = 0; i < j; i++) { for (i = 0; i < j; i++) {
if (unmap[i].status != GNTST_okay) { if (info->unmap[i].status != GNTST_okay) {
*leaked = true; *leaked = true;
break; break;
} }
...@@ -566,21 +599,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles, ...@@ -566,21 +599,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
return err; return err;
} }
struct map_ring_valloc_hvm
{
unsigned int idx;
/* Why do we need two arrays? See comment of __xenbus_map_ring */
phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};
static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
unsigned int goffset, unsigned int goffset,
unsigned int len, unsigned int len,
void *data) void *data)
{ {
struct map_ring_valloc_hvm *info = data; struct map_ring_valloc *info = data;
unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
info->phys_addrs[info->idx] = vaddr; info->phys_addrs[info->idx] = vaddr;
...@@ -589,39 +613,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, ...@@ -589,39 +613,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
info->idx++; info->idx++;
} }
static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, static int xenbus_map_ring_hvm(struct xenbus_device *dev,
struct map_ring_valloc *info,
grant_ref_t *gnt_ref, grant_ref_t *gnt_ref,
unsigned int nr_grefs, unsigned int nr_grefs,
void **vaddr) void **vaddr)
{ {
struct xenbus_map_node *node; struct xenbus_map_node *node = info->node;
int err; int err;
void *addr; void *addr;
bool leaked = false; bool leaked = false;
struct map_ring_valloc_hvm info = {
.idx = 0,
};
unsigned int nr_pages = XENBUS_PAGES(nr_grefs); unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
*vaddr = NULL;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
if (err) if (err)
goto out_err; goto out_err;
gnttab_foreach_grant(node->hvm.pages, nr_grefs, gnttab_foreach_grant(node->hvm.pages, nr_grefs,
xenbus_map_ring_setup_grant_hvm, xenbus_map_ring_setup_grant_hvm,
&info); info);
err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles, err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
info.phys_addrs, GNTMAP_host_map, &leaked); info, GNTMAP_host_map, &leaked);
node->nr_handles = nr_grefs; node->nr_handles = nr_grefs;
if (err) if (err)
...@@ -641,11 +654,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, ...@@ -641,11 +654,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
spin_unlock(&xenbus_valloc_lock); spin_unlock(&xenbus_valloc_lock);
*vaddr = addr; *vaddr = addr;
info->node = NULL;
return 0; return 0;
out_xenbus_unmap_ring: out_xenbus_unmap_ring:
if (!leaked) if (!leaked)
xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs); xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
else else
pr_alert("leaking %p size %u page(s)", pr_alert("leaking %p size %u page(s)",
addr, nr_pages); addr, nr_pages);
...@@ -653,7 +668,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, ...@@ -653,7 +668,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
if (!leaked) if (!leaked)
free_xenballooned_pages(nr_pages, node->hvm.pages); free_xenballooned_pages(nr_pages, node->hvm.pages);
out_err: out_err:
kfree(node);
return err; return err;
} }
...@@ -676,40 +690,30 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) ...@@ -676,40 +690,30 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, static int xenbus_map_ring_pv(struct xenbus_device *dev,
struct map_ring_valloc *info,
grant_ref_t *gnt_refs, grant_ref_t *gnt_refs,
unsigned int nr_grefs, unsigned int nr_grefs,
void **vaddr) void **vaddr)
{ {
struct xenbus_map_node *node; struct xenbus_map_node *node = info->node;
struct vm_struct *area; struct vm_struct *area;
pte_t *ptes[XENBUS_MAX_RING_GRANTS];
phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
int err = GNTST_okay; int err = GNTST_okay;
int i; int i;
bool leaked; bool leaked;
*vaddr = NULL; area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
if (!area) { if (!area) {
kfree(node); kfree(node);
return -ENOMEM; return -ENOMEM;
} }
for (i = 0; i < nr_grefs; i++) for (i = 0; i < nr_grefs; i++)
phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr; info->phys_addrs[i] =
arbitrary_virt_to_machine(info->ptes[i]).maddr;
err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles, err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
phys_addrs, info, GNTMAP_host_map | GNTMAP_contains_pte,
GNTMAP_host_map | GNTMAP_contains_pte,
&leaked); &leaked);
if (err) if (err)
goto failed; goto failed;
...@@ -722,6 +726,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, ...@@ -722,6 +726,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
spin_unlock(&xenbus_valloc_lock); spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr; *vaddr = area->addr;
info->node = NULL;
return 0; return 0;
failed: failed:
...@@ -730,11 +736,10 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, ...@@ -730,11 +736,10 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
else else
pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs); pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
kfree(node);
return err; return err;
} }
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{ {
struct xenbus_map_node *node; struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
...@@ -798,12 +803,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) ...@@ -798,12 +803,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
} }
static const struct xenbus_ring_ops ring_ops_pv = { static const struct xenbus_ring_ops ring_ops_pv = {
.map = xenbus_map_ring_valloc_pv, .map = xenbus_map_ring_pv,
.unmap = xenbus_unmap_ring_vfree_pv, .unmap = xenbus_unmap_ring_pv,
}; };
#endif #endif
struct unmap_ring_vfree_hvm struct unmap_ring_hvm
{ {
unsigned int idx; unsigned int idx;
unsigned long addrs[XENBUS_MAX_RING_GRANTS]; unsigned long addrs[XENBUS_MAX_RING_GRANTS];
...@@ -814,19 +819,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn, ...@@ -814,19 +819,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
unsigned int len, unsigned int len,
void *data) void *data)
{ {
struct unmap_ring_vfree_hvm *info = data; struct unmap_ring_hvm *info = data;
info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn); info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
info->idx++; info->idx++;
} }
static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{ {
int rv; int rv;
struct xenbus_map_node *node; struct xenbus_map_node *node;
void *addr; void *addr;
struct unmap_ring_vfree_hvm info = { struct unmap_ring_hvm info = {
.idx = 0, .idx = 0,
}; };
unsigned int nr_pages; unsigned int nr_pages;
...@@ -887,8 +892,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path) ...@@ -887,8 +892,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
EXPORT_SYMBOL_GPL(xenbus_read_driver_state); EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
static const struct xenbus_ring_ops ring_ops_hvm = { static const struct xenbus_ring_ops ring_ops_hvm = {
.map = xenbus_map_ring_valloc_hvm, .map = xenbus_map_ring_hvm,
.unmap = xenbus_unmap_ring_vfree_hvm, .unmap = xenbus_unmap_ring_hvm,
}; };
void __init xenbus_ring_ops_init(void) void __init xenbus_ring_ops_init(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment