Commit 6db20ea8 authored by Ohad Ben-Cohen

remoteproc: allocate vrings on demand, free when not needed

Dynamically allocate the vrings' DMA when the remote processor
is about to be powered on (i.e. when ->find_vqs() is invoked),
and release them as soon as it is powered off (i.e. when ->del_vqs()
is invoked).

The obvious and immediate benefit is better memory utilization, since
memory for the vrings is now only allocated when the relevant remote
processor is used.

Additionally, this approach also makes recovery of a (crashing)
remote processor easier: one just needs to remove the relevant
vdevs, and the entire vrings cleanup takes place automagically.
Tested-by: Fernando Guzman Lugo <fernando.lugo@ti.com>
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
parent 485802a6
...@@ -279,34 +279,17 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len) ...@@ -279,34 +279,17 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
return ret; return ret;
} }
static int int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
__rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{ {
struct rproc *rproc = rvdev->rproc; struct rproc *rproc = rvdev->rproc;
struct device *dev = rproc->dev; struct device *dev = rproc->dev;
struct fw_rsc_vdev_vring *vring = &rsc->vring[i]; struct rproc_vring *rvring = &rvdev->vring[i];
dma_addr_t dma; dma_addr_t dma;
void *va; void *va;
int ret, size, notifyid; int ret, size, notifyid;
dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
i, vring->da, vring->num, vring->align);
/* make sure reserved bytes are zeroes */
if (vring->reserved) {
dev_err(dev, "vring rsc has non zero reserved bytes\n");
return -EINVAL;
}
/* verify queue size and vring alignment are sane */
if (!vring->num || !vring->align) {
dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
vring->num, vring->align);
return -EINVAL;
}
/* actual size of vring (in bytes) */ /* actual size of vring (in bytes) */
size = PAGE_ALIGN(vring_size(vring->num, vring->align)); size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) { if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
dev_err(dev, "idr_pre_get failed\n"); dev_err(dev, "idr_pre_get failed\n");
...@@ -316,6 +299,7 @@ __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) ...@@ -316,6 +299,7 @@ __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
/* /*
* Allocate non-cacheable memory for the vring. In the future * Allocate non-cacheable memory for the vring. In the future
* this call will also configure the IOMMU for us * this call will also configure the IOMMU for us
* TODO: let the rproc know the da of this vring
*/ */
va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL); va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
if (!va) { if (!va) {
...@@ -323,44 +307,67 @@ __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) ...@@ -323,44 +307,67 @@ __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
return -EINVAL; return -EINVAL;
} }
/* assign an rproc-wide unique index for this vring */ /*
/* TODO: assign a notifyid for rvdev updates as well */ * Assign an rproc-wide unique index for this vring
ret = idr_get_new(&rproc->notifyids, &rvdev->vring[i], &notifyid); * TODO: assign a notifyid for rvdev updates as well
* TODO: let the rproc know the notifyid of this vring
* TODO: support predefined notifyids (via resource table)
*/
ret = idr_get_new(&rproc->notifyids, rvring, &notifyid);
if (ret) { if (ret) {
dev_err(dev, "idr_get_new failed: %d\n", ret); dev_err(dev, "idr_get_new failed: %d\n", ret);
dma_free_coherent(dev, size, va, dma); dma_free_coherent(dev, size, va, dma);
return ret; return ret;
} }
/* let the rproc know the da and notifyid of this vring */
/* TODO: expose this to remote processor */
vring->da = dma;
vring->notifyid = notifyid;
dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va, dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va,
dma, size, notifyid); dma, size, notifyid);
rvdev->vring[i].len = vring->num; rvring->va = va;
rvdev->vring[i].align = vring->align; rvring->dma = dma;
rvdev->vring[i].va = va; rvring->notifyid = notifyid;
rvdev->vring[i].dma = dma;
rvdev->vring[i].notifyid = notifyid;
rvdev->vring[i].rvdev = rvdev;
return 0; return 0;
} }
static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i) static int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{ {
struct rproc *rproc = rvdev->rproc; struct rproc *rproc = rvdev->rproc;
struct device *dev = rproc->dev;
for (i--; i >= 0; i--) { struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
struct rproc_vring *rvring = &rvdev->vring[i]; struct rproc_vring *rvring = &rvdev->vring[i];
dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
i, vring->da, vring->num, vring->align);
/* make sure reserved bytes are zeroes */
if (vring->reserved) {
dev_err(dev, "vring rsc has non zero reserved bytes\n");
return -EINVAL;
}
/* verify queue size and vring alignment are sane */
if (!vring->num || !vring->align) {
dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
vring->num, vring->align);
return -EINVAL;
}
rvring->len = vring->num;
rvring->align = vring->align;
rvring->rvdev = rvdev;
return 0;
}
void rproc_free_vring(struct rproc_vring *rvring)
{
int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
struct rproc *rproc = rvring->rvdev->rproc;
dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma); dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma);
idr_remove(&rproc->notifyids, rvring->notifyid); idr_remove(&rproc->notifyids, rvring->notifyid);
}
} }
/** /**
...@@ -425,11 +432,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, ...@@ -425,11 +432,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
rvdev->rproc = rproc; rvdev->rproc = rproc;
/* allocate the vrings */ /* parse the vrings */
for (i = 0; i < rsc->num_of_vrings; i++) { for (i = 0; i < rsc->num_of_vrings; i++) {
ret = __rproc_handle_vring(rvdev, rsc, i); ret = rproc_parse_vring(rvdev, rsc, i);
if (ret) if (ret)
goto free_vrings; goto free_rvdev;
} }
/* remember the device features */ /* remember the device features */
...@@ -440,12 +447,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, ...@@ -440,12 +447,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* it is now safe to add the virtio device */ /* it is now safe to add the virtio device */
ret = rproc_add_virtio_dev(rvdev, rsc->id); ret = rproc_add_virtio_dev(rvdev, rsc->id);
if (ret) if (ret)
goto free_vrings; goto free_rvdev;
return 0; return 0;
free_vrings: free_rvdev:
__rproc_free_vrings(rvdev, i);
kfree(rvdev); kfree(rvdev);
return ret; return ret;
} }
...@@ -1264,18 +1270,11 @@ EXPORT_SYMBOL(rproc_shutdown); ...@@ -1264,18 +1270,11 @@ EXPORT_SYMBOL(rproc_shutdown);
void rproc_release(struct kref *kref) void rproc_release(struct kref *kref)
{ {
struct rproc *rproc = container_of(kref, struct rproc, refcount); struct rproc *rproc = container_of(kref, struct rproc, refcount);
struct rproc_vdev *rvdev, *rvtmp;
dev_info(rproc->dev, "removing %s\n", rproc->name); dev_info(rproc->dev, "removing %s\n", rproc->name);
rproc_delete_debug_dir(rproc); rproc_delete_debug_dir(rproc);
/* clean up remote vdev entries */
list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) {
__rproc_free_vrings(rvdev, RVDEV_NUM_VRINGS);
list_del(&rvdev->node);
}
/* /*
* At this point no one holds a reference to rproc anymore, * At this point no one holds a reference to rproc anymore,
* so we can directly unroll rproc_alloc() * so we can directly unroll rproc_alloc()
...@@ -1546,7 +1545,7 @@ EXPORT_SYMBOL(rproc_free); ...@@ -1546,7 +1545,7 @@ EXPORT_SYMBOL(rproc_free);
*/ */
int rproc_unregister(struct rproc *rproc) int rproc_unregister(struct rproc *rproc)
{ {
struct rproc_vdev *rvdev; struct rproc_vdev *rvdev, *tmp;
if (!rproc) if (!rproc)
return -EINVAL; return -EINVAL;
...@@ -1555,7 +1554,7 @@ int rproc_unregister(struct rproc *rproc) ...@@ -1555,7 +1554,7 @@ int rproc_unregister(struct rproc *rproc)
wait_for_completion(&rproc->firmware_loading_complete); wait_for_completion(&rproc->firmware_loading_complete);
/* clean up remote vdev entries */ /* clean up remote vdev entries */
list_for_each_entry(rvdev, &rproc->rvdevs, node) list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
rproc_remove_virtio_dev(rvdev); rproc_remove_virtio_dev(rvdev);
/* the rproc is downref'ed as soon as it's removed from the klist */ /* the rproc is downref'ed as soon as it's removed from the klist */
......
...@@ -41,4 +41,6 @@ void rproc_create_debug_dir(struct rproc *rproc); ...@@ -41,4 +41,6 @@ void rproc_create_debug_dir(struct rproc *rproc);
void rproc_init_debugfs(void); void rproc_init_debugfs(void);
void rproc_exit_debugfs(void); void rproc_exit_debugfs(void);
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
#endif /* REMOTEPROC_INTERNAL_H */ #endif /* REMOTEPROC_INTERNAL_H */
...@@ -77,14 +77,17 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, ...@@ -77,14 +77,17 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
struct rproc_vring *rvring; struct rproc_vring *rvring;
struct virtqueue *vq; struct virtqueue *vq;
void *addr; void *addr;
int len, size; int len, size, ret;
/* we're temporarily limited to two virtqueues per rvdev */ /* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring)) if (id >= ARRAY_SIZE(rvdev->vring))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
rvring = &rvdev->vring[id]; ret = rproc_alloc_vring(rvdev, id);
if (ret)
return ERR_PTR(ret);
rvring = &rvdev->vring[id];
addr = rvring->va; addr = rvring->va;
len = rvring->len; len = rvring->len;
...@@ -103,6 +106,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, ...@@ -103,6 +106,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
rproc_virtio_notify, callback, name); rproc_virtio_notify, callback, name);
if (!vq) { if (!vq) {
dev_err(rproc->dev, "vring_new_virtqueue %s failed\n", name); dev_err(rproc->dev, "vring_new_virtqueue %s failed\n", name);
rproc_free_vring(rvring);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
...@@ -125,6 +129,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev) ...@@ -125,6 +129,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
rvring = vq->priv; rvring = vq->priv;
rvring->vq = NULL; rvring->vq = NULL;
vring_del_virtqueue(vq); vring_del_virtqueue(vq);
rproc_free_vring(rvring);
} }
} }
...@@ -228,8 +233,12 @@ static struct virtio_config_ops rproc_virtio_config_ops = { ...@@ -228,8 +233,12 @@ static struct virtio_config_ops rproc_virtio_config_ops = {
static void rproc_vdev_release(struct device *dev) static void rproc_vdev_release(struct device *dev)
{ {
struct virtio_device *vdev = dev_to_virtio(dev); struct virtio_device *vdev = dev_to_virtio(dev);
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev); struct rproc *rproc = vdev_to_rproc(vdev);
list_del(&rvdev->node);
kfree(rvdev);
kref_put(&rproc->refcount, rproc_release); kref_put(&rproc->refcount, rproc_release);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment