Commit 4ab50af6 authored by Juergen Gross, committed by Boris Ostrovsky

xen/blkfront: fix ring info addressing

Commit 0265d6e8 ("xen/blkfront: limit allocated memory size to
actual use case") made struct blkfront_ring_info size dynamic. This is
fine when running with only one queue, but with multiple queues the
addressing of the single queues has to be adapted as the structs are
allocated in an array.

Fixes: 0265d6e8 ("xen/blkfront: limit allocated memory size to actual use case")
Reported-by: Sander Eikelenboom <linux@eikelenboom.it>
Tested-by: Sander Eikelenboom <linux@eikelenboom.it>
Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Link: https://lore.kernel.org/r/20200305155129.28326-1-jgross@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent 2f69a110
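
The bug is easiest to see outside the kernel. Below is a minimal userspace C
sketch (struct ring, elem_size and the other names are hypothetical stand-ins,
not identifiers from the driver): when each array element carries a flexible
array member, &base[i] strides by sizeof(struct ring) alone and lands inside
the previous element's trailing array; only byte arithmetic with the true
per-element size addresses the elements correctly, which is what the patch's
get_rinfo() does.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct blkfront_ring_info: a fixed header
 * followed by a flexible array member sized at runtime. */
struct ring {
	unsigned int id;
	unsigned long shadow[];
};

int main(void)
{
	unsigned int nr_rings = 4, ring_size = 32, i;
	/* True per-element size, analogous to info->rinfo_size. */
	size_t elem_size = sizeof(struct ring) +
			   ring_size * sizeof(unsigned long);
	struct ring *base = calloc(nr_rings, elem_size);

	if (!base)
		return 1;

	for (i = 0; i < nr_rings; i++) {
		/* Correct: stride by elem_size bytes, as get_rinfo() does.
		 * (void * arithmetic is a GNU extension, as in the patch.) */
		struct ring *r = (void *)base + i * elem_size;

		r->id = i;
	}

	/* Broken: &base[1] strides by sizeof(struct ring) only, so it
	 * points into base[0]'s shadow[] area, not at element 1. */
	printf("&base[1] misses element 1 by %td bytes\n",
	       ((char *)base + elem_size) - (char *)&base[1]);

	free(base);
	return 0;
}

On a typical LP64 system this prints a 256-byte gap; in the driver the same
mismatch made every ring beyond the first point into the previous ring's
shadow array.
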
@@ -213,6 +213,7 @@ struct blkfront_info
 	struct blk_mq_tag_set tag_set;
 	struct blkfront_ring_info *rinfo;
 	unsigned int nr_rings;
+	unsigned int rinfo_size;
 	/* Save uncomplete reqs and bios for migration. */
 	struct list_head requests;
 	struct bio_list bio_list;
@@ -259,6 +260,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 static void blkfront_gather_backend_features(struct blkfront_info *info);
 static int negotiate_mq(struct blkfront_info *info);
+
+#define for_each_rinfo(info, ptr, idx)		\
+	for ((ptr) = (info)->rinfo, (idx) = 0;	\
+	     (idx) < (info)->nr_rings;		\
+	     (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
+
+static inline struct blkfront_ring_info *
+get_rinfo(const struct blkfront_info *info, unsigned int i)
+{
+	BUG_ON(i >= info->nr_rings);
+	return (void *)info->rinfo + i * info->rinfo_size;
+}
 
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
 	unsigned long free = rinfo->shadow_free;
@@ -883,8 +896,7 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct blkfront_info *info = hctx->queue->queuedata;
 	struct blkfront_ring_info *rinfo = NULL;
 
-	BUG_ON(info->nr_rings <= qid);
-	rinfo = &info->rinfo[qid];
+	rinfo = get_rinfo(info, qid);
 	blk_mq_start_request(qd->rq);
 	spin_lock_irqsave(&rinfo->ring_lock, flags);
 	if (RING_FULL(&rinfo->ring))
@@ -1181,6 +1193,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 static void xlvbd_release_gendisk(struct blkfront_info *info)
 {
 	unsigned int minor, nr_minors, i;
+	struct blkfront_ring_info *rinfo;
 
 	if (info->rq == NULL)
 		return;
@@ -1188,9 +1201,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	/* No more blkif_request(). */
 	blk_mq_stop_hw_queues(info->rq);
 
-	for (i = 0; i < info->nr_rings; i++) {
-		struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+	for_each_rinfo(info, rinfo, i) {
 		/* No more gnttab callback work. */
 		gnttab_cancel_free_callback(&rinfo->callback);
@@ -1339,6 +1350,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
 	unsigned int i;
+	struct blkfront_ring_info *rinfo;
 
 	/* Prevent new requests being issued until we fix things up. */
 	info->connected = suspend ?
@@ -1347,8 +1359,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	if (info->rq)
 		blk_mq_stop_hw_queues(info->rq);
 
-	for (i = 0; i < info->nr_rings; i++)
-		blkif_free_ring(&info->rinfo[i]);
+	for_each_rinfo(info, rinfo, i)
+		blkif_free_ring(rinfo);
 
 	kvfree(info->rinfo);
 	info->rinfo = NULL;
@@ -1775,6 +1787,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	int err;
 	unsigned int i, max_page_order;
 	unsigned int ring_page_order;
+	struct blkfront_ring_info *rinfo;
 
 	if (!info)
 		return -ENODEV;
@@ -1788,9 +1801,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	if (err)
 		goto destroy_blkring;
 
-	for (i = 0; i < info->nr_rings; i++) {
-		struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+	for_each_rinfo(info, rinfo, i) {
 		/* Create shared ring, alloc event channel. */
 		err = setup_blkring(dev, rinfo);
 		if (err)
@@ -1815,7 +1826,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	/* We already got the number of queues/rings in _probe */
 	if (info->nr_rings == 1) {
-		err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
+		err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
 		if (err)
 			goto destroy_blkring;
 	} else {
@@ -1837,10 +1848,10 @@
 			goto abort_transaction;
 		}
 
-		for (i = 0; i < info->nr_rings; i++) {
+		for_each_rinfo(info, rinfo, i) {
 			memset(path, 0, pathsize);
 			snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
-			err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
+			err = write_per_ring_nodes(xbt, rinfo, path);
 			if (err) {
 				kfree(path);
 				goto destroy_blkring;
@@ -1868,9 +1879,8 @@ static int talk_to_blkback(struct xenbus_device *dev,
 		goto destroy_blkring;
 	}
 
-	for (i = 0; i < info->nr_rings; i++) {
+	for_each_rinfo(info, rinfo, i) {
 		unsigned int j;
-		struct blkfront_ring_info *rinfo = &info->rinfo[i];
 
 		for (j = 0; j < BLK_RING_SIZE(info); j++)
 			rinfo->shadow[j].req.u.rw.id = j + 1;
@@ -1900,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info)
 {
 	unsigned int backend_max_queues;
 	unsigned int i;
+	struct blkfront_ring_info *rinfo;
 
 	BUG_ON(info->nr_rings);
@@ -1911,20 +1922,16 @@ static int negotiate_mq(struct blkfront_info *info)
 	if (!info->nr_rings)
 		info->nr_rings = 1;
 
-	info->rinfo = kvcalloc(info->nr_rings,
-			       struct_size(info->rinfo, shadow,
-					   BLK_RING_SIZE(info)),
-			       GFP_KERNEL);
+	info->rinfo_size = struct_size(info->rinfo, shadow,
+				       BLK_RING_SIZE(info));
+	info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
 	if (!info->rinfo) {
 		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
 		info->nr_rings = 0;
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < info->nr_rings; i++) {
-		struct blkfront_ring_info *rinfo;
-
-		rinfo = &info->rinfo[i];
+	for_each_rinfo(info, rinfo, i) {
 		INIT_LIST_HEAD(&rinfo->indirect_pages);
 		INIT_LIST_HEAD(&rinfo->grants);
 		rinfo->dev_info = info;
@@ -2017,6 +2024,7 @@ static int blkif_recover(struct blkfront_info *info)
 	int rc;
 	struct bio *bio;
 	unsigned int segs;
+	struct blkfront_ring_info *rinfo;
 
 	blkfront_gather_backend_features(info);
 	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
@@ -2024,9 +2032,7 @@ static int blkif_recover(struct blkfront_info *info)
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
-	for (r_index = 0; r_index < info->nr_rings; r_index++) {
-		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
-
+	for_each_rinfo(info, rinfo, r_index) {
 		rc = blkfront_setup_indirect(rinfo);
 		if (rc)
 			return rc;
@@ -2036,10 +2042,7 @@
 	/* Now safe for us to use the shared ring */
 	info->connected = BLKIF_STATE_CONNECTED;
 
-	for (r_index = 0; r_index < info->nr_rings; r_index++) {
-		struct blkfront_ring_info *rinfo;
-
-		rinfo = &info->rinfo[r_index];
+	for_each_rinfo(info, rinfo, r_index) {
 		/* Kick any other new requests queued since we resumed */
 		kick_pending_request_queues(rinfo);
 	}
@@ -2072,13 +2075,13 @@ static int blkfront_resume(struct xenbus_device *dev)
 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 	int err = 0;
 	unsigned int i, j;
+	struct blkfront_ring_info *rinfo;
 
 	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
 	bio_list_init(&info->bio_list);
 	INIT_LIST_HEAD(&info->requests);
-	for (i = 0; i < info->nr_rings; i++) {
-		struct blkfront_ring_info *rinfo = &info->rinfo[i];
+	for_each_rinfo(info, rinfo, i) {
 		struct bio_list merge_bio;
 		struct blk_shadow *shadow = rinfo->shadow;
@@ -2337,6 +2340,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	unsigned int binfo;
 	char *envp[] = { "RESIZE=1", NULL };
 	int err, i;
+	struct blkfront_ring_info *rinfo;
 
 	switch (info->connected) {
 	case BLKIF_STATE_CONNECTED:
@@ -2394,8 +2398,8 @@ static void blkfront_connect(struct blkfront_info *info)
 				 "physical-sector-size",
 				 sector_size);
 	blkfront_gather_backend_features(info);
-	for (i = 0; i < info->nr_rings; i++) {
-		err = blkfront_setup_indirect(&info->rinfo[i]);
+	for_each_rinfo(info, rinfo, i) {
+		err = blkfront_setup_indirect(rinfo);
 		if (err) {
 			xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
 					 info->xbdev->otherend);
@@ -2416,8 +2420,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	/* Kick pending requests. */
 	info->connected = BLKIF_STATE_CONNECTED;
-	for (i = 0; i < info->nr_rings; i++)
-		kick_pending_request_queues(&info->rinfo[i]);
+	for_each_rinfo(info, rinfo, i)
+		kick_pending_request_queues(rinfo);
 
 	device_add_disk(&info->xbdev->dev, info->gd, NULL);
@@ -2652,9 +2656,9 @@ static void purge_persistent_grants(struct blkfront_info *info)
 {
 	unsigned int i;
 	unsigned long flags;
+	struct blkfront_ring_info *rinfo;
 
-	for (i = 0; i < info->nr_rings; i++) {
-		struct blkfront_ring_info *rinfo = &info->rinfo[i];
+	for_each_rinfo(info, rinfo, i) {
 		struct grant *gnt_list_entry, *tmp;
 
 		spin_lock_irqsave(&rinfo->ring_lock, flags);
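
For completeness, here is a hedged userspace sketch of the allocation-plus-
iteration pattern the patch ends up with. STRUCT_SIZE and for_each_ring are
simplified stand-ins I made up for this sketch; the kernel's real
struct_size() from <linux/overflow.h> additionally saturates on arithmetic
overflow.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical mirror of the driver's layout. */
struct ring {
	unsigned int id;
	unsigned long shadow[];
};

struct info {
	struct ring *rinfo;
	unsigned int nr_rings;
	size_t rinfo_size;
};

/* Simplified stand-in for the kernel's struct_size(): size of the
 * fixed part plus n trailing flexible-array elements. */
#define STRUCT_SIZE(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof(*(p)->member))

/* Same shape as the patch's for_each_rinfo(): advance the cursor by
 * rinfo_size bytes per step, keeping pointer and index in lockstep. */
#define for_each_ring(info, ptr, idx)				\
	for ((ptr) = (info)->rinfo, (idx) = 0;			\
	     (idx) < (info)->nr_rings;				\
	     (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)

int main(void)
{
	struct info info = { .nr_rings = 4 };
	struct ring *r;
	unsigned int i;

	info.rinfo_size = STRUCT_SIZE(info.rinfo, shadow, 32);
	info.rinfo = calloc(info.nr_rings, info.rinfo_size);
	if (!info.rinfo)
		return 1;

	for_each_ring(&info, r, i)
		r->id = i;

	for_each_ring(&info, r, i)
		printf("ring %u at byte offset %zu\n", r->id,
		       (size_t)((char *)r - (char *)info.rinfo));

	free(info.rinfo);
	return 0;
}

The comma operator in the for header is what lets one macro advance both the
pointer and the index in a single statement, mirroring for_each_rinfo().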