Commit 614f0388 authored by Linus Walleij, committed by Ulf Hansson

mmc: block: move single ioctl() commands to block requests

This wraps single ioctl() commands into block requests using
the custom block layer request types REQ_OP_DRV_IN and
REQ_OP_DRV_OUT.
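
As a condensed, illustrative sketch of the resulting flow (the helper
name mmc_blk_ioctl_dispatch is hypothetical and error handling is
trimmed; the real hunks follow below), the ioctl() path now does
roughly:

	static int mmc_blk_ioctl_dispatch(struct mmc_queue *mq,
					  struct mmc_blk_ioc_data *idata)
	{
		struct request *req;
		int ioc_err;

		/* Allocate a driver-private request; write_flag picks the direction */
		req = blk_get_request(mq->queue,
			idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
			__GFP_RECLAIM);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* Stash the ioctl payload in the per-request container */
		req_to_mmc_queue_req(req)->idata = idata;

		/* Queue it and sleep until the issue path has completed it */
		blk_execute_rq(mq->queue, NULL, req, 0);

		ioc_err = req_to_mmc_queue_req(req)->ioc_result;
		blk_put_request(req);
		return ioc_err;
	}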

By doing this we are loosening the grip on the big host lock,
since two calls to mmc_get_card()/mmc_put_card() are removed.

We store the ioctl() in/out argument as a pointer in the
per-request struct mmc_queue_req container. Since we now let
the block layer allocate this data, blk_get_request() allocates
it for us, and we can immediately dereference it and use it to
pass the argument into the block layer.
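
The container itself is handed out by the block layer: an earlier
patch in this series sets the queue's per-request payload size to
sizeof(struct mmc_queue_req), so the driver data sits directly
behind each struct request. A minimal sketch of the accessor used
throughout the diff, assuming it is the usual blk_mq_rq_to_pdu()
wrapper of that era:

	/* Driver-private data lives immediately behind the request
	 * itself, so the lookup is plain pointer arithmetic and
	 * needs no extra allocation. */
	static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
	{
		return blk_mq_rq_to_pdu(rq);
	}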

We refactor the if/else ladder in mmc_blk_issue_rq() into a
switch statement as part of the job, paying some extra attention
to the case where a NULL req is passed into this function, and
making that pipeline flush more explicit.

Tested on the ux500 with the userspace command:
mmc extcsd read /dev/mmcblk3
resulting in a successful EXT_CSD info dump back to the
console.
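
For reference, mmc extcsd read boils down to a single MMC_IOC_CMD
ioctl(), which after this patch travels through the block queue as
a REQ_OP_DRV_IN request. A minimal userspace sketch of that call
(the MMC_RSP_*/MMC_CMD_* values are kernel-internal, so they are
mirrored here the way mmc-utils does it; opcode 8 is SEND_EXT_CSD):

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/mmc/ioctl.h>

	#define SEND_EXT_CSD	8		/* MMC command index */
	#define MMC_RSP_R1	0x15		/* present | crc | opcode */
	#define MMC_CMD_ADTC	(1 << 5)	/* addressed data transfer */

	int main(void)
	{
		__u8 ext_csd[512];
		struct mmc_ioc_cmd ic = { 0 };
		int fd = open("/dev/mmcblk3", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		ic.opcode = SEND_EXT_CSD;
		ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		ic.blksz = 512;
		ic.blocks = 1;
		mmc_ioc_cmd_set_data(ic, ext_csd);	/* point data_ptr at the buffer */

		/* write_flag is 0, so this becomes a REQ_OP_DRV_IN request */
		if (ioctl(fd, MMC_IOC_CMD, &ic) < 0) {
			perror("MMC_IOC_CMD");
			return 1;
		}
		printf("EXT_CSD revision: %u\n", ext_csd[192]);
		return 0;
	}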

This commit fixes a starvation issue in the MMC/SD stack
that can easily be provoked by issuing the following
commands in sequence:

> dd if=/dev/mmcblk3 of=/dev/null bs=1M &
> mmc extcsd read /dev/mmcblk3

Before this patch, the extcsd read command would hang
(starve) while waiting for the dd command to finish, since
the block layer was holding the card/host lock.

After this patch, the extcsd ioctl() command is nicely
interspersed with the rest of the block commands, and we
can issue a bunch of ioctl()s from userspace while busy
block I/O is going on, without any problems.

Conversely, userspace ioctl()s can no longer starve
the block layer by holding the card/host lock.
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Avri Altman <Avri.Altman@sandisk.com>
parent 829043c4
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -564,8 +564,10 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 {
 	struct mmc_blk_ioc_data *idata;
 	struct mmc_blk_data *md;
+	struct mmc_queue *mq;
 	struct mmc_card *card;
 	int err = 0, ioc_err = 0;
+	struct request *req;
 
 	/*
 	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -591,17 +593,18 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 		goto cmd_done;
 	}
 
-	mmc_get_card(card);
-
-	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
-
-	/* Always switch back to main area after RPMB access */
-	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
-		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
-
-	mmc_put_card(card);
-
+	/*
+	 * Dispatch the ioctl() into the block request queue.
+	 */
+	mq = &md->queue;
+	req = blk_get_request(mq->queue,
+		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+		__GFP_RECLAIM);
+	req_to_mmc_queue_req(req)->idata = idata;
+	blk_execute_rq(mq->queue, NULL, req, 0);
+	ioc_err = req_to_mmc_queue_req(req)->ioc_result;
 	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+	blk_put_request(req);
 
 cmd_done:
 	mmc_blk_put(md);
@@ -611,6 +614,31 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 	return ioc_err ? ioc_err : err;
 }
 
+/*
+ * The ioctl commands come back from the block layer after it queued it and
+ * processed it with all other requests and then they get issued in this
+ * function.
+ */
+static void mmc_blk_ioctl_cmd_issue(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mq_rq;
+	struct mmc_blk_ioc_data *idata;
+	struct mmc_card *card = mq->card;
+	struct mmc_blk_data *md = mq->blkdata;
+	int ioc_err;
+
+	mq_rq = req_to_mmc_queue_req(req);
+	idata = mq_rq->idata;
+	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
+	mq_rq->ioc_result = ioc_err;
+
+	/* Always switch back to main area after RPMB access */
+	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
+	blk_end_request_all(req, ioc_err);
+}
+
 static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
 				   struct mmc_ioc_multi_cmd __user *user)
 {
@@ -1854,23 +1882,54 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
-	if (req && req_op(req) == REQ_OP_DISCARD) {
-		/* complete ongoing async transfer before issuing discard */
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_discard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
-		/* complete ongoing async transfer before issuing secure erase*/
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_secdiscard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_FLUSH) {
-		/* complete ongoing async transfer before issuing flush */
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_flush(mq, req);
+	if (req) {
+		switch (req_op(req)) {
+		case REQ_OP_DRV_IN:
+		case REQ_OP_DRV_OUT:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * ioctl()s
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_ioctl_cmd_issue(mq, req);
+			break;
+		case REQ_OP_DISCARD:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * discard.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_discard_rq(mq, req);
+			break;
+		case REQ_OP_SECURE_ERASE:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * secure erase.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_secdiscard_rq(mq, req);
+			break;
+		case REQ_OP_FLUSH:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * flush.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_flush(mq, req);
+			break;
+		default:
+			/* Normal request, just issue it */
+			mmc_blk_issue_rw_rq(mq, req);
+			card->host->context_info.is_waiting_last_req = false;
+			break;
+		}
 	} else {
-		mmc_blk_issue_rw_rq(mq, req);
+		/* No request, flushing the pipeline with NULL */
+		mmc_blk_issue_rw_rq(mq, NULL);
 		card->host->context_info.is_waiting_last_req = false;
 	}
 
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -22,6 +22,7 @@ static inline bool mmc_req_is_special(struct request *req)
 
 struct task_struct;
 struct mmc_blk_data;
+struct mmc_blk_ioc_data;
 
 struct mmc_blk_request {
 	struct mmc_request mrq;
@@ -40,6 +41,8 @@ struct mmc_queue_req {
 	struct scatterlist *bounce_sg;
 	unsigned int bounce_sg_len;
 	struct mmc_async_req areq;
+	int ioc_result;
+	struct mmc_blk_ioc_data *idata;
 };
 
 struct mmc_queue {