Commit 15f73f5b authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: move failure injection out of blk_mq_complete_request

Move the call to blk_should_fake_timeout out of blk_mq_complete_request
and into the drivers, skipping call sites that are obvious error
handlers, and remove the now superfluous blk_mq_force_complete_rq helper.
This ensures we don't keep injecting errors into completions that just
terminate the Linux request after the hardware has been reset or the
command has been aborted.
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d391a7a3
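For context, the sketch below is not part of the commit; the mydrv_* names are hypothetical and only illustrate the completion pattern the patch moves into drivers: normal completion paths now carry the fault-injection check themselves, while error-recovery paths such as timeout handlers complete the request unconditionally.

/* Sketch only: mydrv_* is a hypothetical driver, not part of this commit. */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct mydrv_cmd {
	blk_status_t status;
};

/*
 * Normal I/O completion path: after this commit the driver, not the block
 * core, decides whether fail_io_timeout fault injection may drop the
 * completion, so it guards blk_mq_complete_request() itself.
 */
static void mydrv_complete_command(struct mydrv_cmd *cmd, blk_status_t status)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	cmd->status = status;
	if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}

/*
 * Error-recovery path (e.g. a timeout handler): complete unconditionally,
 * so a completion that merely terminates the request after a reset or an
 * abort can no longer be swallowed by further fake timeouts.
 */
static enum blk_eh_timer_return mydrv_timeout_rq(struct request *req, bool reserved)
{
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}

This mirrors what the diff below does for drivers such as mtip32xx and for null_blk's timeout handler.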
@@ -655,16 +655,13 @@ static void __blk_mq_complete_request_remote(void *data)
 }
 
 /**
- * blk_mq_force_complete_rq() - Force complete the request, bypassing any error
- *				injection that could drop the completion.
- * @rq: Request to be force completed
+ * blk_mq_complete_request - end I/O on a request
+ * @rq:		the request being processed
  *
- * Drivers should use blk_mq_complete_request() to complete requests in their
- * normal IO path. For timeout error recovery, drivers may call this forced
- * completion routine after they've reclaimed timed out requests to bypass
- * potentially subsequent fake timeouts.
- */
-void blk_mq_force_complete_rq(struct request *rq)
+ * Description:
+ *	Complete a request by scheduling the ->complete_rq operation.
+ **/
+void blk_mq_complete_request(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
@@ -702,7 +699,7 @@ void blk_mq_force_complete_rq(struct request *rq)
 	}
 	put_cpu();
 }
-EXPORT_SYMBOL_GPL(blk_mq_force_complete_rq);
+EXPORT_SYMBOL(blk_mq_complete_request);
 
 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
 	__releases(hctx->srcu)
@@ -724,23 +721,6 @@ static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
 	*srcu_idx = srcu_read_lock(hctx->srcu);
 }
 
-/**
- * blk_mq_complete_request - end I/O on a request
- * @rq:		the request being processed
- *
- * Description:
- *	Ends all I/O on a request. It does not handle partial completions.
- *	The actual completion happens out-of-order, through a IPI handler.
- **/
-bool blk_mq_complete_request(struct request *rq)
-{
-	if (unlikely(blk_should_fake_timeout(rq->q)))
-		return false;
-	blk_mq_force_complete_rq(rq);
-	return true;
-}
-EXPORT_SYMBOL(blk_mq_complete_request);
-
 /**
  * blk_mq_start_request - Start processing a request
  * @rq: Pointer to request to be started
...
@@ -20,13 +20,11 @@ static int __init setup_fail_io_timeout(char *str)
 }
 __setup("fail_io_timeout=", setup_fail_io_timeout);
 
-int blk_should_fake_timeout(struct request_queue *q)
+bool __blk_should_fake_timeout(struct request_queue *q)
 {
-	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
-		return 0;
-
 	return should_fail(&fail_io_timeout, 1);
 }
+EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);
 
 static int __init fail_io_timeout_debugfs(void)
 {
...
@@ -223,18 +223,9 @@ ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
 		char *buf);
 ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count);
-
-#ifdef CONFIG_FAIL_IO_TIMEOUT
-int blk_should_fake_timeout(struct request_queue *);
 ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
 ssize_t part_timeout_store(struct device *, struct device_attribute *,
 				const char *, size_t);
-#else
-static inline int blk_should_fake_timeout(struct request_queue *q)
-{
-	return 0;
-}
-#endif
 
 void __blk_queue_split(struct request_queue *q, struct bio **bio,
 		unsigned int *nr_segs);
...
@@ -181,9 +181,12 @@ EXPORT_SYMBOL_GPL(bsg_job_get);
 void bsg_job_done(struct bsg_job *job, int result,
 		  unsigned int reply_payload_rcv_len)
 {
+	struct request *rq = blk_mq_rq_from_pdu(job);
+
 	job->result = result;
 	job->reply_payload_rcv_len = reply_payload_rcv_len;
-	blk_mq_complete_request(blk_mq_rq_from_pdu(job));
+	if (likely(!blk_should_fake_timeout(rq->q)))
+		blk_mq_complete_request(rq);
 }
 EXPORT_SYMBOL_GPL(bsg_job_done);
...
@@ -509,6 +509,7 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 		return;
 	kfree(cmd->bvec);
 	cmd->bvec = NULL;
-	blk_mq_complete_request(rq);
+	if (likely(!blk_should_fake_timeout(rq->q)))
+		blk_mq_complete_request(rq);
 }
@@ -2048,6 +2049,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 			cmd->ret = ret;
 		else
 			cmd->ret = ret ? -EIO : 0;
-		blk_mq_complete_request(rq);
+		if (likely(!blk_should_fake_timeout(rq->q)))
+			blk_mq_complete_request(rq);
 	}
 }
...
@@ -492,6 +492,7 @@ static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 
 	cmd->status = status;
-	blk_mq_complete_request(req);
+	if (likely(!blk_should_fake_timeout(req->q)))
+		blk_mq_complete_request(req);
 }
...
@@ -784,6 +784,7 @@ static void recv_work(struct work_struct *work)
 	struct nbd_device *nbd = args->nbd;
 	struct nbd_config *config = nbd->config;
 	struct nbd_cmd *cmd;
+	struct request *rq;
 
 	while (1) {
 		cmd = nbd_read_stat(nbd, args->index);
@@ -796,7 +797,9 @@ static void recv_work(struct work_struct *work)
 			break;
 		}
 
-		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
+		rq = blk_mq_rq_from_pdu(cmd);
+		if (likely(!blk_should_fake_timeout(rq->q)))
+			blk_mq_complete_request(rq);
 	}
 	atomic_dec(&config->recv_threads);
 	wake_up(&config->recv_wq);
...
@@ -1283,6 +1283,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
 	case NULL_IRQ_SOFTIRQ:
 		switch (cmd->nq->dev->queue_mode) {
 		case NULL_Q_MQ:
-			blk_mq_complete_request(cmd->rq);
+			if (likely(!blk_should_fake_timeout(cmd->rq->q)))
+				blk_mq_complete_request(cmd->rq);
 			break;
 		case NULL_Q_BIO:
@@ -1423,7 +1424,7 @@ static bool should_requeue_request(struct request *rq)
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
 	pr_info("rq %p timed out\n", rq);
-	blk_mq_force_complete_rq(rq);
+	blk_mq_complete_request(rq);
 	return BLK_EH_DONE;
 }
...
@@ -1417,6 +1417,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 	case SKD_CHECK_STATUS_REPORT_GOOD:
 	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
 		skreq->status = BLK_STS_OK;
-		blk_mq_complete_request(req);
+		if (likely(!blk_should_fake_timeout(req->q)))
+			blk_mq_complete_request(req);
 		break;
@@ -1440,6 +1441,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 	case SKD_CHECK_STATUS_REPORT_ERROR:
 	default:
 		skreq->status = BLK_STS_IOERR;
-		blk_mq_complete_request(req);
+		if (likely(!blk_should_fake_timeout(req->q)))
+			blk_mq_complete_request(req);
 		break;
 	}
@@ -1560,6 +1562,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
 		 */
 		if (likely(cmp_status == SAM_STAT_GOOD)) {
 			skreq->status = BLK_STS_OK;
-			blk_mq_complete_request(rq);
+			if (likely(!blk_should_fake_timeout(rq->q)))
+				blk_mq_complete_request(rq);
 		} else {
 			skd_resolve_req_exception(skdev, skreq, rq);
...
@@ -171,6 +171,7 @@ static void virtblk_done(struct virtqueue *vq)
 		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
 			struct request *req = blk_mq_rq_from_pdu(vbr);
 
-			blk_mq_complete_request(req);
+			if (likely(!blk_should_fake_timeout(req->q)))
+				blk_mq_complete_request(req);
 			req_done = true;
 		}
...
@@ -1655,6 +1655,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			BUG();
 		}
 
-		blk_mq_complete_request(req);
+		if (likely(!blk_should_fake_timeout(req->q)))
+			blk_mq_complete_request(req);
 	}
...
@@ -288,6 +288,7 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
 	struct dm_rq_target_io *tio = tio_from_request(rq);
 
 	tio->error = error;
-	blk_mq_complete_request(rq);
+	if (likely(!blk_should_fake_timeout(rq->q)))
+		blk_mq_complete_request(rq);
 }
...
@@ -1446,7 +1446,7 @@ static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
 	 */
 	if (mq->in_recovery)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 }
@@ -1926,7 +1926,7 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
 	 */
 	if (mq->in_recovery)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 }
@@ -1936,7 +1936,7 @@ void mmc_blk_mq_complete(struct request *req)
 	if (mq->use_cqe)
 		mmc_blk_cqe_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		mmc_blk_mq_complete_rq(mq, req);
 }
@@ -1988,7 +1988,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
 	 */
 	if (mq->in_recovery)
 		mmc_blk_mq_complete_rq(mq, req);
-	else
+	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
 
 	mmc_blk_mq_dec_in_flight(mq, req);
...
@@ -304,7 +304,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 		return true;
 
 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
-	blk_mq_force_complete_rq(req);
+	blk_mq_complete_request(req);
 	return true;
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
...
@@ -481,6 +481,7 @@ static inline void nvme_end_request(struct request *req, __le16 status,
 	rq->result = result;
 	/* inject error when permitted by fault injection framework */
 	nvme_should_fail(req);
-	blk_mq_complete_request(req);
+	if (likely(!blk_should_fake_timeout(req->q)))
+		blk_mq_complete_request(req);
 }
...
@@ -2802,7 +2802,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 			blk_update_request(req, BLK_STS_OK,
 					   blk_rq_bytes(req) - proc_bytes);
 			blk_mq_requeue_request(req, true);
-		} else {
+		} else if (likely(!blk_should_fake_timeout(req->q))) {
 			blk_mq_complete_request(req);
 		}
 	}
...
@@ -256,6 +256,7 @@ static void scm_request_finish(struct scm_request *scmrq)
 	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
 		error = blk_mq_rq_to_pdu(scmrq->request[i]);
 		*error = scmrq->error;
-		blk_mq_complete_request(scmrq->request[i]);
+		if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
+			blk_mq_complete_request(scmrq->request[i]);
 	}
...
@@ -1589,18 +1589,12 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
-	if (unlikely(blk_should_fake_timeout(cmd->request->q)))
-		return;
 	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
 		return;
 	trace_scsi_dispatch_cmd_done(cmd);
-
-	/*
-	 * If the block layer didn't complete the request due to a timeout
-	 * injection, scsi must clear its internal completed state so that the
-	 * timeout handler will see it needs to escalate its own error
-	 * recovery.
-	 */
-	if (unlikely(!blk_mq_complete_request(cmd->request)))
-		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
+	blk_mq_complete_request(cmd->request);
 }
 
 static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
...
@@ -503,8 +503,7 @@ void __blk_mq_end_request(struct request *rq, blk_status_t error);
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-bool blk_mq_complete_request(struct request *rq);
-void blk_mq_force_complete_rq(struct request *rq);
+void blk_mq_complete_request(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 		struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);
@@ -537,6 +536,15 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q);
 
 unsigned int blk_mq_rq_cpu(struct request *rq);
 
+bool __blk_should_fake_timeout(struct request_queue *q);
+static inline bool blk_should_fake_timeout(struct request_queue *q)
+{
+	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
+	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+		return __blk_should_fake_timeout(q);
+	return false;
+}
+
 /**
  * blk_mq_rq_from_pdu - cast a PDU to a request
  * @pdu: the PDU (Protocol Data Unit) to be casted
...