Commit 5a1efc6e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block IO fixes from Jens Axboe:
 "Normally I'd defer my initial for-linus pull request until after the
  merge window, but a race was uncovered in the virtio-blk conversion to
  blk-mq that could cause hangs.  So here's a small collection of fixes
  for you to pull:

   - The fix for the virtio-blk IO hang reported by Dave Chinner, from
     Shaohua and myself.

   - Add the Insert blktrace event for blk-mq.  This makes 'btt' happy
     when it is doing its state transition analysis.

   - Ensure that blk-mq has disk/partition stats enabled by default,
     instead of making it opt-in.

   - A fix for __bio_add_page() and large sector counts"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: add blktrace insert event trace
  virtio-blk: virtqueue_kick() must be ordered with other virtqueue operations
  blk-mq: ensure that we set REQ_IO_STAT so diskstats work
  bio: fix argument of __bio_add_page() for max_sectors > 0xffff
parents 6d6e352c 01b983c9
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
-			       unsigned int rw_flags)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+			       struct request *rq, unsigned int rw_flags)
 {
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
+
 	rq->mq_ctx = ctx;
 	rq->cmd_flags = rw_flags;
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
@@ -197,7 +200,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
 		if (rq) {
-			blk_mq_rq_ctx_init(ctx, rq, rw);
+			blk_mq_rq_ctx_init(q, ctx, rq, rw);
 			break;
 		} else if (!(gfp & __GFP_WAIT))
 			break;
@@ -718,6 +721,8 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+	trace_block_rq_insert(hctx->queue, rq);
+
 	list_add_tail(&rq->queuelist, &ctx->rq_list);
 	blk_mq_hctx_mark_pending(hctx, ctx);
@@ -921,7 +926,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
-		blk_mq_rq_ctx_init(ctx, rq, rw);
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
 	else {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
@@ -1377,6 +1382,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	q->queue_hw_ctx = hctxs;
 	q->mq_ops = reg->ops;
+	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
 	blk_queue_make_request(q, blk_mq_make_request);
 	blk_queue_rq_timed_out(q, reg->ops->timeout);
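The net effect of these blk-mq hunks: every request initialized through blk_mq_rq_ctx_init() now inherits REQ_IO_STAT whenever the queue has QUEUE_FLAG_IO_STAT set, and QUEUE_FLAG_MQ_DEFAULT makes that the out-of-the-box state, so /proc/diskstats covers blk-mq devices without any opt-in. A minimal userspace sketch of the flag propagation follows; the flag values and the rq_ctx_init() helper are illustrative stand-ins, not the kernel's real bit assignments or code:

#include <stdio.h>

/* Illustrative stand-ins; these are NOT the kernel's real bit values. */
#define QUEUE_FLAG_IO_STAT	(1u << 0)
#define QUEUE_FLAG_SAME_COMP	(1u << 1)
#define QUEUE_FLAG_MQ_DEFAULT	(QUEUE_FLAG_IO_STAT | QUEUE_FLAG_SAME_COMP)
#define REQ_IO_STAT		(1u << 7)	/* per-request accounting flag */

struct queue   { unsigned int queue_flags; };
struct request { unsigned int cmd_flags; };

/*
 * Mirrors the fixed blk_mq_rq_ctx_init(): the queue's iostat policy is
 * folded into each request's cmd_flags at allocation time.
 */
static void rq_ctx_init(struct queue *q, struct request *rq, unsigned int rw_flags)
{
	if (q->queue_flags & QUEUE_FLAG_IO_STAT)
		rw_flags |= REQ_IO_STAT;
	rq->cmd_flags = rw_flags;
}

int main(void)
{
	struct queue q = { .queue_flags = QUEUE_FLAG_MQ_DEFAULT };
	struct request rq;

	rq_ctx_init(&q, &rq, 0);
	printf("REQ_IO_STAT set: %s\n",
	       (rq.cmd_flags & REQ_IO_STAT) ? "yes" : "no");	/* "yes" */
	return 0;
}

Because the policy is copied into cmd_flags when the request is set up, toggling the queue's iostats attribute later only affects requests allocated afterwards, not ones already in flight.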
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
@@ -199,15 +199,16 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 
 	spin_lock_irqsave(&vblk->vq_lock, flags);
 	if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) {
+		virtqueue_kick(vblk->vq);
 		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
-		virtqueue_kick(vblk->vq);
 		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
-	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	if (last)
 		virtqueue_kick(vblk->vq);
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
 	return BLK_MQ_RQ_QUEUE_OK;
 }
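The virtio-blk fix is purely an ordering change: virtqueue_kick() reads and updates ring bookkeeping that __virtblk_add_req() also touches, and nothing inside the virtqueue serializes them, so the kick has to stay inside the same vq_lock critical section as the add. Before this change the kick ran after the unlock, so an add on another CPU could race with it and the hang Dave Chinner reported could result. A toy pthread model of the corrected discipline; the virtqueue internals here are stand-ins, not the real virtio ring:

#include <pthread.h>

/*
 * Toy stand-ins for the virtio ring. The real virtqueue keeps shared
 * add/kick bookkeeping with no internal locking, so every operation on
 * one queue must be serialized by the caller -- the kick included.
 */
struct virtqueue { int num_added; };

static int virtqueue_add(struct virtqueue *vq)
{
	vq->num_added++;		/* publish a new descriptor (model) */
	return 0;
}

static void virtqueue_kick(struct virtqueue *vq)
{
	vq->num_added = 0;		/* notify the host (model) */
}

static pthread_mutex_t vq_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * The corrected pattern from the hunk above: the kick happens while
 * vq_lock is still held, so it can never interleave with another CPU's
 * virtqueue_add() on the same queue.
 */
static void queue_rq(struct virtqueue *vq, int last)
{
	pthread_mutex_lock(&vq_lock);
	virtqueue_add(vq);
	if (last)
		virtqueue_kick(vq);
	pthread_mutex_unlock(&vq_lock);
}

int main(void)
{
	struct virtqueue vq = { 0 };

	queue_rq(&vq, 1);	/* last request of the batch: add + kick, atomically */
	return 0;
}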
diff --git a/fs/bio.c b/fs/bio.c
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
 
 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			  *page, unsigned int len, unsigned int offset,
-			  unsigned short max_sectors)
+			  unsigned int max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
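The bio change is a straight integer-truncation fix: __bio_add_page() took the queue's limit as an unsigned short, so any max_sectors value above 0xffff wrapped on the way in, and a device advertising, say, 0x10000 hardware sectors looked like it had a zero-sector limit. A standalone demonstration of the wraparound; the helper names are hypothetical, chosen only to contrast the two parameter types:

#include <stdio.h>

/* The old parameter type: anything above 0xffff sectors wraps on entry. */
static unsigned int limit_as_short(unsigned short max_sectors)
{
	return max_sectors;
}

/* The fixed parameter type: the full limit survives the call. */
static unsigned int limit_as_int(unsigned int max_sectors)
{
	return max_sectors;
}

int main(void)
{
	unsigned int max_hw_sectors = 0x10000;	/* 65536 sectors == 32 MiB */

	printf("unsigned short: %u sectors\n", limit_as_short(max_hw_sectors));
	printf("unsigned int:   %u sectors\n", limit_as_int(max_hw_sectors));
	/* prints 0 and 65536: the short parameter silently truncated the limit */
	return 0;
}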
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -505,6 +505,9 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
+#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
+				 (1 << QUEUE_FLAG_SAME_COMP))
+
 static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
 	if (q->queue_lock)
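Note the convention this header mixes: QUEUE_FLAG_IO_STAT and friends are bit numbers, while QUEUE_FLAG_MQ_DEFAULT (like the QUEUE_FLAG_DEFAULT mask above it) is a prebuilt mask of those bits, which is why blk_mq_init_queue() can OR it into queue_flags in a single statement. A small model of the two roles; the bit numbers are illustrative, not the kernel's actual values:

#include <stdio.h>

/* Bit NUMBERS (illustrative values, not the kernel's assignments). */
#define QUEUE_FLAG_SAME_COMP	9
#define QUEUE_FLAG_IO_STAT	13

/* A prebuilt MASK combining those bits, in the style of the hunk above. */
#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |	\
				 (1UL << QUEUE_FLAG_SAME_COMP))

int main(void)
{
	unsigned long queue_flags = 0;

	/* blk_mq_init_queue() applies the whole mask in one OR... */
	queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	/* ...while predicates test single bits, mirroring what
	 * blk_queue_io_stat() checks. */
	printf("io_stat enabled: %s\n",
	       (queue_flags & (1UL << QUEUE_FLAG_IO_STAT)) ? "yes" : "no");
	return 0;
}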