Commit e6e5bec4 authored by Linus Torvalds

Merge tag 'for-linus-20180629' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Small set of fixes for this series. Mostly just minor fixes, the only
  oddball in here is the sg change.

  The sg change came out of the stall fix for NVMe, where we added a
  mempool and limited ourselves to a single page allocation per SG
  list. CONFIG_DEBUG_SG sort-of ruins that, since the debug magic
  changes the scatterlist entry size and we'd need to account for that.
  That's actually a generic problem, since lots of drivers need to
  allocate SG lists. So this just removes the ->sg_magic member and its
  checks, which I added back in 2007 and which to my knowledge never
  caught anything.

  Anyway, outside of that, this pull contains:

   - clone of request with special payload fix (Bart)

   - drbd discard handling fix (Bart)

   - SATA blk-mq stall fix (me)

   - chunk size fix (Keith)

   - double free nvme rdma fix (Sagi)"
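A minimal kernel-style sketch of the allocation scheme Jens describes above: one page per SG list, backed by a mempool so an allocation can always make forward progress. The names (sg_page_pool, SG_ENTRIES_PER_PAGE) are invented for illustration; the real fix lives in the NVMe driver.

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/scatterlist.h>

/* Entries that fit in one page. Under CONFIG_DEBUG_SG the old sg_magic
 * member would change sizeof(struct scatterlist) and therefore this
 * count, which is the accounting problem mentioned above. */
#define SG_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(struct scatterlist))

static mempool_t *sg_page_pool;

static void *sg_page_alloc(gfp_t gfp, void *pool_data)
{
	/* Single page per SG list: no higher-order allocation to stall on. */
	return (void *)__get_free_page(gfp);
}

static void sg_page_free(void *element, void *pool_data)
{
	free_page((unsigned long)element);
}

static int sg_page_pool_init(void)
{
	/* A few preallocated pages guarantee progress under memory pressure. */
	sg_page_pool = mempool_create(4, sg_page_alloc, sg_page_free, NULL);
	return sg_page_pool ? 0 : -ENOMEM;
}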

* tag 'for-linus-20180629' of git://git.kernel.dk/linux-block:
  sg: remove ->sg_magic member
  drbd: Fix drbd_request_prepare() discard handling
  blk-mq: don't queue more if we get a busy return
  block: Fix cloning of requests with a special payload
  nvme-rdma: fix possible double free of controller async event buffer
  block: Fix transfer when chunk sectors exceeds max
parents 1904148a 9544bc53
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3473,6 +3473,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 	dst->cpu = src->cpu;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
+	if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+		dst->special_vec = src->special_vec;
+	}
 	dst->nr_phys_segments = src->nr_phys_segments;
 	dst->ioprio = src->ioprio;
 	dst->extra_len = src->extra_len;
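The hunk above matters because a request flagged RQF_SPECIAL_PAYLOAD (a discard or write-same, for example) carries its data in special_vec rather than in a bio chain, and helpers size the transfer from that vector; a clone that drops the pair issues a malformed command. A standalone sketch of the pattern, mirroring the logic of the kernel's blk_rq_payload_bytes() with simplified stand-in types:

/* Simplified stand-ins for the kernel structures involved. */
struct bio_vec {
	void *bv_page;
	unsigned int bv_len;
	unsigned int bv_offset;
};

struct request {
	unsigned int rq_flags;
	unsigned int data_len;
	struct bio_vec special_vec;
};

#define RQF_SPECIAL_PAYLOAD	(1U << 0)	/* placeholder bit for this sketch */

/* Payload size comes from special_vec when the flag is set. A clone that
 * copies data_len but not the flag/vector pair makes this return the
 * wrong size for the cloned request, which is the bug fixed above. */
static unsigned int rq_payload_bytes(const struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return rq->data_len;
}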
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1075,6 +1075,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 
 #define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
 
+/*
+ * Returns true if we did some work AND can potentially do more.
+ */
 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			     bool got_budget)
 {
@@ -1205,8 +1208,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			blk_mq_run_hw_queue(hctx, true);
 		else if (needs_restart && (ret == BLK_STS_RESOURCE))
 			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
+
+		return false;
 	}
 
+	/*
+	 * If the host/device is unable to accept more work, inform the
+	 * caller of that.
+	 */
+	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+		return false;
+
 	return (queued + errors) != 0;
 }
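The new comment states the contract: true means "did some work and can potentially do more". A hypothetical caller-side sketch of how that return value is meant to be used; dispatch_batches() is not a kernel function:

#include <stdbool.h>

struct request_queue;
struct list_head;

/* The in-tree function modelled here returns true only if requests were
 * dispatched AND the device did not report a resource shortage. */
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget);

static void dispatch_batches(struct request_queue *q, struct list_head *list,
			     int max_batches)
{
	while (max_batches-- > 0) {
		/* With the fix, BLK_STS_RESOURCE/BLK_STS_DEV_RESOURCE surface
		 * as 'false': stop feeding a device that just reported it is
		 * busy; a queue restart or delayed run resumes dispatch later. */
		if (!blk_mq_dispatch_rq_list(q, list, false))
			break;
	}
}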
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 	_drbd_start_io_acct(device, req);
 
 	/* process discards always from our submitter thread */
-	if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
-	    (bio_op(bio) & REQ_OP_DISCARD))
+	if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+	    bio_op(bio) == REQ_OP_DISCARD)
 		goto queue_for_submitter_thread;
 
 	if (rw == WRITE && req->private_bio && req->i.size
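The bug being fixed here is worth spelling out: REQ_OP_* values are sequential op codes, not single-bit flags, so a bitwise AND matches unrelated operations. A standalone illustration using the 4.18-era values:

#include <stdio.h>

/* Op codes as in the 4.18-era <linux/blk_types.h>: sequential values,
 * not one-hot flag bits. */
enum req_op {
	REQ_OP_READ		= 0,
	REQ_OP_WRITE		= 1,
	REQ_OP_DISCARD		= 3,
	REQ_OP_WRITE_ZEROES	= 9,
};

int main(void)
{
	enum req_op op = REQ_OP_WRITE;

	/* Buggy check: REQ_OP_WRITE & REQ_OP_WRITE_ZEROES == 1 & 9 == 1, so
	 * an ordinary write is misrouted to the submitter thread. */
	if ((op & REQ_OP_WRITE_ZEROES) || (op & REQ_OP_DISCARD))
		printf("bitwise AND: WRITE looks like a discard\n");

	/* Fixed check: equality only matches the intended op codes. */
	if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
		printf("equality: not printed for a plain WRITE\n");

	return 0;
}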
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2245,9 +2245,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
 **/
 static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
 	return sg_is_last(sg) ? NULL : ____sg_next(sg);
 }
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -732,8 +732,11 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		blk_cleanup_queue(ctrl->ctrl.admin_q);
 		nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
 	}
-	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-		sizeof(struct nvme_command), DMA_TO_DEVICE);
+	if (ctrl->async_event_sqe.data) {
+		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+				sizeof(struct nvme_command), DMA_TO_DEVICE);
+		ctrl->async_event_sqe.data = NULL;
+	}
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 }
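The fix is the usual guard-free-clear idiom that makes a teardown path idempotent, since admin queue teardown can run more than once when reset or reconnect fails. A userspace sketch of the idiom, with hypothetical names:

#include <stdlib.h>

struct async_qe {
	void *data;	/* a DMA-coherent buffer in the real driver */
};

/* Guard on the pointer, release, then clear it, so a second call from
 * another teardown path is a harmless no-op instead of a double free. */
static void async_qe_release(struct async_qe *qe)
{
	if (qe->data) {
		free(qe->data);
		qe->data = NULL;
	}
}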
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
 	if (!q->limits.chunk_sectors)
 		return q->limits.max_sectors;
 
-	return q->limits.chunk_sectors -
-			(offset & (q->limits.chunk_sectors - 1));
+	return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+			(offset & (q->limits.chunk_sectors - 1))));
 }
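Concretely: chunk_sectors is a power of two, so offset & (chunk_sectors - 1) is the position inside the current chunk and the subtraction gives the sectors left before the next boundary. When that remainder exceeds max_sectors (possible when a device reports a boundary larger than its transfer limit), the old code handed out an oversized request. A userspace model with made-up limits:

#include <stdio.h>

/* Userspace model of blk_max_size_offset(); the limits are invented. */
static unsigned int max_size_offset(unsigned int chunk_sectors,
				    unsigned int max_sectors,
				    unsigned long long offset)
{
	unsigned int to_boundary;

	if (!chunk_sectors)
		return max_sectors;

	/* sectors remaining before the next chunk boundary */
	to_boundary = chunk_sectors -
			(unsigned int)(offset & (chunk_sectors - 1));

	/* the fix: also respect the device's max transfer size */
	return to_boundary < max_sectors ? to_boundary : max_sectors;
}

int main(void)
{
	/* 2048-sector (1 MiB) boundary, 1024-sector (512 KiB) max transfer */
	printf("%u\n", max_size_offset(2048, 1024, 0));    /* 1024, was 2048 */
	printf("%u\n", max_size_offset(2048, 1024, 1536)); /* 512: boundary wins */
	return 0;
}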
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -9,9 +9,6 @@
 #include <asm/io.h>
 
 struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
 	unsigned long	page_link;
 	unsigned int	offset;
 	unsigned int	length;
@@ -64,7 +61,6 @@ struct sg_table {
  *
  */
 
-#define SG_MAGIC	0x87654321
 #define SG_CHAIN	0x01UL
 #define SG_END		0x02UL
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
 	 */
 	BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
 #ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
 	BUG_ON(sg_is_chain(sg));
 #endif
 	sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
 	BUG_ON(sg_is_chain(sg));
 #endif
 	return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
 **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
 	/*
 	 * Set termination bit, clear potential chain bit
 	 */
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
 **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
 	sg->page_link &= ~SG_END;
 }
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
 static inline void sg_init_marker(struct scatterlist *sgl,
 				  unsigned int nents)
 {
-#ifdef CONFIG_DEBUG_SG
-	unsigned int i;
-
-	for (i = 0; i < nents; i++)
-		sgl[i].sg_magic = SG_MAGIC;
-#endif
 	sg_mark_end(&sgl[nents - 1]);
 }
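Dropping sg_magic leaves page_link as the struct's only tagged field: this header (and the tools/virtio copy further down) packs the chain/end markers into its two low bits, which is why sg_assign_page() asserts that those bits are clear in the page pointer. A userspace illustration of the encoding, using the values from the header above:

#include <stdio.h>

/* Page pointers are at least 4-byte aligned, so the two low bits of
 * page_link are free to carry the markers. */
#define SG_CHAIN	0x01UL
#define SG_END		0x02UL

int main(void)
{
	unsigned long page = 0x1000;	/* stand-in for a struct page * */
	unsigned long page_link = page | SG_END;	/* last entry in the list */

	printf("page:     %#lx\n", page_link & ~(SG_CHAIN | SG_END));
	printf("is_chain: %d\n", (page_link & SG_CHAIN) != 0);
	printf("is_end:   %d\n", (page_link & SG_END) != 0);
	return 0;
}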
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -24,9 +24,6 @@
 **/
 struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
 	if (sg_is_last(sg))
 		return NULL;
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 	for_each_sg(sgl, sg, nents, i)
 		ret = sg;
 
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
 	BUG_ON(!sg_is_last(ret));
-#endif
 	return ret;
 }
 EXPORT_SYMBOL(sg_last);
--- a/tools/virtio/linux/scatterlist.h
+++ b/tools/virtio/linux/scatterlist.h
@@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
 	 */
 	BUG_ON((unsigned long) page & 0x03);
 #ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
 	BUG_ON(sg_is_chain(sg));
 #endif
 	sg->page_link = page_link | (unsigned long) page;
@@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
 	BUG_ON(sg_is_chain(sg));
 #endif
 	return (struct page *)((sg)->page_link & ~0x3);
@@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
 **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
 	/*
 	 * Set termination bit, clear potential chain bit
 	 */
@@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg)
 **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
 	sg->page_link &= ~0x02;
 }
 
 static inline struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-	BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
 	if (sg_is_last(sg))
 		return NULL;
@@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
 static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
 {
 	memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
-	{
-		unsigned int i;
-		for (i = 0; i < nents; i++)
-			sgl[i].sg_magic = SG_MAGIC;
-	}
-#endif
 	sg_mark_end(&sgl[nents - 1]);
 }