Commit f1fcd778 authored by Linus Torvalds

Merge tag 'for-linus-20191212' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - stable fix for the bi_size overflow. Not a corruption issue, but a
   case where we could have merged but disallowed it (Andreas)

 - NVMe pull request via Keith, with various fixes.

 - MD pull request from Song.

 - Merge window regression fix for the rq passthrough stats (Logan)

 - Remove unused blkcg_drain_queue() function (Guoqing)

* tag 'for-linus-20191212' of git://git.kernel.dk/linux-block:
  blk-cgroup: remove blkcg_drain_queue
  block: fix NULL pointer dereference in account statistics with IDE
  md: make sure desc_nr less than MD_SB_DISKS
  md: raid1: check rdev before reference in raid1_sync_request func
  raid5: need to set STRIPE_HANDLE for batch head
  block: fix "check bi_size overflow before merge"
  nvme/pci: Fix read queue count
  nvme/pci: Limit write queue sizes to possible cpus
  nvme/pci: Fix write and poll queue types
  nvme/pci: Remove last_cq_head
  nvme: Namespace identification descriptor list is optional
  nvme-fc: fix double-free scenarios on hw queues
  nvme: else following return is not needed
  nvme: add error message on mismatching controller ids
  nvme_fc: add module to ops template to allow module references
  nvmet-loop: Avoid preallocating big SGL for data
  nvme-fc: Avoid preallocating big SGL for data
  nvme-rdma: Avoid preallocating big SGL for data
parents 5bd831a4 5addeae1
block/bio.c
@@ -754,10 +754,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return false;
 
-	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
+	if (bio->bi_vcnt > 0) {
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
+			if (bio->bi_iter.bi_size > UINT_MAX - len)
+				return false;
 			bv->bv_len += len;
 			bio->bi_iter.bi_size += len;
 			return true;
...
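
The check added above refuses a merge when growing bi_size would wrap the bio's unsigned 32-bit byte counter; testing bi_size > UINT_MAX - len can never overflow itself, unlike testing the sum after the addition. A standalone sketch of the same pre-check pattern (plain C with illustrative names, not kernel code):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustration only: refuse to grow a 32-bit byte counter when the
 * addition would wrap, mirroring the bi_size check in the hunk above. */
static bool try_grow(unsigned int *size, unsigned int len)
{
	/* *size + len overflows exactly when *size > UINT_MAX - len,
	 * and this comparison itself cannot overflow. */
	if (*size > UINT_MAX - len)
		return false;
	*size += len;
	return true;
}

int main(void)
{
	unsigned int size = UINT_MAX - 4096;

	assert(try_grow(&size, 4096));	/* lands exactly on UINT_MAX */
	assert(!try_grow(&size, 1));	/* one more byte would wrap */
	printf("size = %u\n", size);
	return 0;
}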
block/blk-cgroup.c
@@ -1061,26 +1061,6 @@ int blkcg_init_queue(struct request_queue *q)
 	return PTR_ERR(blkg);
 }
 
-/**
- * blkcg_drain_queue - drain blkcg part of request_queue
- * @q: request_queue to drain
- *
- * Called from blk_drain_queue().  Responsible for draining blkcg part.
- */
-void blkcg_drain_queue(struct request_queue *q)
-{
-	lockdep_assert_held(&q->queue_lock);
-
-	/*
-	 * @q could be exiting and already have destroyed all blkgs as
-	 * indicated by NULL root_blkg.  If so, don't confuse policies.
-	 */
-	if (!q->root_blkg)
-		return;
-
-	blk_throtl_drain(q);
-}
-
 /**
  * blkcg_exit_queue - exit and release blkcg part of request_queue
  * @q: request_queue being released
...
block/blk-core.c
@@ -1310,7 +1310,7 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
 void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
-	if (blk_do_io_stat(req)) {
+	if (req->part && blk_do_io_stat(req)) {
 		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
 
@@ -1328,7 +1328,8 @@ void blk_account_io_done(struct request *req, u64 now)
 	 * normal IO on queueing nor completion.  Accounting the
 	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
+	if (req->part && blk_do_io_stat(req) &&
+	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
 		const int sgrp = op_stat_group(req_op(req));
 		struct hd_struct *part;
 
...
drivers/md/md.c
@@ -1159,6 +1159,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
 	/* not spare disk, or LEVEL_MULTIPATH */
 	if (sb->level == LEVEL_MULTIPATH ||
 		(rdev->desc_nr >= 0 &&
+		 rdev->desc_nr < MD_SB_DISKS &&
 		 sb->disks[rdev->desc_nr].state &
 		 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
 		spare_disk = false;
...
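
rdev->desc_nr is read back from on-disk superblock metadata, so it must be bounds-checked before it indexes the fixed-size sb->disks[] array (MD_SB_DISKS is 27). A minimal sketch of the same validate-before-subscript pattern, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

#define SB_DISKS 27	/* stand-in for MD_SB_DISKS */

struct disk_info { unsigned int state; };

/* Short-circuit evaluation guarantees the subscript happens only after
 * both bounds checks pass, so a corrupt index cannot escape the array. */
static bool disk_is_active(const struct disk_info *disks, int desc_nr)
{
	return desc_nr >= 0 && desc_nr < SB_DISKS &&
	       (disks[desc_nr].state & 0x1);
}

int main(void)
{
	struct disk_info disks[SB_DISKS] = { [0] = { .state = 0x1 } };

	printf("%d %d %d\n",
	       disk_is_active(disks, 0),	/* 1: valid and active */
	       disk_is_active(disks, -1),	/* 0: negative index */
	       disk_is_active(disks, 99));	/* 0: past the array */
	return 0;
}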
drivers/md/raid1.c
@@ -2782,7 +2782,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 				write_targets++;
 			}
 		}
-		if (bio->bi_end_io) {
+		if (rdev && bio->bi_end_io) {
 			atomic_inc(&rdev->nr_pending);
 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio_set_dev(bio, rdev->bdev);
...
drivers/md/raid5.c
@@ -5726,7 +5726,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 				do_flush = false;
 			}
 
-			if (!sh->batch_head)
+			if (!sh->batch_head || sh == sh->batch_head)
 				set_bit(STRIPE_HANDLE, &sh->state);
 			clear_bit(STRIPE_DELAYED, &sh->state);
 			if ((!sh->batch_head || sh == sh->batch_head) &&
...
drivers/nvme/host/core.c
@@ -1735,6 +1735,8 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
 		if (ret)
 			dev_warn(ctrl->device,
 				 "Identify Descriptors failed (%d)\n", ret);
+		if (ret > 0)
+			ret = 0;
 	}
 	return ret;
 }
@@ -2852,6 +2854,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	 * admin connect
 	 */
 	if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+		dev_err(ctrl->device,
+			"Mismatching cntlid: Connect %u vs Identify "
+			"%u, rejecting\n",
+			ctrl->cntlid, le16_to_cpu(id->cntlid));
 		ret = -EINVAL;
 		goto out_free;
 	}
...
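
For context on the ret > 0 test: in the NVMe host stack a negative return is a host or transport error, while a positive return is a status code the controller reported. Since the Namespace Identification Descriptor list is optional, a controller status here is logged but squashed to success. A driver-style sketch of that convention (the identify helper name is hypothetical, not the driver's API):

static int fetch_optional_descs(struct nvme_ctrl *ctrl, unsigned int nsid)
{
	int ret = nvme_identify_ns_descs(ctrl, nsid);	/* hypothetical */

	if (ret)
		dev_warn(ctrl->device,
			 "Identify Descriptors failed (%d)\n", ret);
	if (ret > 0)
		ret = 0;	/* controller declined an optional feature */

	return ret;		/* negative host errors still propagate */
}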
drivers/nvme/host/fc.c
@@ -95,7 +95,7 @@ struct nvme_fc_fcp_op {
 
 struct nvme_fcp_op_w_sgl {
 	struct nvme_fc_fcp_op	op;
-	struct scatterlist	sgl[SG_CHUNK_SIZE];
+	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
 	uint8_t			priv[0];
 };
 
@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
 	    !template->ls_req || !template->fcp_io ||
 	    !template->ls_abort || !template->fcp_abort ||
 	    !template->max_hw_queues || !template->max_sgl_segments ||
-	    !template->max_dif_sgl_segments || !template->dma_boundary) {
+	    !template->max_dif_sgl_segments || !template->dma_boundary ||
+	    !template->module) {
 		ret = -EINVAL;
 		goto out_reghost_failed;
 	}
@@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 {
 	struct nvme_fc_ctrl *ctrl =
 		container_of(ref, struct nvme_fc_ctrl, ref);
+	struct nvme_fc_lport *lport = ctrl->lport;
 	unsigned long flags;
 
 	if (ctrl->ctrl.tagset) {
@@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 	if (ctrl->ctrl.opts)
 		nvmf_free_options(ctrl->ctrl.opts);
 	kfree(ctrl);
+	module_put(lport->ops->module);
 }
 
 static void
@@ -2141,7 +2144,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	freq->sg_table.sgl = freq->first_sgl;
 	ret = sg_alloc_table_chained(&freq->sg_table,
 			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
-			SG_CHUNK_SIZE);
+			NVME_INLINE_SG_CNT);
 	if (ret)
 		return -ENOMEM;
 
@@ -2150,7 +2153,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				op->nents, rq_dma_dir(rq));
 	if (unlikely(freq->sg_cnt <= 0)) {
-		sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+		sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
 		freq->sg_cnt = 0;
 		return -EFAULT;
 	}
@@ -2173,7 +2176,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
 			rq_dma_dir(rq));
 
-	sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
 
 	freq->sg_cnt = 0;
 }
@@ -2910,10 +2913,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 static void
 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
 {
-	nvme_stop_keep_alive(&ctrl->ctrl);
+	/*
+	 * if state is connecting - the error occurred as part of a
+	 * reconnect attempt. The create_association error paths will
+	 * clean up any outstanding io.
+	 *
+	 * if it's a different state - ensure all pending io is
+	 * terminated. Given this can delay while waiting for the
+	 * aborted io to return, we recheck adapter state below
+	 * before changing state.
+	 */
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
+		nvme_stop_keep_alive(&ctrl->ctrl);
 
-	/* will block will waiting for io to terminate */
-	nvme_fc_delete_association(ctrl);
+		/* will block will waiting for io to terminate */
+		nvme_fc_delete_association(ctrl);
+	}
 
 	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
 	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
@@ -3059,10 +3074,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 		goto out_fail;
 	}
 
+	if (!try_module_get(lport->ops->module)) {
+		ret = -EUNATCH;
+		goto out_free_ctrl;
+	}
+
 	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
 	if (idx < 0) {
 		ret = -ENOSPC;
-		goto out_free_ctrl;
+		goto out_mod_put;
 	}
 
 	ctrl->ctrl.opts = opts;
@@ -3215,6 +3235,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 out_free_ida:
 	put_device(ctrl->dev);
 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_mod_put:
+	module_put(lport->ops->module);
 out_free_ctrl:
 	kfree(ctrl);
 out_fail:
...
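
The fc changes pin the LLDD's module for the lifetime of each controller: a reference is taken with try_module_get() before the controller is set up and dropped with module_put() only after the final kfree(), so the ops table can never be called after its provider unloads. A hedged sketch of that lifetime rule (types and names are illustrative, not the transport's):

#include <linux/errno.h>
#include <linux/module.h>

struct provider {
	struct module *module;		/* the LLDD sets this to THIS_MODULE */
	void (*teardown)(void *priv);
};

/* Take the reference before the new object can ever call into ops. */
static int obj_create(struct provider *prov)
{
	if (!try_module_get(prov->module))
		return -EUNATCH;	/* provider is already unloading */
	/* ... allocate and register the object ... */
	return 0;
}

/* Drop it only after the last call into provider code has returned. */
static void obj_free(struct provider *prov, void *priv)
{
	prov->teardown(priv);
	module_put(prov->module);	/* now the LLDD may safely unload */
}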
drivers/nvme/host/nvme.h
@@ -28,6 +28,12 @@ extern unsigned int admin_timeout;
 #define NVME_DEFAULT_KATO	5
 #define NVME_KATO_GRACE		10
 
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define NVME_INLINE_SG_CNT	0
+#else
+#define NVME_INLINE_SG_CNT	2
+#endif
+
 extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
...
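
Previously each request preallocated SG_CHUNK_SIZE (128) scatterlist entries, roughly 4KB per request on 64-bit. With a small inline chunk, sg_alloc_table_chained() serves most requests from the embedded entries and chains extra chunks only for requests with more segments; on CONFIG_ARCH_NO_SG_CHAIN architectures nothing is inlined. A hedged sketch of the intended usage (the request struct is a stand-in, not the driver's):

#include <linux/scatterlist.h>

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT	0	/* chaining unavailable: nothing inline */
#else
#define NVME_INLINE_SG_CNT	2	/* covers most small requests */
#endif

struct demo_request {			/* stand-in for a per-request PDU */
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[NVME_INLINE_SG_CNT];
};

static int demo_map(struct demo_request *req, int nr_segs)
{
	req->sg_table.sgl = req->first_sgl;
	/* Chunks beyond the inline entries are allocated on demand. */
	return sg_alloc_table_chained(&req->sg_table, nr_segs,
				      req->sg_table.sgl, NVME_INLINE_SG_CNT);
}

static void demo_unmap(struct demo_request *req)
{
	/* The free side must pass the same inline count as the alloc. */
	sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
}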
drivers/nvme/host/pci.c
@@ -68,14 +68,14 @@ static int io_queue_depth = 1024;
 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
 
-static int write_queues;
-module_param(write_queues, int, 0644);
+static unsigned int write_queues;
+module_param(write_queues, uint, 0644);
 MODULE_PARM_DESC(write_queues,
 	"Number of queues to use for writes. If not set, reads and writes "
 	"will share a queue set.");
 
-static int poll_queues;
-module_param(poll_queues, int, 0644);
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
 
 struct nvme_dev;
@@ -176,7 +176,6 @@ struct nvme_queue {
 	u16 sq_tail;
 	u16 last_sq_tail;
 	u16 cq_head;
-	u16 last_cq_head;
 	u16 qid;
 	u8 cq_phase;
 	u8 sqes;
@@ -1026,10 +1025,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	 * the irq handler, even if that was on another CPU.
 	 */
 	rmb();
-	if (nvmeq->cq_head != nvmeq->last_cq_head)
-		ret = IRQ_HANDLED;
 	nvme_process_cq(nvmeq, &start, &end, -1);
-	nvmeq->last_cq_head = nvmeq->cq_head;
 	wmb();
 
 	if (start != end) {
@@ -1549,7 +1545,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 	result = adapter_alloc_sq(dev, qid, nvmeq);
 	if (result < 0)
 		return result;
-	else if (result)
+	if (result)
 		goto release_cq;
 
 	nvmeq->cq_vector = vector;
@@ -2058,7 +2054,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		.priv		= dev,
 	};
 	unsigned int irq_queues, this_p_queues;
-	unsigned int nr_cpus = num_possible_cpus();
 
 	/*
 	 * Poll queues don't need interrupts, but we need at least one IO
@@ -2069,9 +2064,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		this_p_queues = nr_io_queues - 1;
 		irq_queues = 1;
 	} else {
-		if (nr_cpus < nr_io_queues - this_p_queues)
-			irq_queues = nr_cpus + 1;
-		else
-			irq_queues = nr_io_queues - this_p_queues + 1;
+		irq_queues = nr_io_queues - this_p_queues + 1;
 	}
 	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
@@ -3142,6 +3134,9 @@ static int __init nvme_init(void)
 	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
 	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+
+	write_queues = min(write_queues, num_possible_cpus());
+	poll_queues = min(poll_queues, num_possible_cpus());
 	return pci_register_driver(&nvme_driver);
 }
...
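
The last hunk clamps both parameters once at module load: requesting more write or poll queues than there are possible CPUs buys nothing, since every queue is ultimately mapped to a CPU, and the clamp lets the special-casing removed from nvme_setup_irqs() go away. A toy-module sketch of the same clamp (illustrative, not the nvme driver):

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* The parameter is unsigned so it can be compared against
 * num_possible_cpus(), which also returns unsigned int; the kernel's
 * min() warns at build time when the argument types disagree. */
static unsigned int write_queues;
module_param(write_queues, uint, 0644);
MODULE_PARM_DESC(write_queues, "Number of queues to use for writes.");

static int __init clamp_demo_init(void)
{
	write_queues = min(write_queues, num_possible_cpus());
	pr_info("write_queues clamped to %u\n", write_queues);
	return 0;
}
module_init(clamp_demo_init);

MODULE_LICENSE("GPL");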
drivers/nvme/host/rdma.c
@@ -731,7 +731,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		set->reserved_tags = 2; /* connect + keep-alive */
 		set->numa_node = nctrl->numa_node;
 		set->cmd_size = sizeof(struct nvme_rdma_request) +
-			SG_CHUNK_SIZE * sizeof(struct scatterlist);
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
 		set->driver_data = ctrl;
 		set->nr_hw_queues = 1;
 		set->timeout = ADMIN_TIMEOUT;
@@ -745,7 +745,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		set->numa_node = nctrl->numa_node;
 		set->flags = BLK_MQ_F_SHOULD_MERGE;
 		set->cmd_size = sizeof(struct nvme_rdma_request) +
-			SG_CHUNK_SIZE * sizeof(struct scatterlist);
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
 		set->driver_data = ctrl;
 		set->nr_hw_queues = nctrl->queue_count - 1;
 		set->timeout = NVME_IO_TIMEOUT;
@@ -1160,7 +1160,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	}
 
 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1276,7 +1276,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	req->sg_table.sgl = req->first_sgl;
 	ret = sg_alloc_table_chained(&req->sg_table,
 			blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
-			SG_CHUNK_SIZE);
+			NVME_INLINE_SG_CNT);
 	if (ret)
 		return -ENOMEM;
 
@@ -1314,7 +1314,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 out_unmap_sg:
 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
 out_free_table:
-	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
 	return ret;
 }
...
drivers/nvme/target/fcloop.c
@@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 #define FCLOOP_DMABOUND_4G		0xFFFFFFFF
 
 static struct nvme_fc_port_template fctemplate = {
+	.module			= THIS_MODULE,
 	.localport_delete	= fcloop_localport_delete,
 	.remoteport_delete	= fcloop_remoteport_delete,
 	.create_queue		= fcloop_create_queue,
...
drivers/nvme/target/loop.c
@@ -76,7 +76,7 @@ static void nvme_loop_complete_rq(struct request *req)
 {
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
-	sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
 	nvme_complete_rq(req);
 }
 
@@ -156,7 +156,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		iod->sg_table.sgl = iod->first_sgl;
 		if (sg_alloc_table_chained(&iod->sg_table,
 				blk_rq_nr_phys_segments(req),
-				iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
 			nvme_cleanup_cmd(req);
 			return BLK_STS_RESOURCE;
 		}
@@ -342,7 +342,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
 	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-		SG_CHUNK_SIZE * sizeof(struct scatterlist);
+		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
 	ctrl->admin_tag_set.driver_data = ctrl;
 	ctrl->admin_tag_set.nr_hw_queues = 1;
 	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
@@ -516,7 +516,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-		SG_CHUNK_SIZE * sizeof(struct scatterlist);
+		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
 	ctrl->tag_set.driver_data = ctrl;
 	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
...
drivers/scsi/lpfc/lpfc_nvme.c
@@ -1985,6 +1985,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 
 /* Declare and initialization an instance of the FC NVME template. */
 static struct nvme_fc_port_template lpfc_nvme_template = {
+	.module	= THIS_MODULE,
+
 	/* initiator-based functions */
 	.localport_delete  = lpfc_nvme_localport_delete,
 	.remoteport_delete = lpfc_nvme_remoteport_delete,
...
drivers/scsi/qla2xxx/qla_nvme.c
@@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 }
 
 static struct nvme_fc_port_template qla_nvme_fc_transport = {
+	.module	= THIS_MODULE,
 	.localport_delete = qla_nvme_localport_delete,
 	.remoteport_delete = qla_nvme_remoteport_delete,
 	.create_queue = qla_nvme_alloc_queue,
...
include/linux/blk-cgroup.h
@@ -188,7 +188,6 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 				    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
-void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
@@ -720,7 +719,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
 static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
 { return NULL; }
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
...
include/linux/nvme-fc-driver.h
@@ -270,6 +270,8 @@ struct nvme_fc_remote_port {
  *
  * Host/Initiator Transport Entrypoints/Parameters:
  *
+ * @module:  The LLDD module using the interface
+ *
  * @localport_delete:  The LLDD initiates deletion of a localport via
  *     nvme_fc_deregister_localport(). However, the teardown is
  *     asynchronous. This routine is called upon the completion of the
@@ -383,6 +385,8 @@ struct nvme_fc_remote_port {
  *     Value is Mandatory. Allowed to be zero.
  */
 struct nvme_fc_port_template {
+	struct module	*module;
+
 	/* initiator-based functions */
 	void	(*localport_delete)(struct nvme_fc_local_port *);
 	void	(*remoteport_delete)(struct nvme_fc_remote_port *);
...