Commit 8e5d31eb authored by Jens Axboe

Merge branch 'nvme-4.10' of git://git.infradead.org/nvme into for-linus

Christoph writes:

The most significant change is that we've agreed on shared maintenance and
a common repository for the PCIe NVMe driver and NVMe over Fabrics.  The
target code still has only a subset of the maintainers but goes through
the same tree as well.  Keith, Sagi, and I will take turns collecting
patches and sending you pull requests.
parents 72c5296f 7c3a23b8
@@ -8852,17 +8852,22 @@ F: drivers/video/fbdev/nvidia/
 NVM EXPRESS DRIVER
 M: Keith Busch <keith.busch@intel.com>
 M: Jens Axboe <axboe@fb.com>
+M: Christoph Hellwig <hch@lst.de>
+M: Sagi Grimberg <sagi@grimberg.me>
 L: linux-nvme@lists.infradead.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
-W: https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
+T: git://git.infradead.org/nvme.git
+W: http://git.infradead.org/nvme.git
 S: Supported
 F: drivers/nvme/host/
 F: include/linux/nvme.h
+F: include/uapi/linux/nvme_ioctl.h

 NVM EXPRESS TARGET DRIVER
 M: Christoph Hellwig <hch@lst.de>
 M: Sagi Grimberg <sagi@grimberg.me>
 L: linux-nvme@lists.infradead.org
+T: git://git.infradead.org/nvme.git
+W: http://git.infradead.org/nvme.git
 S: Supported
 F: drivers/nvme/target/
@@ -1193,8 +1193,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
         }
-        if (ctrl->stripe_size)
-                blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+        if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+                blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
         blk_queue_virt_boundary(q, ctrl->page_size - 1);
         if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                 vwc = true;
@@ -1250,19 +1250,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
         ctrl->max_hw_sectors =
                 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

-        if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
-                unsigned int max_hw_sectors;
-
-                ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
-                max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
-                if (ctrl->max_hw_sectors) {
-                        ctrl->max_hw_sectors = min(max_hw_sectors,
-                                                ctrl->max_hw_sectors);
-                } else {
-                        ctrl->max_hw_sectors = max_hw_sectors;
-                }
-        }
-
         nvme_set_queue_limits(ctrl, ctrl->admin_q);
         ctrl->sgls = le32_to_cpu(id->sgls);
         ctrl->kas = le16_to_cpu(id->kas);
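The two hunks above remove the cached stripe size: for controllers with NVME_QUIRK_STRIPE_SIZE, the driver now passes ctrl->max_hw_sectors straight to blk_queue_chunk_sectors() instead of deriving and storing a separate ctrl->stripe_size. As background, blk_queue_chunk_sectors() tells the block layer that a request must not cross a chunk (here, stripe) boundary. A minimal standalone sketch of that constraint, not kernel code and with invented names, assuming the chunk size is a power of two:

#include <stdio.h>

/* Cap an I/O at the next chunk boundary, as the block layer does for a
 * queue configured with blk_queue_chunk_sectors().  Sketch only. */
static unsigned int max_sectors_at(unsigned long long sector,
                                   unsigned int nr_sectors,
                                   unsigned int chunk_sectors)
{
        unsigned int to_boundary = chunk_sectors - (sector & (chunk_sectors - 1));

        return nr_sectors < to_boundary ? nr_sectors : to_boundary;
}

int main(void)
{
        /* A 256-sector I/O starting 16 sectors below a 128-sector boundary
         * is cut to 16 sectors; the block layer splits off the remainder. */
        printf("%u\n", max_sectors_at(112, 256, 128));  /* prints 16 */
        return 0;
}

After the change the chunk size is simply max_hw_sectors, which is why the second hunk can delete the stripe_size bookkeeping from nvme_init_identify() entirely.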
@@ -1491,19 +1491,20 @@ static int
 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
         struct nvme_fc_queue *queue = &ctrl->queues[1];
-        int i, j, ret;
+        int i, ret;

         for (i = 1; i < ctrl->queue_count; i++, queue++) {
                 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
-                if (ret) {
-                        for (j = i-1; j >= 0; j--)
-                                __nvme_fc_delete_hw_queue(ctrl,
-                                                &ctrl->queues[j], j);
-                        return ret;
-                }
+                if (ret)
+                        goto delete_queues;
         }

         return 0;
+
+delete_queues:
+        for (; i >= 0; i--)
+                __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+        return ret;
 }

 static int
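The hunk above replaces the open-coded rollback loop in nvme_fc_create_hw_io_queues() with a single unwind label, the usual kernel error-handling idiom. A self-contained sketch of that pattern, using hypothetical create/delete helpers rather than the driver's functions:

#include <stdio.h>
#include <stdlib.h>

#define NR_RES 4

/* Hypothetical per-index setup/teardown, used only to illustrate the idiom;
 * create_res() is made to fail at index 2 so the unwind path runs. */
static int create_res(int i)  { printf("create %d\n", i); return i == 2 ? -1 : 0; }
static void delete_res(int i) { printf("delete %d\n", i); }

/* On failure, jump to one label that tears down everything created so far,
 * instead of duplicating the rollback loop at the failure site. */
static int create_all(void)
{
        int i, ret;

        for (i = 0; i < NR_RES; i++) {
                ret = create_res(i);
                if (ret)
                        goto delete_created;
        }
        return 0;

delete_created:
        while (--i >= 0)
                delete_res(i);
        return ret;
}

int main(void)
{
        return create_all() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Run as written, this prints create 0 through create 2, then delete 1 and delete 0, and exits with the error, which is the behaviour the goto-based version of the driver function preserves in one place.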
@@ -2401,8 +2402,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
         WARN_ON_ONCE(!changed);

         dev_info(ctrl->ctrl.device,
-                "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
-                ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+                "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+                ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

         kref_get(&ctrl->ctrl.kref);
@@ -135,7 +135,6 @@ struct nvme_ctrl {
         u32 page_size;
         u32 max_hw_sectors;
-        u32 stripe_size;
         u16 oncs;
         u16 vid;
         atomic_t abort_limit;
@@ -712,15 +712,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
                 req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
                 nvme_req(req)->result = cqe.result;
                 blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
         }

-        /* If the controller ignores the cq head doorbell and continuously
-         * writes to the queue, it is theoretically possible to wrap around
-         * the queue twice and mistakenly return IRQ_NONE. Linux only
-         * requires that 0.1% of your interrupts are handled, so this isn't
-         * a big problem.
-         */
         if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                 return;
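The pci.c hunk only deletes a stale comment; the head/phase test it referred to is unchanged. For context, a minimal sketch (plain C, not the driver) of how an NVMe completion queue is consumed using the phase tag: the controller posts new entries with the current phase bit set, and the consumer flips its expected phase each time it wraps, so entries left over from the previous pass are never treated as new.

#include <stdint.h>

/* Simplified CQ entry: just the status word; bit 0 is the phase tag. */
struct cqe {
        uint16_t status;
};

/* Drain all entries whose phase bit matches the expected phase. */
static void process_cq(const struct cqe *cq, uint16_t qsize,
                       uint16_t *head, uint8_t *phase)
{
        while ((cq[*head].status & 1) == *phase) {
                /* ... complete the command identified by this entry ... */
                if (++(*head) == qsize) {
                        *head = 0;
                        *phase = !*phase;
                }
        }
        /* ... write *head to the CQ head doorbell here ... */
}

int main(void)
{
        struct cqe cq[4] = { { 1 }, { 1 }, { 0 }, { 0 } };
        uint16_t head = 0;
        uint8_t phase = 1;

        process_cq(cq, 4, &head, &phase);       /* consumes the two posted entries */
        return head == 2 ? 0 : 1;
}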
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
         return nvme_trans_status_code(hdr, nvme_sc);
 }

-static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-                                                        u8 *cmd)
-{
-        u8 immed, no_flush;
-
-        immed = cmd[1] & 0x01;
-        no_flush = cmd[4] & 0x04;
-
-        if (immed != 0) {
-                return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-                                        ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-                                        SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-        } else {
-                if (no_flush == 0) {
-                        /* Issue NVME FLUSH command prior to START STOP UNIT */
-                        int res = nvme_trans_synchronize_cache(ns, hdr);
-                        if (res)
-                                return res;
-                }
-                return 0;
-        }
-}
-
 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                                         u8 *cmd)
 {
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
         case SECURITY_PROTOCOL_OUT:
                 retcode = nvme_trans_security_protocol(ns, hdr, cmd);
                 break;
-        case START_STOP:
-                retcode = nvme_trans_start_stop(ns, hdr, cmd);
-                break;
         case SYNCHRONIZE_CACHE:
                 retcode = nvme_trans_synchronize_cache(ns, hdr);
                 break;
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 {
         struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
         u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-        u64 val;
         u32 val32;
         u16 status = 0;
@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
                         (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                 break;
         case NVME_FEAT_KATO:
-                val = le64_to_cpu(req->cmd->prop_set.value);
-                val32 = val & 0xffff;
+                val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
                 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
                 nvmet_set_result(req, req->sq->ctrl->kato);
                 break;
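With this hunk the target reads the Keep Alive Timer value for Set Features from command dword 11 (req->cmd->common.cdw10[1]) rather than from the fabrics property-set payload. The value is given in milliseconds and stored rounded up to whole seconds, which is what the DIV_ROUND_UP in the hunk does. A tiny self-contained sketch of that conversion, outside the kernel:

#include <stdio.h>

/* Same arithmetic as the kernel's DIV_ROUND_UP(), redefined here so the
 * sketch compiles on its own. */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int kato_ms = 15500;   /* hypothetical value from Set Features CDW11 */
        unsigned int kato_s = DIV_ROUND_UP(kato_ms, 1000);

        printf("%u ms -> %u s\n", kato_ms, kato_s);     /* 15500 ms -> 16 s */
        return 0;
}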
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
         rport->lport = nport->lport;
         nport->rport = rport;

-        return ret ? ret : count;
+        return count;
 }
@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
         tport->lport = nport->lport;
         nport->tport = tport;

-        return ret ? ret : count;
+        return count;
 }