Commit c0c14e93 authored by Jens Axboe

Merge branch 'nvme-5.1' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph:

"Two nvme fixes for 5.1 - fixing the initial CSN for nvme-fc, and handle
 log page offsets properly in the target."

* 'nvme-5.1' of git://git.infradead.org/nvme:
  nvmet: fix discover log page when offsets are used
  nvme-fc: correct csn initialization and increments on error
parents a3761c3c d808b7f7
drivers/nvme/host/fc.c
@@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
 	memset(queue, 0, sizeof(*queue));
 	queue->ctrl = ctrl;
 	queue->qnum = idx;
-	atomic_set(&queue->csn, 1);
+	atomic_set(&queue->csn, 0);
 	queue->dev = ctrl->dev;
 	if (idx > 0)
@@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	 */
 	queue->connection_id = 0;
-	atomic_set(&queue->csn, 1);
+	atomic_set(&queue->csn, 0);
 }
 
 static void
@@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 {
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
-	u32 csn;
 	int ret, opstate;
 
 	/*
@@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	/* format the FC-NVME CMD IU and fcp_req */
 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
-	csn = atomic_inc_return(&queue->csn);
-	cmdiu->csn = cpu_to_be32(csn);
 	cmdiu->data_len = cpu_to_be32(data_len);
 
 	switch (io_dir) {
 	case NVMEFC_FCP_WRITE:
@@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	if (!(op->flags & FCOP_FLAGS_AEN))
 		blk_mq_start_request(op->rq);
 
+	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
 	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
 					&ctrl->rport->remoteport,
 					queue->lldd_handle, &op->fcp_req);
 
 	if (ret) {
+		/*
+		 * If the lld fails to send the command is there an issue with
+		 * the csn value? If the command that fails is the Connect,
+		 * no - as the connection won't be live. If it is a command
+		 * post-connect, it's possible a gap in csn may be created.
+		 * Does this matter? As Linux initiators don't send fused
+		 * commands, no. The gap would exist, but as there's nothing
+		 * that depends on csn order to be delivered on the target
+		 * side, it shouldn't hurt. It would be difficult for a
+		 * target to even detect the csn gap as it has no idea when the
+		 * cmd with the csn was supposed to arrive.
+		 */
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
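Note on the fc.c change: it does two related things. The queue CSN counter now starts at 0 so that the first atomic_inc_return() yields 1 - FC-NVME requires the Connect command, always the first command on a queue, to carry CSN 1 - and the increment moves to just before the fcp_io() call so a CSN is only consumed when the command is actually handed to the LLDD. A minimal user-space sketch of that counter semantic, using C11 atomics rather than the kernel's atomic_t (all names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's atomic_inc_return(): increment the
 * counter and return the *new* value. */
static uint32_t inc_return(atomic_uint *v)
{
	return atomic_fetch_add(v, 1) + 1;
}

int main(void)
{
	atomic_uint csn = 0;	/* the fix: initialize to 0, not 1 */

	/* The first command on the queue is the Connect: must be CSN 1. */
	printf("Connect CSN = %u\n", inc_return(&csn));	/* prints 1 */
	printf("next CSN    = %u\n", inc_return(&csn));	/* prints 2 */
	return 0;
}

With the old initial value of 1, the Connect command went out with CSN 2, which strict FC-NVME targets are entitled to reject.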
drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
 	return len;
 }
 
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+	return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
 	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
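For context: a host reading a large log page splits the transfer into chunks, putting the 0's-based dword count in NUMDL/NUMDU and the byte offset of each chunk in LPO - which is why the target must honor the offset rather than always serving the log from byte 0. A hedged user-space sketch of that encoding; the struct and function names below are hypothetical, only the NUMD/LPO field semantics come from the NVMe spec:

#include <stdint.h>

/* Hypothetical mirror of the Get Log Page DW10-DW13 fields. */
struct log_page_dwords {
	uint16_t numdl, numdu;	/* dword count minus 1, low/high halves */
	uint32_t lpol, lpou;	/* byte offset, low/high 32 bits */
};

static void encode_log_chunk(struct log_page_dwords *d,
			     uint64_t offset, uint32_t len)
{
	uint32_t numd = len / 4 - 1;	/* NUMD is 0's based, in dwords */

	d->numdl = numd & 0xffff;
	d->numdu = numd >> 16;
	d->lpol  = (uint32_t)offset;	/* must be dword aligned */
	d->lpou  = (uint32_t)(offset >> 32);
}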
drivers/nvme/target/discovery.c
@@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
 	memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 }
 
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_subsys_link *p;
+	struct nvmet_port *r;
+	size_t entries = 0;
+
+	list_for_each_entry(p, &req->port->subsystems, entry) {
+		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+			continue;
+		entries++;
+	}
+	list_for_each_entry(r, &req->port->referrals, entry)
+		entries++;
+	return entries;
+}
+
 static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 {
 	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_disc_rsp_page_hdr *hdr;
+	u64 offset = nvmet_get_log_page_offset(req->cmd);
 	size_t data_len = nvmet_get_log_page_len(req->cmd);
-	size_t alloc_len = max(data_len, sizeof(*hdr));
-	int residual_len = data_len - sizeof(*hdr);
+	size_t alloc_len;
 	struct nvmet_subsys_link *p;
 	struct nvmet_port *r;
 	u32 numrec = 0;
 	u16 status = 0;
+	void *buffer;
+
+	/* Spec requires dword aligned offsets */
+	if (offset & 0x3) {
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
 
 	/*
 	 * Make sure we're passing at least a buffer of response header size.
 	 * If host provided data len is less than the header size, only the
 	 * number of bytes requested by host will be sent to host.
 	 */
-	hdr = kzalloc(alloc_len, GFP_KERNEL);
-	if (!hdr) {
+	down_read(&nvmet_config_sem);
+	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+	buffer = kzalloc(alloc_len, GFP_KERNEL);
+	if (!buffer) {
+		up_read(&nvmet_config_sem);
 		status = NVME_SC_INTERNAL;
 		goto out;
 	}
 
-	down_read(&nvmet_config_sem);
+	hdr = buffer;
 	list_for_each_entry(p, &req->port->subsystems, entry) {
+		char traddr[NVMF_TRADDR_SIZE];
+
 		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
 			continue;
-		if (residual_len >= entry_size) {
-			char traddr[NVMF_TRADDR_SIZE];
 
-			nvmet_set_disc_traddr(req, req->port, traddr);
-			nvmet_format_discovery_entry(hdr, req->port,
-					p->subsys->subsysnqn, traddr,
-					NVME_NQN_NVME, numrec);
-			residual_len -= entry_size;
-		}
+		nvmet_set_disc_traddr(req, req->port, traddr);
+		nvmet_format_discovery_entry(hdr, req->port,
+				p->subsys->subsysnqn, traddr,
+				NVME_NQN_NVME, numrec);
 		numrec++;
 	}
 
 	list_for_each_entry(r, &req->port->referrals, entry) {
-		if (residual_len >= entry_size) {
-			nvmet_format_discovery_entry(hdr, r,
-					NVME_DISC_SUBSYS_NAME,
-					r->disc_addr.traddr,
-					NVME_NQN_DISC, numrec);
-			residual_len -= entry_size;
-		}
+		nvmet_format_discovery_entry(hdr, r,
+				NVME_DISC_SUBSYS_NAME,
+				r->disc_addr.traddr,
+				NVME_NQN_DISC, numrec);
 		numrec++;
 	}
@@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 	up_read(&nvmet_config_sem);
 
-	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
-	kfree(hdr);
+	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+	kfree(buffer);
 
 out:
 	nvmet_req_complete(req, status);
 }
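Design note: instead of clamping writes against a residual_len that ignored the offset, the target now renders the complete log under nvmet_config_sem (so the entry count cannot change between sizing and formatting) and serves an arbitrary dword-aligned window of it via nvmet_copy_to_sgl(req, 0, buffer + offset, data_len). A hedged sketch of the host-side pattern this enables; get_log() is a hypothetical transport helper, not a real API, while the 1024-byte header and entry sizes come from the NVMe-oF discovery log format:

#include <stdint.h>

#define DISC_HDR_SIZE	1024	/* struct nvmf_disc_rsp_page_hdr */
#define DISC_ENTRY_SIZE	1024	/* struct nvmf_disc_rsp_page_entry */

/* Hypothetical helper: issue Get Log Page (Discovery, ID 70h) for
 * `len` bytes at byte `offset` into `buf`. Stubbed for illustration;
 * a real implementation would build and submit the fabrics command. */
static int get_log(uint64_t offset, uint32_t len, void *buf)
{
	(void)offset; (void)len; (void)buf;
	return 0;
}

static int read_discovery_entries(uint64_t numrec, void *entries)
{
	/* Fetch entries one at a time; each read's offset lands past
	 * the header, which the fixed target now honors instead of
	 * replaying the log from byte 0. */
	for (uint64_t i = 0; i < numrec; i++) {
		uint64_t off = DISC_HDR_SIZE + i * DISC_ENTRY_SIZE;
		int ret = get_log(off, DISC_ENTRY_SIZE,
				  (char *)entries + i * DISC_ENTRY_SIZE);
		if (ret)
			return ret;
	}
	return 0;
}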
drivers/nvme/target/nvmet.h
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
 
 extern struct list_head *nvmet_ports;
 void nvmet_port_disc_changed(struct nvmet_port *port,
include/linux/nvme.h
@@ -967,8 +967,13 @@ struct nvme_get_log_page_command {
 	__le16			numdl;
 	__le16			numdu;
 	__u16			rsvd11;
-	__le32			lpol;
-	__le32			lpou;
+	union {
+		struct {
+			__le32 lpol;
+			__le32 lpou;
+		};
+		__le64 lpo;
+	};
 	__u32			rsvd14[2];
 };
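The union works because LPOL and LPOU are adjacent little-endian dwords: bytes 0-3 and 4-7 of the field form exactly one little-endian qword, so the target can pull the full 64-bit offset with a single le64_to_cpu(cmd->get_log_page.lpo). A quick user-space check of that layout, with plain uintN_t standing in for the kernel's __leN types (the assert holds on a little-endian host; le64_to_cpu() preserves the equivalence on big-endian machines):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union lpo_overlay {
	struct {
		uint32_t lpol;	/* bytes 0-3: offset bits 31:0 */
		uint32_t lpou;	/* bytes 4-7: offset bits 63:32 */
	};
	uint64_t lpo;		/* the same 8 bytes read as one qword */
};

int main(void)
{
	union lpo_overlay u;

	u.lpol = 0x00000400;	/* offset 1024 ... */
	u.lpou = 0x00000001;	/* ... plus 4 GiB */

	assert(u.lpo == (((uint64_t)u.lpou << 32) | u.lpol));
	printf("lpo = 0x%llx\n", (unsigned long long)u.lpo);
	return 0;
}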