Commit ed9ea4ed authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add support for T10 PI pass-through between vhost-scsi +
     virtio-scsi (MST + Paolo + MKP + nab)
   - Add support for T10 PI in qla2xxx target mode (Quinn + MKP + hch +
     nab, merged through scsi.git)
   - Add support for percpu-ida pre-allocation in qla2xxx target code
     (Quinn + nab)
   - A number of iser-target fixes related to hardening the network
     portal shutdown path (Sagi + Slava)
   - Fix response length residual handling for a number of control CDBs
     (Roland + Christophe V.)
   - Various iscsi RFC conformance fixes in the CHAP authentication path
     (Tejas and Calsoft folks + nab)
   - Return TASK_SET_FULL status for tcm_fc(FCoE) DataIn + Response
     failures (Vasu + Jun + nab)
   - Fix long-standing ABORT_TASK + session reset hang (nab)
   - Convert iser-initiator + iser-target to include T10 bytes into EDTL
     (Sagi + Or + MKP + Mike Christie)
   - Fix NULL pointer dereference regression related to XCOPY introduced
     in v3.15 + CC'ed to v3.12.y (nab)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (34 commits)
  target: Fix NULL pointer dereference for XCOPY in target_put_sess_cmd
  vhost-scsi: Include prot_bytes into expected data transfer length
  TARGET/sbc,loopback: Adjust command data length in case pi exists on the wire
  libiscsi, iser: Adjust data_length to include protection information
  scsi_cmnd: Introduce scsi_transfer_length helper
  target: Report correct response length for some commands
  target/sbc: Check that the LBA and number of blocks are correct in VERIFY
  target/sbc: Remove sbc_check_valid_sectors()
  Target/iscsi: Fix sendtargets response pdu for iser transport
  Target/iser: Fix a wrong dereference in case discovery session is over iser
  iscsi-target: Fix ABORT_TASK + connection reset iscsi_queue_req memory leak
  target: Use complete_all for se_cmd->t_transport_stop_comp
  target: Set CMD_T_ACTIVE bit for Task Management Requests
  target: cleanup some boolean tests
  target/spc: Simplify INQUIRY EVPD=0x80
  tcm_fc: Generate TASK_SET_FULL status for response failures
  tcm_fc: Generate TASK_SET_FULL status for DataIN failures
  iscsi-target: Reject mutual authentication with reflected CHAP_C
  iscsi-target: Remove no-op from iscsit_tpg_del_portal_group
  iscsi-target: Fix CHAP_A parameter list handling
  ...
parents c1fdb2d3 0ed6e189
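
Several of the highlights above (the long-standing ABORT_TASK + session reset hang and the "Use complete_all for se_cmd->t_transport_stop_comp" change in the shortlog) come down to one completion being waited on from more than one context. A minimal, illustrative sketch of the difference using the generic completion API, not the target-core code itself:

#include <linux/completion.h>
#include <linux/kthread.h>

/* Illustrative only (not target-core code): two contexts, e.g. an
 * ABORT_TASK handler and a session-reset path, both wait for the same
 * command to quiesce.
 */
static DECLARE_COMPLETION(stop_comp);

static int waiter_fn(void *unused)
{
        wait_for_completion(&stop_comp);        /* both waiters block here */
        return 0;
}

static void cmd_has_stopped(void)
{
        /*
         * complete() wakes exactly one waiter and leaves the other stuck;
         * complete_all() releases every current and future waiter, which
         * is what a "this command has stopped" event needs.
         */
        complete_all(&stop_comp);
}

static void example(void)
{
        kthread_run(waiter_fn, NULL, "waiter-a");
        kthread_run(waiter_fn, NULL, "waiter-b");
        cmd_has_stopped();
}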
@@ -41,11 +41,11 @@
 #include "iscsi_iser.h"
 
 /* Register user buffer memory and initialize passive rdma
- *  dto descriptor. Total data size is stored in
- *  iser_task->data[ISER_DIR_IN].data_len
+ *  dto descriptor. Data size is stored in
+ *  task->data[ISER_DIR_IN].data_len, Protection size
+ *  os stored in task->prot[ISER_DIR_IN].data_len
  */
-static int iser_prepare_read_cmd(struct iscsi_task *task,
-                                 unsigned int edtl)
+static int iser_prepare_read_cmd(struct iscsi_task *task)
 {
         struct iscsi_iser_task *iser_task = task->dd_data;
@@ -73,14 +73,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
                 return err;
         }
 
-        if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
-                iser_err("Total data length: %ld, less than EDTL: "
-                         "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
-                         iser_task->data[ISER_DIR_IN].data_len, edtl,
-                         task->itt, iser_task->ib_conn);
-                return -EINVAL;
-        }
-
         err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
         if (err) {
                 iser_err("Failed to set up Data-IN RDMA\n");
@@ -100,8 +92,9 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
 }
 
 /* Register user buffer memory and initialize passive rdma
- *  dto descriptor. Total data size is stored in
- *  task->data[ISER_DIR_OUT].data_len
+ *  dto descriptor. Data size is stored in
+ *  task->data[ISER_DIR_OUT].data_len, Protection size
+ *  is stored at task->prot[ISER_DIR_OUT].data_len
  */
 static int
 iser_prepare_write_cmd(struct iscsi_task *task,
@@ -135,14 +128,6 @@ iser_prepare_write_cmd(struct iscsi_task *task,
                 return err;
         }
 
-        if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
-                iser_err("Total data length: %ld, less than EDTL: %d, "
-                         "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
-                         iser_task->data[ISER_DIR_OUT].data_len,
-                         edtl, task->itt, task->conn);
-                return -EINVAL;
-        }
-
         err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
         if (err != 0) {
                 iser_err("Failed to register write cmd RDMA mem\n");
@@ -417,11 +402,12 @@ int iser_send_command(struct iscsi_conn *conn,
         if (scsi_prot_sg_count(sc)) {
                 prot_buf->buf  = scsi_prot_sglist(sc);
                 prot_buf->size = scsi_prot_sg_count(sc);
-                prot_buf->data_len = sc->prot_sdb->length;
+                prot_buf->data_len = data_buf->data_len >>
+                                     ilog2(sc->device->sector_size) * 8;
         }
 
         if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-                err = iser_prepare_read_cmd(task, edtl);
+                err = iser_prepare_read_cmd(task);
                 if (err)
                         goto send_command_error;
         }
......
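
The iser hunk above drops the per-command EDTL sanity checks because, with T10 PI in play, the expected data transfer length now also covers the protection bytes carried on the wire. A back-of-the-envelope sketch of that accounting (illustrative helpers, not driver code; assumes one 8-byte DIF tuple per logical block):

#include <linux/types.h>

/* Illustrative only: with 512-byte logical blocks, a 64 KiB transfer
 * (128 blocks) carries an extra 1024 bytes of protection information.
 */
static unsigned int example_pi_bytes(unsigned int data_len,
                                     unsigned int block_size)
{
        return (data_len / block_size) * 8;
}

static unsigned int example_expected_transfer_length(unsigned int data_len,
                                                     unsigned int block_size,
                                                     bool pi_on_the_wire)
{
        if (!pi_on_the_wire)
                return data_len;
        return data_len + example_pi_bytes(data_len, block_size);
}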
...@@ -663,8 +663,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ...@@ -663,8 +663,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
if (pi_support && !device->pi_capable) { if (pi_support && !device->pi_capable) {
pr_err("Protection information requested but not supported\n"); pr_err("Protection information requested but not supported, "
ret = -EINVAL; "rejecting connect request\n");
ret = rdma_reject(cma_id, NULL, 0);
goto out_mr; goto out_mr;
} }
...@@ -787,14 +788,12 @@ isert_disconnect_work(struct work_struct *work) ...@@ -787,14 +788,12 @@ isert_disconnect_work(struct work_struct *work)
isert_put_conn(isert_conn); isert_put_conn(isert_conn);
return; return;
} }
if (!isert_conn->logout_posted) {
pr_debug("Calling rdma_disconnect for !logout_posted from" if (isert_conn->disconnect) {
" isert_disconnect_work\n"); /* Send DREQ/DREP towards our initiator */
rdma_disconnect(isert_conn->conn_cm_id); rdma_disconnect(isert_conn->conn_cm_id);
mutex_unlock(&isert_conn->conn_mutex);
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
goto wake_up;
} }
mutex_unlock(&isert_conn->conn_mutex); mutex_unlock(&isert_conn->conn_mutex);
wake_up: wake_up:
...@@ -803,10 +802,11 @@ isert_disconnect_work(struct work_struct *work) ...@@ -803,10 +802,11 @@ isert_disconnect_work(struct work_struct *work)
} }
static void static void
isert_disconnected_handler(struct rdma_cm_id *cma_id) isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
{ {
struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
isert_conn->disconnect = disconnect;
INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
schedule_work(&isert_conn->conn_logout_work); schedule_work(&isert_conn->conn_logout_work);
} }
...@@ -815,29 +815,28 @@ static int ...@@ -815,29 +815,28 @@ static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{ {
int ret = 0; int ret = 0;
bool disconnect = false;
pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
event->event, event->status, cma_id->context, cma_id); event->event, event->status, cma_id->context, cma_id);
switch (event->event) { switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST: case RDMA_CM_EVENT_CONNECT_REQUEST:
pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
ret = isert_connect_request(cma_id, event); ret = isert_connect_request(cma_id, event);
break; break;
case RDMA_CM_EVENT_ESTABLISHED: case RDMA_CM_EVENT_ESTABLISHED:
pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
isert_connected_handler(cma_id); isert_connected_handler(cma_id);
break; break;
case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n"); case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
isert_disconnected_handler(cma_id); case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
break; disconnect = true;
case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
case RDMA_CM_EVENT_ADDR_CHANGE: isert_disconnected_handler(cma_id, disconnect);
break; break;
case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR:
default: default:
pr_err("Unknown RDMA CMA event: %d\n", event->event); pr_err("Unhandled RDMA CMA event: %d\n", event->event);
break; break;
} }
...@@ -1054,7 +1053,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, ...@@ -1054,7 +1053,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
} }
if (!login->login_failed) { if (!login->login_failed) {
if (login->login_complete) { if (login->login_complete) {
if (isert_conn->conn_device->use_fastreg) { if (!conn->sess->sess_ops->SessionType &&
isert_conn->conn_device->use_fastreg) {
/* Normal Session and fastreg is used */
u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi; u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
ret = isert_conn_create_fastreg_pool(isert_conn, ret = isert_conn_create_fastreg_pool(isert_conn,
...@@ -1824,11 +1825,8 @@ isert_do_control_comp(struct work_struct *work) ...@@ -1824,11 +1825,8 @@ isert_do_control_comp(struct work_struct *work)
break; break;
case ISTATE_SEND_LOGOUTRSP: case ISTATE_SEND_LOGOUTRSP:
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
/*
* Call atomic_dec(&isert_conn->post_send_buf_count) atomic_dec(&isert_conn->post_send_buf_count);
* from isert_wait_conn()
*/
isert_conn->logout_posted = true;
iscsit_logout_post_handler(cmd, cmd->conn); iscsit_logout_post_handler(cmd, cmd->conn);
break; break;
case ISTATE_SEND_TEXTRSP: case ISTATE_SEND_TEXTRSP:
...@@ -2034,6 +2032,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn) ...@@ -2034,6 +2032,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
isert_conn->state = ISER_CONN_DOWN; isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->conn_mutex); mutex_unlock(&isert_conn->conn_mutex);
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
complete(&isert_conn->conn_wait_comp_err); complete(&isert_conn->conn_wait_comp_err);
} }
...@@ -2320,7 +2320,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) ...@@ -2320,7 +2320,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
int rc; int rc;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
rc = iscsit_build_text_rsp(cmd, conn, hdr); rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
if (rc < 0) if (rc < 0)
return rc; return rc;
...@@ -3156,9 +3156,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) ...@@ -3156,9 +3156,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
return -ENODEV; return -ENODEV;
spin_lock_bh(&np->np_thread_lock); spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock); spin_unlock_bh(&np->np_thread_lock);
pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); pr_debug("np_thread_state %d for isert_accept_np\n",
np->np_thread_state);
/**
* No point in stalling here when np_thread
* is in state RESET/SHUTDOWN/EXIT - bail
**/
return -ENODEV; return -ENODEV;
} }
spin_unlock_bh(&np->np_thread_lock); spin_unlock_bh(&np->np_thread_lock);
...@@ -3208,15 +3213,9 @@ static void isert_wait_conn(struct iscsi_conn *conn) ...@@ -3208,15 +3213,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
struct isert_conn *isert_conn = conn->context; struct isert_conn *isert_conn = conn->context;
pr_debug("isert_wait_conn: Starting \n"); pr_debug("isert_wait_conn: Starting \n");
/*
* Decrement post_send_buf_count for special case when called
* from isert_do_control_comp() -> iscsit_logout_post_handler()
*/
mutex_lock(&isert_conn->conn_mutex);
if (isert_conn->logout_posted)
atomic_dec(&isert_conn->post_send_buf_count);
if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { mutex_lock(&isert_conn->conn_mutex);
if (isert_conn->conn_cm_id) {
pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id); rdma_disconnect(isert_conn->conn_cm_id);
} }
...@@ -3293,6 +3292,7 @@ static int __init isert_init(void) ...@@ -3293,6 +3292,7 @@ static int __init isert_init(void)
static void __exit isert_exit(void) static void __exit isert_exit(void)
{ {
flush_scheduled_work();
destroy_workqueue(isert_comp_wq); destroy_workqueue(isert_comp_wq);
destroy_workqueue(isert_rx_wq); destroy_workqueue(isert_rx_wq);
iscsit_unregister_transport(&iser_target_transport); iscsit_unregister_transport(&iser_target_transport);
......
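
For readers skimming the isert hunk above: the RDMA CM handler now funnels the "connection is going away" events into a single path and records whether a real DISCONNECTED/ADDR_CHANGE/DEVICE_REMOVAL event was seen, so that only those cases trigger rdma_disconnect(). A simplified sketch of that pattern (hypothetical handler and teardown function, not the driver's exact code):

#include <rdma/rdma_cm.h>

/* example_teardown() stands in for the driver's disconnect work; the point
 * is the bool that distinguishes "peer really went away" from a plain
 * TIMEWAIT_EXIT cleanup.
 */
static void example_teardown(void *conn_ctx, bool disconnect);

static int example_cma_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        bool disconnect = false;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_DISCONNECTED:
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                disconnect = true;
                /* fall through */
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                example_teardown(cma_id->context, disconnect);
                break;
        default:
                break;
        }
        return 0;
}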
@@ -116,7 +116,6 @@ struct isert_device;
 struct isert_conn {
         enum iser_conn_state    state;
-        bool                    logout_posted;
         int                     post_recv_buf_count;
         atomic_t                post_send_buf_count;
         u32                     responder_resources;
@@ -151,6 +150,7 @@ struct isert_conn {
 #define ISERT_COMP_BATCH_COUNT  8
         int                     conn_comp_batch;
         struct llist_head       conn_comp_llist;
+        bool                    disconnect;
 };
 
 #define ISERT_MAX_CQ 64
......
@@ -1773,6 +1773,7 @@ config SCSI_BFA_FC
 config SCSI_VIRTIO
         tristate "virtio-scsi support"
         depends on VIRTIO
+        select BLK_DEV_INTEGRITY
         help
           This is the virtual HBA driver for virtio. If the kernel will
           be used in a virtual machine, say Y or M.
......
@@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
         struct iscsi_session *session = conn->session;
         struct scsi_cmnd *sc = task->sc;
         struct iscsi_scsi_req *hdr;
-        unsigned hdrlength, cmd_len;
+        unsigned hdrlength, cmd_len, transfer_length;
         itt_t itt;
         int rc;
 
@@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
         if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
                 task->protected = true;
 
+        transfer_length = scsi_transfer_length(sc);
+        hdr->data_length = cpu_to_be32(transfer_length);
         if (sc->sc_data_direction == DMA_TO_DEVICE) {
-                unsigned out_len = scsi_out(sc)->length;
                 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
 
-                hdr->data_length = cpu_to_be32(out_len);
                 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
                 /*
                  * Write counters:
@@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
                 memset(r2t, 0, sizeof(*r2t));
 
                 if (session->imm_data_en) {
-                        if (out_len >= session->first_burst)
+                        if (transfer_length >= session->first_burst)
                                 task->imm_count = min(session->first_burst,
                                                       conn->max_xmit_dlength);
                         else
-                                task->imm_count = min(out_len,
+                                task->imm_count = min(transfer_length,
                                                       conn->max_xmit_dlength);
                         hton24(hdr->dlength, task->imm_count);
                 } else
                         zero_data(hdr->dlength);
 
                 if (!session->initial_r2t_en) {
-                        r2t->data_length = min(session->first_burst, out_len) -
+                        r2t->data_length = min(session->first_burst,
+                                               transfer_length) -
                                                task->imm_count;
                         r2t->data_offset = task->imm_count;
                         r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
@@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
         } else {
                 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
                 zero_data(hdr->dlength);
-                hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
 
                 if (sc->sc_data_direction == DMA_FROM_DEVICE)
                         hdr->flags |= ISCSI_FLAG_CMD_READ;
@@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
                   scsi_bidi_cmnd(sc) ? "bidirectional" :
                   sc->sc_data_direction == DMA_TO_DEVICE ?
                   "write" : "read", conn->id, sc, sc->cmnd[0],
-                  task->itt, scsi_bufflen(sc),
+                  task->itt, transfer_length,
                   scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
                   session->cmdsn,
                   session->max_cmdsn - session->exp_cmdsn + 1);
......
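
libiscsi now derives the iSCSI Expected Data Transfer Length from scsi_transfer_length() instead of the raw scsi_out()/scsi_in() lengths, so that DIF bytes interleaved on the wire are counted. The helper itself is introduced elsewhere in this series ("scsi_cmnd: Introduce scsi_transfer_length helper"); a simplified sketch of what it has to compute, not the exact upstream implementation, looks roughly like this:

#include <linux/log2.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Sketch only: wire-level transfer length for a command. When the
 * protection operation passes PI through to the fabric, each logical
 * block carries an extra 8-byte tuple.
 */
static unsigned int example_transfer_length(struct scsi_cmnd *scmd)
{
        unsigned int xfer_len = scsi_bufflen(scmd);
        unsigned int sector_size = scmd->device->sector_size;

        switch (scsi_get_prot_op(scmd)) {
        case SCSI_PROT_NORMAL:          /* no PI at all */
        case SCSI_PROT_WRITE_STRIP:     /* PI stripped before the wire */
        case SCSI_PROT_READ_INSERT:     /* PI generated locally on completion */
                return xfer_len;
        default:
                return xfer_len + (xfer_len >> ilog2(sector_size)) * 8;
        }
}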
...@@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, ...@@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
/* /*
* Global Variables * Global Variables
*/ */
static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool; static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq; static struct workqueue_struct *qla_tgt_wq;
...@@ -2705,6 +2704,8 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, ...@@ -2705,6 +2704,8 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
void qlt_free_cmd(struct qla_tgt_cmd *cmd) void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{ {
struct qla_tgt_sess *sess = cmd->sess;
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
"%s: se_cmd[%p] ox_id %04x\n", "%s: se_cmd[%p] ox_id %04x\n",
__func__, &cmd->se_cmd, __func__, &cmd->se_cmd,
...@@ -2713,7 +2714,12 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) ...@@ -2713,7 +2714,12 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
BUG_ON(cmd->sg_mapped); BUG_ON(cmd->sg_mapped);
if (unlikely(cmd->free_sg)) if (unlikely(cmd->free_sg))
kfree(cmd->sg); kfree(cmd->sg);
kmem_cache_free(qla_tgt_cmd_cachep, cmd);
if (!sess || !sess->se_sess) {
WARN_ON(1);
return;
}
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
} }
EXPORT_SYMBOL(qlt_free_cmd); EXPORT_SYMBOL(qlt_free_cmd);
...@@ -3075,13 +3081,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, ...@@ -3075,13 +3081,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
/* /*
* Process context for I/O path into tcm_qla2xxx code * Process context for I/O path into tcm_qla2xxx code
*/ */
static void qlt_do_work(struct work_struct *work) static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{ {
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
scsi_qla_host_t *vha = cmd->vha; scsi_qla_host_t *vha = cmd->vha;
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess = NULL; struct qla_tgt_sess *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio; struct atio_from_isp *atio = &cmd->atio;
unsigned char *cdb; unsigned char *cdb;
unsigned long flags; unsigned long flags;
...@@ -3091,41 +3096,6 @@ static void qlt_do_work(struct work_struct *work) ...@@ -3091,41 +3096,6 @@ static void qlt_do_work(struct work_struct *work)
if (tgt->tgt_stop) if (tgt->tgt_stop)
goto out_term; goto out_term;
spin_lock_irqsave(&ha->hardware_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
atio->u.isp24.fcp_hdr.s_id);
/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
if (sess)
kref_get(&sess->se_sess->sess_kref);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (unlikely(!sess)) {
uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
"qla_target(%d): Unable to find wwn login"
" (s_id %x:%x:%x), trying to create it manually\n",
vha->vp_idx, s_id[0], s_id[1], s_id[2]);
if (atio->u.raw.entry_count > 1) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
"Dropping multy entry cmd %p\n", cmd);
goto out_term;
}
mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has an extra creation ref. */
mutex_unlock(&vha->vha_tgt.tgt_mutex);
if (!sess)
goto out_term;
}
cmd->sess = sess;
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->tag = atio->u.isp24.exchange_addr; cmd->tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int( cmd->unpacked_lun = scsilun_to_int(
...@@ -3153,8 +3123,8 @@ static void qlt_do_work(struct work_struct *work) ...@@ -3153,8 +3123,8 @@ static void qlt_do_work(struct work_struct *work)
cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length, cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
cmd->atio.u.isp24.fcp_hdr.ox_id); cmd->atio.u.isp24.fcp_hdr.ox_id);
ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
fcp_task_attr, data_dir, bidi); fcp_task_attr, data_dir, bidi);
if (ret != 0) if (ret != 0)
goto out_term; goto out_term;
/* /*
...@@ -3173,17 +3143,114 @@ static void qlt_do_work(struct work_struct *work) ...@@ -3173,17 +3143,114 @@ static void qlt_do_work(struct work_struct *work)
*/ */
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
kmem_cache_free(qla_tgt_cmd_cachep, cmd); percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
if (sess) ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_do_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
__qlt_do_work(cmd);
}
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct qla_tgt_sess *sess,
struct atio_from_isp *atio)
{
struct se_session *se_sess = sess->se_sess;
struct qla_tgt_cmd *cmd;
int tag;
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0)
return NULL;
cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(struct qla_tgt_cmd));
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
return cmd;
}
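
qlt_get_tag() above is the qla2xxx instance of the generic percpu_ida pre-allocation pattern: each tag indexes into a per-session command array sized when the session was created (see the transport_init_session_tags() call in the tcm_qla2xxx hunk further down). A stripped-down sketch of the allocate/free pairing, with a hypothetical example_cmd structure standing in for the driver's command descriptor:

#include <linux/percpu_ida.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <target/target_core_base.h>

struct example_cmd {
        int map_tag;
        /* ... per-command driver state lives here ... */
};

static struct example_cmd *example_get_cmd(struct se_session *se_sess)
{
        struct example_cmd *cmd;
        int tag;

        /* No allocation in the I/O path: a negative tag simply means the
         * pre-sized pool is exhausted and the caller must push back. */
        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0)
                return NULL;

        cmd = &((struct example_cmd *)se_sess->sess_cmd_map)[tag];
        memset(cmd, 0, sizeof(*cmd));
        cmd->map_tag = tag;
        return cmd;
}

static void example_put_cmd(struct se_session *se_sess, struct example_cmd *cmd)
{
        percpu_ida_free(&se_sess->sess_tag_pool, cmd->map_tag);
}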
static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
uint16_t);
static void qlt_create_sess_from_atio(struct work_struct *work)
{
struct qla_tgt_sess_op *op = container_of(work,
struct qla_tgt_sess_op, work);
scsi_qla_host_t *vha = op->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess;
struct qla_tgt_cmd *cmd;
unsigned long flags;
uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
"qla_target(%d): Unable to find wwn login"
" (s_id %x:%x:%x), trying to create it manually\n",
vha->vp_idx, s_id[0], s_id[1], s_id[2]);
if (op->atio.u.raw.entry_count > 1) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
"Dropping multy entry atio %p\n", &op->atio);
goto out_term;
}
mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has an extra creation ref. */
mutex_unlock(&vha->vha_tgt.tgt_mutex);
if (!sess)
goto out_term;
/*
* Now obtain a pre-allocated session tag using the original op->atio
* packet header, and dispatch into __qlt_do_work() using the existing
* process context.
*/
cmd = qlt_get_tag(vha, sess, &op->atio);
if (!cmd) {
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
ha->tgt.tgt_ops->put_sess(sess); ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
return;
}
/*
* __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
* the extra reference taken above by qlt_make_local_sess()
*/
__qlt_do_work(cmd);
kfree(op);
return;
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &op->atio, 1);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
} }
/* ha->hardware_lock supposed to be held on entry */ /* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio) struct atio_from_isp *atio)
{ {
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
struct qla_tgt_cmd *cmd; struct qla_tgt_cmd *cmd;
if (unlikely(tgt->tgt_stop)) { if (unlikely(tgt->tgt_stop)) {
...@@ -3192,18 +3259,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, ...@@ -3192,18 +3259,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -EFAULT; return -EFAULT;
} }
cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
if (unlikely(!sess)) {
struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
GFP_ATOMIC);
if (!op)
return -ENOMEM;
memcpy(&op->atio, atio, sizeof(*atio));
INIT_WORK(&op->work, qlt_create_sess_from_atio);
queue_work(qla_tgt_wq, &op->work);
return 0;
}
/*
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
kref_get(&sess->se_sess->sess_kref);
cmd = qlt_get_tag(vha, sess, atio);
if (!cmd) { if (!cmd) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
ha->tgt.tgt_ops->put_sess(sess);
return -ENOMEM; return -ENOMEM;
} }
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
INIT_WORK(&cmd->work, qlt_do_work); INIT_WORK(&cmd->work, qlt_do_work);
queue_work(qla_tgt_wq, &cmd->work); queue_work(qla_tgt_wq, &cmd->work);
return 0; return 0;
...@@ -5501,23 +5581,13 @@ int __init qlt_init(void) ...@@ -5501,23 +5581,13 @@ int __init qlt_init(void)
if (!QLA_TGT_MODE_ENABLED()) if (!QLA_TGT_MODE_ENABLED())
return 0; return 0;
qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
NULL);
if (!qla_tgt_cmd_cachep) {
ql_log(ql_log_fatal, NULL, 0xe06c,
"kmem_cache_create for qla_tgt_cmd_cachep failed\n");
return -ENOMEM;
}
qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
qla_tgt_mgmt_cmd), 0, NULL); qla_tgt_mgmt_cmd), 0, NULL);
if (!qla_tgt_mgmt_cmd_cachep) { if (!qla_tgt_mgmt_cmd_cachep) {
ql_log(ql_log_fatal, NULL, 0xe06d, ql_log(ql_log_fatal, NULL, 0xe06d,
"kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
ret = -ENOMEM; return -ENOMEM;
goto out;
} }
qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
...@@ -5545,8 +5615,6 @@ int __init qlt_init(void) ...@@ -5545,8 +5615,6 @@ int __init qlt_init(void)
mempool_destroy(qla_tgt_mgmt_cmd_mempool); mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep: out_mgmt_cmd_cachep:
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
kmem_cache_destroy(qla_tgt_cmd_cachep);
return ret; return ret;
} }
...@@ -5558,5 +5626,4 @@ void qlt_exit(void) ...@@ -5558,5 +5626,4 @@ void qlt_exit(void)
destroy_workqueue(qla_tgt_wq); destroy_workqueue(qla_tgt_wq);
mempool_destroy(qla_tgt_mgmt_cmd_mempool); mempool_destroy(qla_tgt_mgmt_cmd_mempool);
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
kmem_cache_destroy(qla_tgt_cmd_cachep);
} }
@@ -870,6 +870,12 @@ struct qla_tgt {
         struct list_head tgt_list_entry;
 };
 
+struct qla_tgt_sess_op {
+        struct scsi_qla_host *vha;
+        struct atio_from_isp atio;
+        struct work_struct work;
+};
+
 /*
  * Equivilant to IT Nexus (Initiator-Target)
  */
......
@@ -1501,6 +1501,8 @@ static int tcm_qla2xxx_check_initiator_node_acl(
         struct qla_tgt_sess *sess = qla_tgt_sess;
         unsigned char port_name[36];
         unsigned long flags;
+        int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
+                       TCM_QLA2XXX_DEFAULT_TAGS;
 
         lport = vha->vha_tgt.target_lport_ptr;
         if (!lport) {
@@ -1518,7 +1520,9 @@ static int tcm_qla2xxx_check_initiator_node_acl(
         }
         se_tpg = &tpg->se_tpg;
 
-        se_sess = transport_init_session(TARGET_PROT_NORMAL);
+        se_sess = transport_init_session_tags(num_tags,
+                                              sizeof(struct qla_tgt_cmd),
+                                              TARGET_PROT_NORMAL);
         if (IS_ERR(se_sess)) {
                 pr_err("Unable to initialize struct se_session\n");
                 return PTR_ERR(se_sess);
......
@@ -4,6 +4,11 @@
 #define TCM_QLA2XXX_VERSION     "v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN     32
+/*
+ * Number of pre-allocated per-session tags, based upon the worst-case
+ * per port number of iocbs
+ */
+#define TCM_QLA2XXX_DEFAULT_TAGS 2088
 
 #include "qla_target.h"
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/virtio_config.h> #include <linux/virtio_config.h>
#include <linux/virtio_scsi.h> #include <linux/virtio_scsi.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
...@@ -37,6 +38,7 @@ struct virtio_scsi_cmd { ...@@ -37,6 +38,7 @@ struct virtio_scsi_cmd {
struct completion *comp; struct completion *comp;
union { union {
struct virtio_scsi_cmd_req cmd; struct virtio_scsi_cmd_req cmd;
struct virtio_scsi_cmd_req_pi cmd_pi;
struct virtio_scsi_ctrl_tmf_req tmf; struct virtio_scsi_ctrl_tmf_req tmf;
struct virtio_scsi_ctrl_an_req an; struct virtio_scsi_ctrl_an_req an;
} req; } req;
...@@ -399,7 +401,7 @@ static int virtscsi_add_cmd(struct virtqueue *vq, ...@@ -399,7 +401,7 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
size_t req_size, size_t resp_size) size_t req_size, size_t resp_size)
{ {
struct scsi_cmnd *sc = cmd->sc; struct scsi_cmnd *sc = cmd->sc;
struct scatterlist *sgs[4], req, resp; struct scatterlist *sgs[6], req, resp;
struct sg_table *out, *in; struct sg_table *out, *in;
unsigned out_num = 0, in_num = 0; unsigned out_num = 0, in_num = 0;
...@@ -417,16 +419,24 @@ static int virtscsi_add_cmd(struct virtqueue *vq, ...@@ -417,16 +419,24 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
sgs[out_num++] = &req; sgs[out_num++] = &req;
/* Data-out buffer. */ /* Data-out buffer. */
if (out) if (out) {
/* Place WRITE protection SGLs before Data OUT payload */
if (scsi_prot_sg_count(sc))
sgs[out_num++] = scsi_prot_sglist(sc);
sgs[out_num++] = out->sgl; sgs[out_num++] = out->sgl;
}
/* Response header. */ /* Response header. */
sg_init_one(&resp, &cmd->resp, resp_size); sg_init_one(&resp, &cmd->resp, resp_size);
sgs[out_num + in_num++] = &resp; sgs[out_num + in_num++] = &resp;
/* Data-in buffer */ /* Data-in buffer */
if (in) if (in) {
/* Place READ protection SGLs before Data IN payload */
if (scsi_prot_sg_count(sc))
sgs[out_num + in_num++] = scsi_prot_sglist(sc);
sgs[out_num + in_num++] = in->sgl; sgs[out_num + in_num++] = in->sgl;
}
return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
} }
...@@ -451,12 +461,45 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, ...@@ -451,12 +461,45 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
return err; return err;
} }
static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
struct scsi_cmnd *sc)
{
cmd->lun[0] = 1;
cmd->lun[1] = sc->device->id;
cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
cmd->lun[3] = sc->device->lun & 0xff;
cmd->tag = (unsigned long)sc;
cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
cmd->prio = 0;
cmd->crn = 0;
}
static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
struct scsi_cmnd *sc)
{
struct request *rq = sc->request;
struct blk_integrity *bi;
virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
if (!rq || !scsi_prot_sg_count(sc))
return;
bi = blk_get_integrity(rq->rq_disk);
if (sc->sc_data_direction == DMA_TO_DEVICE)
cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
else if (sc->sc_data_direction == DMA_FROM_DEVICE)
cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
}
static int virtscsi_queuecommand(struct virtio_scsi *vscsi, static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
struct virtio_scsi_vq *req_vq, struct virtio_scsi_vq *req_vq,
struct scsi_cmnd *sc) struct scsi_cmnd *sc)
{ {
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
int req_size;
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
...@@ -468,22 +511,20 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, ...@@ -468,22 +511,20 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
memset(cmd, 0, sizeof(*cmd)); memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc; cmd->sc = sc;
cmd->req.cmd = (struct virtio_scsi_cmd_req){
.lun[0] = 1,
.lun[1] = sc->device->id,
.lun[2] = (sc->device->lun >> 8) | 0x40,
.lun[3] = sc->device->lun & 0xff,
.tag = (unsigned long)sc,
.task_attr = VIRTIO_SCSI_S_SIMPLE,
.prio = 0,
.crn = 0,
};
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
if (virtscsi_kick_cmd(req_vq, cmd, if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
sizeof cmd->req.cmd, sizeof cmd->resp.cmd) != 0) virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
req_size = sizeof(cmd->req.cmd_pi);
} else {
virtio_scsi_init_hdr(&cmd->req.cmd, sc);
memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
req_size = sizeof(cmd->req.cmd);
}
if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
return 0; return 0;
} }
...@@ -820,7 +861,7 @@ static int virtscsi_probe(struct virtio_device *vdev) ...@@ -820,7 +861,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
{ {
struct Scsi_Host *shost; struct Scsi_Host *shost;
struct virtio_scsi *vscsi; struct virtio_scsi *vscsi;
int err; int err, host_prot;
u32 sg_elems, num_targets; u32 sg_elems, num_targets;
u32 cmd_per_lun; u32 cmd_per_lun;
u32 num_queues; u32 num_queues;
...@@ -870,6 +911,16 @@ static int virtscsi_probe(struct virtio_device *vdev) ...@@ -870,6 +911,16 @@ static int virtscsi_probe(struct virtio_device *vdev)
shost->max_id = num_targets; shost->max_id = num_targets;
shost->max_channel = 0; shost->max_channel = 0;
shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
scsi_host_set_prot(shost, host_prot);
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
}
err = scsi_add_host(shost, &vdev->dev); err = scsi_add_host(shost, &vdev->dev);
if (err) if (err)
goto scsi_add_host_failed; goto scsi_add_host_failed;
...@@ -939,6 +990,7 @@ static struct virtio_device_id id_table[] = { ...@@ -939,6 +990,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = { static unsigned int features[] = {
VIRTIO_SCSI_F_HOTPLUG, VIRTIO_SCSI_F_HOTPLUG,
VIRTIO_SCSI_F_CHANGE, VIRTIO_SCSI_F_CHANGE,
VIRTIO_SCSI_F_T10_PI,
}; };
static struct virtio_driver virtio_scsi_driver = { static struct virtio_driver virtio_scsi_driver = {
......
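
On the virtio-scsi initiator side, the new cmd_pi request header tells the host how many protection bytes ride along with the data in each direction, and those counts come straight from the block integrity profile. A hedged sketch of the calculation (hypothetical helper name, assuming the v3.16-era block API with rq->rq_disk and blk_integrity->tuple_size):

#include <linux/blkdev.h>

/* Sketch: protection bytes attached to a request. blk_rq_sectors() counts
 * 512-byte units; with 512-byte logical blocks and 8-byte DIF tuples a
 * 64 KiB request (128 sectors) carries 1024 bytes of PI.
 */
static u32 example_rq_pi_bytes(struct request *rq)
{
        struct blk_integrity *bi = blk_get_integrity(rq->rq_disk);

        if (!bi)
                return 0;
        return blk_rq_sectors(rq) * bi->tuple_size;
}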
...@@ -300,7 +300,7 @@ bool iscsit_check_np_match( ...@@ -300,7 +300,7 @@ bool iscsit_check_np_match(
port = ntohs(sock_in->sin_port); port = ntohs(sock_in->sin_port);
} }
if ((ip_match == true) && (np->np_port == port) && if (ip_match && (np->np_port == port) &&
(np->np_network_transport == network_transport)) (np->np_network_transport == network_transport))
return true; return true;
...@@ -325,7 +325,7 @@ static struct iscsi_np *iscsit_get_np( ...@@ -325,7 +325,7 @@ static struct iscsi_np *iscsit_get_np(
} }
match = iscsit_check_np_match(sockaddr, np, network_transport); match = iscsit_check_np_match(sockaddr, np, network_transport);
if (match == true) { if (match) {
/* /*
* Increment the np_exports reference count now to * Increment the np_exports reference count now to
* prevent iscsit_del_np() below from being called * prevent iscsit_del_np() below from being called
...@@ -1121,7 +1121,7 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, ...@@ -1121,7 +1121,7 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
/* /*
* Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
*/ */
if (dump_payload == true) if (dump_payload)
goto after_immediate_data; goto after_immediate_data;
immed_ret = iscsit_handle_immediate_data(cmd, hdr, immed_ret = iscsit_handle_immediate_data(cmd, hdr,
...@@ -3390,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np) ...@@ -3390,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
#define SENDTARGETS_BUF_LIMIT 32768U #define SENDTARGETS_BUF_LIMIT 32768U
static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) static int
iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
enum iscsit_transport_type network_transport)
{ {
char *payload = NULL; char *payload = NULL;
struct iscsi_conn *conn = cmd->conn; struct iscsi_conn *conn = cmd->conn;
...@@ -3467,6 +3469,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) ...@@ -3467,6 +3469,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
struct iscsi_np *np = tpg_np->tpg_np; struct iscsi_np *np = tpg_np->tpg_np;
bool inaddr_any = iscsit_check_inaddr_any(np); bool inaddr_any = iscsit_check_inaddr_any(np);
if (np->np_network_transport != network_transport)
continue;
if (!target_name_printed) { if (!target_name_printed) {
len = sprintf(buf, "TargetName=%s", len = sprintf(buf, "TargetName=%s",
tiqn->tiqn); tiqn->tiqn);
...@@ -3485,10 +3490,8 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) ...@@ -3485,10 +3490,8 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
len = sprintf(buf, "TargetAddress=" len = sprintf(buf, "TargetAddress="
"%s:%hu,%hu", "%s:%hu,%hu",
(inaddr_any == false) ? inaddr_any ? conn->local_ip : np->np_ip,
np->np_ip : conn->local_ip, inaddr_any ? conn->local_port : np->np_port,
(inaddr_any == false) ?
np->np_port : conn->local_port,
tpg->tpgt); tpg->tpgt);
len += 1; len += 1;
...@@ -3520,11 +3523,12 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) ...@@ -3520,11 +3523,12 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
int int
iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
struct iscsi_text_rsp *hdr) struct iscsi_text_rsp *hdr,
enum iscsit_transport_type network_transport)
{ {
int text_length, padding; int text_length, padding;
text_length = iscsit_build_sendtargets_response(cmd); text_length = iscsit_build_sendtargets_response(cmd, network_transport);
if (text_length < 0) if (text_length < 0)
return text_length; return text_length;
...@@ -3562,7 +3566,7 @@ static int iscsit_send_text_rsp( ...@@ -3562,7 +3566,7 @@ static int iscsit_send_text_rsp(
u32 tx_size = 0; u32 tx_size = 0;
int text_length, iov_count = 0, rc; int text_length, iov_count = 0, rc;
rc = iscsit_build_text_rsp(cmd, conn, hdr); rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
if (rc < 0) if (rc < 0)
return rc; return rc;
...@@ -4234,8 +4238,6 @@ int iscsit_close_connection( ...@@ -4234,8 +4238,6 @@ int iscsit_close_connection(
if (conn->conn_transport->iscsit_wait_conn) if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn); conn->conn_transport->iscsit_wait_conn(conn);
iscsit_free_queue_reqs_for_conn(conn);
/* /*
* During Connection recovery drop unacknowledged out of order * During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands * commands for this connection, and prepare the other commands
...@@ -4252,6 +4254,7 @@ int iscsit_close_connection( ...@@ -4252,6 +4254,7 @@ int iscsit_close_connection(
iscsit_clear_ooo_cmdsns_for_conn(conn); iscsit_clear_ooo_cmdsns_for_conn(conn);
iscsit_release_commands_from_conn(conn); iscsit_release_commands_from_conn(conn);
} }
iscsit_free_queue_reqs_for_conn(conn);
/* /*
* Handle decrementing session or connection usage count if * Handle decrementing session or connection usage count if
......
@@ -71,6 +71,40 @@ static void chap_gen_challenge(
                         challenge_asciihex);
 }
 
+static int chap_check_algorithm(const char *a_str)
+{
+        char *tmp, *orig, *token;
+
+        tmp = kstrdup(a_str, GFP_KERNEL);
+        if (!tmp) {
+                pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
+                return CHAP_DIGEST_UNKNOWN;
+        }
+        orig = tmp;
+
+        token = strsep(&tmp, "=");
+        if (!token)
+                goto out;
+
+        if (strcmp(token, "CHAP_A")) {
+                pr_err("Unable to locate CHAP_A key\n");
+                goto out;
+        }
+        while (token) {
+                token = strsep(&tmp, ",");
+                if (!token)
+                        goto out;
+
+                if (!strncmp(token, "5", 1)) {
+                        pr_debug("Selected MD5 Algorithm\n");
+                        kfree(orig);
+                        return CHAP_DIGEST_MD5;
+                }
+        }
+out:
+        kfree(orig);
+        return CHAP_DIGEST_UNKNOWN;
+}
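
A quick usage sketch for the parser above (hypothetical self-test, not part of the patch): CHAP_A carries a comma-separated list of digest preferences, and anything other than MD5 (value 5) is rejected.

static void chap_check_algorithm_examples(void)
{
        /* MD5 requested directly, or anywhere in the preference list */
        WARN_ON(chap_check_algorithm("CHAP_A=5") != CHAP_DIGEST_MD5);
        WARN_ON(chap_check_algorithm("CHAP_A=6,5") != CHAP_DIGEST_MD5);

        /* no supported digest, or a key that is not CHAP_A at all */
        WARN_ON(chap_check_algorithm("CHAP_A=6,7") != CHAP_DIGEST_UNKNOWN);
        WARN_ON(chap_check_algorithm("CHAP_B=5") != CHAP_DIGEST_UNKNOWN);
}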
 static struct iscsi_chap *chap_server_open(
         struct iscsi_conn *conn,
@@ -79,6 +113,7 @@ static struct iscsi_chap *chap_server_open(
         char *aic_str,
         unsigned int *aic_len)
 {
+        int ret;
         struct iscsi_chap *chap;
 
         if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -93,21 +128,24 @@ static struct iscsi_chap *chap_server_open(
                 return NULL;
 
         chap = conn->auth_protocol;
-        /*
-         * We only support MD5 MDA presently.
-         */
-        if (strncmp(a_str, "CHAP_A=5", 8)) {
-                pr_err("CHAP_A is not MD5.\n");
+        ret = chap_check_algorithm(a_str);
+        switch (ret) {
+        case CHAP_DIGEST_MD5:
+                pr_debug("[server] Got CHAP_A=5\n");
+                /*
+                 * Send back CHAP_A set to MD5.
+                 */
+                *aic_len = sprintf(aic_str, "CHAP_A=5");
+                *aic_len += 1;
+                chap->digest_type = CHAP_DIGEST_MD5;
+                pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+                break;
+        case CHAP_DIGEST_UNKNOWN:
+        default:
+                pr_err("Unsupported CHAP_A value\n");
                 return NULL;
         }
-        pr_debug("[server] Got CHAP_A=5\n");
-        /*
-         * Send back CHAP_A set to MD5.
-         */
-        *aic_len = sprintf(aic_str, "CHAP_A=5");
-        *aic_len += 1;
-        chap->digest_type = CHAP_DIGEST_MD5;
-        pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+
         /*
          * Set Identifier.
          */
@@ -313,6 +351,16 @@ static int chap_server_compute_md5(
                 pr_err("Unable to convert incoming challenge\n");
                 goto out;
         }
+        /*
+         * During mutual authentication, the CHAP_C generated by the
+         * initiator must not match the original CHAP_C generated by
+         * the target.
+         */
+        if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+                pr_err("initiator CHAP_C matches target CHAP_C, failing"
+                       " login attempt\n");
+                goto out;
+        }
         /*
          * Generate CHAP_N and CHAP_R for mutual authentication.
          */
......
#ifndef _ISCSI_CHAP_H_ #ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_ #define _ISCSI_CHAP_H_
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5 #define CHAP_DIGEST_MD5 5
#define CHAP_DIGEST_SHA 6 #define CHAP_DIGEST_SHA 6
......
...@@ -1145,7 +1145,7 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t) ...@@ -1145,7 +1145,7 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
void iscsi_target_login_sess_out(struct iscsi_conn *conn, void iscsi_target_login_sess_out(struct iscsi_conn *conn,
struct iscsi_np *np, bool zero_tsih, bool new_sess) struct iscsi_np *np, bool zero_tsih, bool new_sess)
{ {
if (new_sess == false) if (!new_sess)
goto old_sess_out; goto old_sess_out;
pr_err("iSCSI Login negotiation failed.\n"); pr_err("iSCSI Login negotiation failed.\n");
......
...@@ -404,7 +404,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk) ...@@ -404,7 +404,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
} }
rc = schedule_delayed_work(&conn->login_work, 0); rc = schedule_delayed_work(&conn->login_work, 0);
if (rc == false) { if (!rc) {
pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work" pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
" got false\n"); " got false\n");
} }
...@@ -513,7 +513,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) ...@@ -513,7 +513,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
state = (tpg->tpg_state == TPG_STATE_ACTIVE); state = (tpg->tpg_state == TPG_STATE_ACTIVE);
spin_unlock(&tpg->tpg_state_lock); spin_unlock(&tpg->tpg_state_lock);
if (state == false) { if (!state) {
pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
iscsi_target_restore_sock_callbacks(conn); iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login); iscsi_target_login_drop(conn, login);
...@@ -528,7 +528,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) ...@@ -528,7 +528,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
state = iscsi_target_sk_state_check(sk); state = iscsi_target_sk_state_check(sk);
read_unlock_bh(&sk->sk_callback_lock); read_unlock_bh(&sk->sk_callback_lock);
if (state == false) { if (!state) {
pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
iscsi_target_restore_sock_callbacks(conn); iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login); iscsi_target_login_drop(conn, login);
...@@ -773,6 +773,12 @@ static int iscsi_target_handle_csg_zero( ...@@ -773,6 +773,12 @@ static int iscsi_target_handle_csg_zero(
} }
goto do_auth; goto do_auth;
} else if (!payload_length) {
pr_err("Initiator sent zero length security payload,"
" login failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
} }
if (login->first_request) if (login->first_request)
......
...@@ -474,10 +474,10 @@ int iscsi_set_keys_to_negotiate( ...@@ -474,10 +474,10 @@ int iscsi_set_keys_to_negotiate(
if (!strcmp(param->name, AUTHMETHOD)) { if (!strcmp(param->name, AUTHMETHOD)) {
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, HEADERDIGEST)) { } else if (!strcmp(param->name, HEADERDIGEST)) {
if (iser == false) if (!iser)
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DATADIGEST)) { } else if (!strcmp(param->name, DATADIGEST)) {
if (iser == false) if (!iser)
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXCONNECTIONS)) { } else if (!strcmp(param->name, MAXCONNECTIONS)) {
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
...@@ -497,7 +497,7 @@ int iscsi_set_keys_to_negotiate( ...@@ -497,7 +497,7 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, IMMEDIATEDATA)) { } else if (!strcmp(param->name, IMMEDIATEDATA)) {
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) { } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
if (iser == false) if (!iser)
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) { } else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
continue; continue;
...@@ -528,13 +528,13 @@ int iscsi_set_keys_to_negotiate( ...@@ -528,13 +528,13 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, OFMARKINT)) { } else if (!strcmp(param->name, OFMARKINT)) {
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, RDMAEXTENSIONS)) { } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
if (iser == true) if (iser)
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
if (iser == true) if (iser)
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) { } else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
if (iser == true) if (iser)
SET_PSTATE_NEGOTIATE(param); SET_PSTATE_NEGOTIATE(param);
} }
} }
...@@ -1605,7 +1605,7 @@ int iscsi_decode_text_input( ...@@ -1605,7 +1605,7 @@ int iscsi_decode_text_input(
tmpbuf = kzalloc(length + 1, GFP_KERNEL); tmpbuf = kzalloc(length + 1, GFP_KERNEL);
if (!tmpbuf) { if (!tmpbuf) {
pr_err("Unable to allocate memory for tmpbuf.\n"); pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
return -1; return -1;
} }
......
...@@ -189,7 +189,7 @@ static void iscsit_clear_tpg_np_login_thread( ...@@ -189,7 +189,7 @@ static void iscsit_clear_tpg_np_login_thread(
iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
} }
void iscsit_clear_tpg_np_login_threads( static void iscsit_clear_tpg_np_login_threads(
struct iscsi_portal_group *tpg, struct iscsi_portal_group *tpg,
bool shutdown) bool shutdown)
{ {
...@@ -276,8 +276,6 @@ int iscsit_tpg_del_portal_group( ...@@ -276,8 +276,6 @@ int iscsit_tpg_del_portal_group(
tpg->tpg_state = TPG_STATE_INACTIVE; tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock); spin_unlock(&tpg->tpg_state_lock);
iscsit_clear_tpg_np_login_threads(tpg, true);
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
pr_err("Unable to delete iSCSI Target Portal Group:" pr_err("Unable to delete iSCSI Target Portal Group:"
" %hu while active sessions exist, and force=0\n", " %hu while active sessions exist, and force=0\n",
...@@ -453,7 +451,7 @@ static bool iscsit_tpg_check_network_portal( ...@@ -453,7 +451,7 @@ static bool iscsit_tpg_check_network_portal(
match = iscsit_check_np_match(sockaddr, np, match = iscsit_check_np_match(sockaddr, np,
network_transport); network_transport);
if (match == true) if (match)
break; break;
} }
spin_unlock(&tpg->tpg_np_lock); spin_unlock(&tpg->tpg_np_lock);
...@@ -475,7 +473,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( ...@@ -475,7 +473,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
if (!tpg_np_parent) { if (!tpg_np_parent) {
if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr, if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
network_transport) == true) { network_transport)) {
pr_err("Network Portal: %s already exists on a" pr_err("Network Portal: %s already exists on a"
" different TPG on %s\n", ip_str, " different TPG on %s\n", ip_str,
tpg->tpg_tiqn->tiqn); tpg->tpg_tiqn->tiqn);
......
...@@ -8,7 +8,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *, ...@@ -8,7 +8,6 @@ extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
struct iscsi_np *, struct iscsi_tpg_np **); struct iscsi_np *, struct iscsi_tpg_np **);
extern int iscsit_get_tpg(struct iscsi_portal_group *); extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *); extern void iscsit_put_tpg(struct iscsi_portal_group *);
extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *, bool);
extern void iscsit_tpg_dump_params(struct iscsi_portal_group *); extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *); extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *, extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
......
@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
         struct tcm_loop_hba *tl_hba;
         struct tcm_loop_tpg *tl_tpg;
         struct scatterlist *sgl_bidi = NULL;
-        u32 sgl_bidi_count = 0;
+        u32 sgl_bidi_count = 0, transfer_length;
         int rc;
 
         tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
@@ -213,12 +213,21 @@ static void tcm_loop_submission_work(struct work_struct *work)
         }
 
-        if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+        transfer_length = scsi_transfer_length(sc);
+        if (!scsi_prot_sg_count(sc) &&
+            scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
                 se_cmd->prot_pto = true;
+                /*
+                 * loopback transport doesn't support
+                 * WRITE_GENERATE, READ_STRIP protection
+                 * information operations, go ahead unprotected.
+                 */
+                transfer_length = scsi_bufflen(sc);
+        }
 
         rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
                         &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
-                        scsi_bufflen(sc), tcm_loop_sam_attr(sc),
+                        transfer_length, tcm_loop_sam_attr(sc),
                         sc->sc_data_direction, 0,
                         scsi_sglist(sc), scsi_sg_count(sc),
                         sgl_bidi, sgl_bidi_count,
......
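The tcm_loop hunk above submits transfer_length from scsi_transfer_length(sc) instead of scsi_bufflen(sc), so the expected data transfer length now counts the 8 bytes of T10 PI that ride with each logical block whenever protection information actually crosses the wire. A rough userspace sketch of that arithmetic, with illustrative names and values that are not taken from the patch:

#include <stdio.h>

/* Illustrative model: data bytes plus 8 bytes of PI per logical block when
 * protection information is carried on the wire, otherwise just the data
 * bytes (what scsi_bufflen() would report).
 */
static unsigned int wire_transfer_length(unsigned int data_bytes,
					 unsigned int block_size,
					 int pi_on_wire)
{
	unsigned int blocks = data_bytes / block_size;

	return pi_on_wire ? data_bytes + blocks * 8 : data_bytes;
}

int main(void)
{
	/* A 4 KB read in 512-byte blocks: 4096 bytes without PI,
	 * 4096 + 8 * 8 = 4160 bytes when PI rides along.
	 */
	printf("no PI:   %u\n", wire_transfer_length(4096, 512, 0));
	printf("with PI: %u\n", wire_transfer_length(4096, 512, 1));
	return 0;
}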
@@ -81,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
 		transport_kunmap_data_sg(cmd);
 	}
-	target_complete_cmd(cmd, GOOD);
+	target_complete_cmd_with_length(cmd, GOOD, 8);
 	return 0;
 }
@@ -137,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 		transport_kunmap_data_sg(cmd);
 	}
-	target_complete_cmd(cmd, GOOD);
+	target_complete_cmd_with_length(cmd, GOOD, 32);
 	return 0;
 }
@@ -176,24 +176,6 @@ static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
 	return cmd->se_dev->dev_attrib.block_size * sectors;
 }
-static int sbc_check_valid_sectors(struct se_cmd *cmd)
-{
-	struct se_device *dev = cmd->se_dev;
-	unsigned long long end_lba;
-	u32 sectors;
-	sectors = cmd->data_length / dev->dev_attrib.block_size;
-	end_lba = dev->transport->get_blocks(dev) + 1;
-	if (cmd->t_task_lba + sectors > end_lba) {
-		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
-			cmd->t_task_lba, sectors, end_lba);
-		return -EINVAL;
-	}
-	return 0;
-}
 static inline u32 transport_get_sectors_6(unsigned char *cdb)
 {
 	/*
@@ -665,8 +647,19 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 	cmd->prot_type = dev->dev_attrib.pi_prot_type;
 	cmd->prot_length = dev->prot_length * sectors;
-	pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
-		 __func__, cmd->prot_type, cmd->prot_length,
+	/**
+	 * In case protection information exists over the wire
+	 * we modify command data length to describe pure data.
+	 * The actual transfer length is data length + protection
+	 * length
+	 **/
+	if (protect)
+		cmd->data_length = sectors * dev->dev_attrib.block_size;
+	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
+		 "prot_op=%d prot_checks=%d\n",
+		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
 		 cmd->prot_op, cmd->prot_checks);
 	return true;
@@ -877,15 +870,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		break;
 	case SYNCHRONIZE_CACHE:
 	case SYNCHRONIZE_CACHE_16:
-		if (!ops->execute_sync_cache) {
-			size = 0;
-			cmd->execute_cmd = sbc_emulate_noop;
-			break;
-		}
-		/*
-		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
-		 */
 		if (cdb[0] == SYNCHRONIZE_CACHE) {
 			sectors = transport_get_sectors_10(cdb);
 			cmd->t_task_lba = transport_lba_32(cdb);
@@ -893,18 +877,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 			sectors = transport_get_sectors_16(cdb);
 			cmd->t_task_lba = transport_lba_64(cdb);
 		}
-		size = sbc_get_size(cmd, sectors);
-		/*
-		 * Check to ensure that LBA + Range does not exceed past end of
-		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
-		 */
-		if (cmd->t_task_lba || sectors) {
-			if (sbc_check_valid_sectors(cmd) < 0)
-				return TCM_ADDRESS_OUT_OF_RANGE;
-		}
-		cmd->execute_cmd = ops->execute_sync_cache;
+		if (ops->execute_sync_cache) {
+			cmd->execute_cmd = ops->execute_sync_cache;
+			goto check_lba;
+		}
+		size = 0;
+		cmd->execute_cmd = sbc_emulate_noop;
 		break;
 	case UNMAP:
 		if (!ops->execute_unmap)
@@ -947,8 +925,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		break;
 	case VERIFY:
 		size = 0;
+		sectors = transport_get_sectors_10(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
 		cmd->execute_cmd = sbc_emulate_noop;
-		break;
+		goto check_lba;
 	case REZERO_UNIT:
 	case SEEK_6:
 	case SEEK_10:
@@ -988,7 +968,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 				dev->dev_attrib.hw_max_sectors);
 			return TCM_INVALID_CDB_FIELD;
 		}
+check_lba:
 		end_lba = dev->transport->get_blocks(dev) + 1;
 		if (cmd->t_task_lba + sectors > end_lba) {
 			pr_err("cmd exceeds last lba %llu "
......
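In the sbc_check_prot() hunk, cmd->data_length arrives from the fabric as the full expected transfer length (data plus interleaved PI) and is trimmed back to pure data once protection is confirmed, while prot_length keeps the PI portion. A small worked example, assuming the usual 512-byte blocks and 8 bytes of PI per block; the numbers are chosen only for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int sectors = 8;		/* blocks named in the CDB   */
	unsigned int block_size = 512;		/* dev_attrib.block_size     */
	unsigned int pi_per_block = 8;		/* T10 DIF tuple size        */

	/* What the initiator declared as the transfer with PI on the wire */
	unsigned int wire_length = sectors * (block_size + pi_per_block);

	/* What remains after the adjustment in sbc_check_prot() */
	unsigned int data_length = sectors * block_size;
	unsigned int prot_length = sectors * pi_per_block;

	printf("wire=%u data=%u prot=%u\n", wire_length, data_length, prot_length);
	/* wire=4160 data=4096 prot=64 */
	return 0;
}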
@@ -129,15 +129,10 @@ static sense_reason_t
 spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
-	u16 len = 0;
+	u16 len;
 	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
-		u32 unit_serial_len;
-		unit_serial_len = strlen(dev->t10_wwn.unit_serial);
-		unit_serial_len++; /* For NULL Terminator */
-		len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
+		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
 		len++; /* Extra Byte for NULL Terminator */
 		buf[3] = len;
 	}
@@ -721,6 +716,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 	unsigned char *buf;
 	sense_reason_t ret;
 	int p;
+	int len = 0;
 	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
 	if (!buf) {
@@ -742,6 +738,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 		}
 		ret = spc_emulate_inquiry_std(cmd, buf);
+		len = buf[4] + 5;
 		goto out;
 	}
@@ -749,6 +746,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 		if (cdb[2] == evpd_handlers[p].page) {
 			buf[1] = cdb[2];
 			ret = evpd_handlers[p].emulate(cmd, buf);
+			len = get_unaligned_be16(&buf[2]) + 4;
 			goto out;
 		}
 	}
@@ -765,7 +763,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 	kfree(buf);
 	if (!ret)
-		target_complete_cmd(cmd, GOOD);
+		target_complete_cmd_with_length(cmd, GOOD, len);
 	return ret;
 }
@@ -1103,7 +1101,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 		transport_kunmap_data_sg(cmd);
 	}
-	target_complete_cmd(cmd, GOOD);
+	target_complete_cmd_with_length(cmd, GOOD, length);
 	return 0;
 }
@@ -1279,7 +1277,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 	buf[3] = (lun_count & 0xff);
 	transport_kunmap_data_sg(cmd);
-	target_complete_cmd(cmd, GOOD);
+	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
 	return 0;
 }
 EXPORT_SYMBOL(spc_emulate_report_luns);
......
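Switching the SPC emulation to target_complete_cmd_with_length() lets it report a residual whenever the payload it built is shorter than the initiator's allocation length; for a standard INQUIRY the response length is buf[4] + 5. A quick check of that arithmetic, using a hypothetical additional-length byte of 31 against a 255-byte allocation length:

#include <stdio.h>

int main(void)
{
	unsigned int alloc_len = 255;		/* allocation length from the CDB     */
	unsigned int additional_len = 31;	/* hypothetical value found in buf[4] */
	unsigned int resp_len = additional_len + 5;	/* len = buf[4] + 5       */

	unsigned int residual = (resp_len < alloc_len) ? alloc_len - resp_len : 0;

	printf("response=%u residual=%u\n", resp_len, residual);
	/* response=36 residual=219 */
	return 0;
}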
@@ -504,7 +504,7 @@ void transport_deregister_session(struct se_session *se_sess)
 	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
 	 * removal context.
 	 */
-	if (se_nacl && comp_nacl == true)
+	if (se_nacl && comp_nacl)
 		target_put_nacl(se_nacl);
 	transport_free_session(se_sess);
@@ -562,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&cmd->t_transport_stop_comp);
+		complete_all(&cmd->t_transport_stop_comp);
 		return 1;
 	}
@@ -687,7 +687,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 	if (cmd->transport_state & CMD_T_ABORTED &&
 	    cmd->transport_state & CMD_T_STOP) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&cmd->t_transport_stop_comp);
+		complete_all(&cmd->t_transport_stop_comp);
 		return;
 	} else if (!success) {
 		INIT_WORK(&cmd->work, target_complete_failure_work);
@@ -703,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 }
 EXPORT_SYMBOL(target_complete_cmd);
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
+	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			cmd->residual_count += cmd->data_length - length;
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = cmd->data_length - length;
+		}
+		cmd->data_length = length;
+	}
+	target_complete_cmd(cmd, scsi_status);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
 static void target_add_to_state_list(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -1761,7 +1778,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 			cmd->se_tfo->get_task_tag(cmd));
 		spin_unlock_irq(&cmd->t_state_lock);
-		complete(&cmd->t_transport_stop_comp);
+		complete_all(&cmd->t_transport_stop_comp);
 		return;
 	}
@@ -2363,7 +2380,7 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
 	 * fabric acknowledgement that requires two target_put_sess_cmd()
 	 * invocations before se_cmd descriptor release.
 	 */
-	if (ack_kref == true) {
+	if (ack_kref) {
 		kref_get(&se_cmd->cmd_kref);
 		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
 	}
@@ -2407,6 +2424,10 @@ static void target_release_cmd_kref(struct kref *kref)
 */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
+	if (!se_sess) {
+		se_cmd->se_tfo->release_cmd(se_cmd);
+		return 1;
+	}
 	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
 			&se_sess->sess_cmd_lock);
 }
@@ -2934,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work)
 int transport_generic_handle_tmr(
 	struct se_cmd *cmd)
 {
+	unsigned long flags;
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->transport_state |= CMD_T_ACTIVE;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	INIT_WORK(&cmd->work, target_tmr_work);
 	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
 	return 0;
......
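target_complete_cmd_with_length() folds the short-response case into the existing underflow bookkeeping: on GOOD status with a reported length below cmd->data_length it starts or extends the residual count, shrinks data_length, and then falls through to the normal completion path. A standalone model of that bookkeeping; the struct below only mimics the relevant se_cmd fields and is not the kernel structure:

#include <stdio.h>
#include <stdbool.h>

struct cmd_model {
	unsigned int data_length;
	unsigned int residual_count;
	bool underflow;
};

/* Mirrors the underflow handling in the hunk above; the real helper also
 * requires GOOD status before adjusting anything.
 */
static void complete_with_length(struct cmd_model *cmd, unsigned int length)
{
	if (length < cmd->data_length) {
		if (cmd->underflow) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->underflow = true;
			cmd->residual_count = cmd->data_length - length;
		}
		cmd->data_length = length;
	}
	/* ...then hand off to the normal completion path. */
}

int main(void)
{
	/* A command that already under-ran once, e.g. a CDB allocation length
	 * larger than the data the fabric mapped.
	 */
	struct cmd_model cmd = {
		.data_length = 96,
		.residual_count = 32,
		.underflow = true,
	};

	complete_with_length(&cmd, 64);
	printf("data_length=%u residual=%u\n", cmd.data_length, cmd.residual_count);
	/* data_length=64 residual=64 */
	return 0;
}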
@@ -70,7 +70,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
 	int rc;
-	if (src == true)
+	if (src)
 		dev_wwn = &xop->dst_tid_wwn[0];
 	else
 		dev_wwn = &xop->src_tid_wwn[0];
@@ -88,7 +88,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 		if (rc != 0)
 			continue;
-		if (src == true) {
+		if (src) {
 			xop->dst_dev = se_dev;
 			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
 				" se_dev\n", xop->dst_dev);
@@ -166,7 +166,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
 		return -EINVAL;
 	}
-	if (src == true) {
+	if (src) {
 		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
 		/*
 		 * Determine if the source designator matches the local device
@@ -236,7 +236,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 		/*
 		 * Assume target descriptors are in source -> destination order..
 		 */
-		if (src == true)
+		if (src)
 			src = false;
 		else
 			src = true;
@@ -560,7 +560,7 @@ static int target_xcopy_init_pt_lun(
 	 * reservations. The pt_cmd->se_lun pointer will be setup from within
	 * target_xcopy_setup_pt_port()
	 */
-	if (remote_port == false) {
+	if (!remote_port) {
 		pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
 		return 0;
 	}
......
@@ -128,6 +128,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	struct fc_lport *lport;
 	struct fc_exch *ep;
 	size_t len;
+	int rc;
 	if (cmd->aborted)
 		return 0;
@@ -137,9 +138,10 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
 	fp = fc_frame_alloc(lport, len);
 	if (!fp) {
-		/* XXX shouldn't just drop it - requeue and retry? */
-		return 0;
+		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+		return -ENOMEM;
 	}
 	fcp = fc_frame_payload_get(fp, len);
 	memset(fcp, 0, len);
 	fcp->resp.fr_status = se_cmd->scsi_status;
@@ -170,7 +172,18 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
-	lport->tt.seq_send(lport, cmd->seq, fp);
+	rc = lport->tt.seq_send(lport, cmd->seq, fp);
+	if (rc) {
+		pr_info_ratelimited("%s: Failed to send response frame %p, "
+				    "xid <0x%x>\n", __func__, fp, ep->xid);
+		/*
+		 * Generate a TASK_SET_FULL status to notify the initiator
+		 * to reduce it's queue_depth after the se_cmd response has
+		 * been re-queued by target-core.
+		 */
+		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+		return -ENOMEM;
+	}
 	lport->tt.exch_done(cmd->seq);
 	return 0;
 }
......
@@ -82,6 +82,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 	if (cmd->aborted)
 		return 0;
+	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
+		goto queue_status;
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
@@ -178,14 +182,23 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
				   FC_TYPE_FCP, f_ctl, fh_off);
 		error = lport->tt.seq_send(lport, seq, fp);
 		if (error) {
-			/* XXX For now, initiator will retry */
-			pr_err_ratelimited("%s: Failed to send frame %p, "
+			pr_info_ratelimited("%s: Failed to send frame %p, "
						"xid <0x%x>, remaining %zu, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
+			/*
+			 * Go ahead and set TASK_SET_FULL status ignoring the
+			 * rest of the DataIN, and immediately attempt to
+			 * send the response via ft_queue_status() in order
+			 * to notify the initiator that it should reduce it's
+			 * per LUN queue_depth.
+			 */
+			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+			break;
 		}
 	}
+queue_status:
 	return ft_queue_status(se_cmd);
 }
......
This diff is collapsed.
@@ -35,11 +35,23 @@ struct virtio_scsi_cmd_req {
 	u8 lun[8];		/* Logical Unit Number */
 	u64 tag;		/* Command identifier */
 	u8 task_attr;		/* Task attribute */
-	u8 prio;
+	u8 prio;		/* SAM command priority field */
 	u8 crn;
 	u8 cdb[VIRTIO_SCSI_CDB_SIZE];
 } __packed;
+/* SCSI command request, followed by protection information */
+struct virtio_scsi_cmd_req_pi {
+	u8 lun[8];		/* Logical Unit Number */
+	u64 tag;		/* Command identifier */
+	u8 task_attr;		/* Task attribute */
+	u8 prio;		/* SAM command priority field */
+	u8 crn;
+	u32 pi_bytesout;	/* DataOUT PI Number of bytes */
+	u32 pi_bytesin;		/* DataIN PI Number of bytes */
+	u8 cdb[VIRTIO_SCSI_CDB_SIZE];
+} __packed;
 /* Response, followed by sense data and data-in */
 struct virtio_scsi_cmd_resp {
 	u32 sense_len;		/* Sense data length */
@@ -97,6 +109,7 @@ struct virtio_scsi_config {
 #define VIRTIO_SCSI_F_INOUT                    0
 #define VIRTIO_SCSI_F_HOTPLUG                  1
 #define VIRTIO_SCSI_F_CHANGE                   2
+#define VIRTIO_SCSI_F_T10_PI                   3
 /* Response codes */
 #define VIRTIO_SCSI_S_OK                       0
......
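virtio_scsi_cmd_req_pi extends the plain command header with two counters: pi_bytesout carries the protection bytes accompanying DataOUT (writes) and pi_bytesin the protection bytes expected with DataIN (reads). A hedged userspace sketch of filling it for a protected 8-block read; the struct layout is copied from the hunk above, while VIRTIO_SCSI_CDB_SIZE and all values are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VIRTIO_SCSI_CDB_SIZE 32		/* assumed default CDB size */

/* Userspace copy of the layout added in the hunk above. */
struct virtio_scsi_cmd_req_pi {
	uint8_t lun[8];			/* Logical Unit Number */
	uint64_t tag;			/* Command identifier */
	uint8_t task_attr;		/* Task attribute */
	uint8_t prio;			/* SAM command priority field */
	uint8_t crn;
	uint32_t pi_bytesout;		/* DataOUT PI Number of bytes */
	uint32_t pi_bytesin;		/* DataIN PI Number of bytes */
	uint8_t cdb[VIRTIO_SCSI_CDB_SIZE];
} __attribute__((packed));

int main(void)
{
	struct virtio_scsi_cmd_req_pi req;
	unsigned int blocks = 8;

	memset(&req, 0, sizeof(req));
	req.tag = 0x1234;

	/* Protected READ: PI flows target -> initiator, so only pi_bytesin
	 * is non-zero (8 PI bytes per logical block).
	 */
	req.pi_bytesin = blocks * 8;
	req.pi_bytesout = 0;

	printf("pi_bytesin=%u pi_bytesout=%u\n", req.pi_bytesin, req.pi_bytesout);
	return 0;
}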
@@ -7,6 +7,7 @@
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/scatterlist.h>
+#include <scsi/scsi_device.h>
 struct Scsi_Host;
 struct scsi_device;
@@ -315,4 +316,20 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
 	cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
 }
+static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
+{
+	unsigned int xfer_len = blk_rq_bytes(scmd->request);
+	unsigned int prot_op = scsi_get_prot_op(scmd);
+	unsigned int sector_size = scmd->device->sector_size;
+
+	switch (prot_op) {
+	case SCSI_PROT_NORMAL:
+	case SCSI_PROT_WRITE_STRIP:
+	case SCSI_PROT_READ_INSERT:
+		return xfer_len;
+	}
+
+	return xfer_len + (xfer_len >> ilog2(sector_size)) * 8;
+}
 #endif /* _SCSI_SCSI_CMND_H */
@@ -70,7 +70,8 @@ extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
 extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
			struct iscsi_tm_rsp *);
 extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
-			struct iscsi_text_rsp *);
+			struct iscsi_text_rsp *,
+			enum iscsit_transport_type);
 extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
			struct iscsi_reject *);
 extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
......
@@ -59,6 +59,7 @@ int transport_subsystem_register(struct se_subsystem_api *);
 void transport_subsystem_release(struct se_subsystem_api *);
 void target_complete_cmd(struct se_cmd *, u8);
+void target_complete_cmd_with_length(struct se_cmd *, u8, int);
 sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
......