Commit e20d3ef5 authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Update vhost-scsi to support F_ANY_LAYOUT using mm/iov_iter.c
     logic, and signal VERSION_1 support (MST + Viro + nab)

   - Fix iscsi/iser-target to remove problematic active_ts_set usage
     (Gavin Guo)

   - Update iscsi/iser-target to support multi-sequence sendtargets
     (Sagi)

   - Fix original PR_APTPL_BUF_LEN 8k size limitation (Martin Svec)

   - Add missing WRITE_SAME end-of-device sanity check (Bart)

   - Check for LBA + sectors wrap-around in sbc_parse_cdb() (nab)

   - Other various minor SPC/SBC compliance fixes based upon Ronnie
     Sahlberg test suite (nab)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (32 commits)
  target: Set LBPWS10 bit in Logical Block Provisioning EVPD
  target: Fail UNMAP when emulate_tpu=0
  target: Fail WRITE_SAME w/ UNMAP=1 when emulate_tpws=0
  target: Add sanity checks for DPO/FUA bit usage
  target: Perform PROTECT sanity checks for WRITE_SAME
  target: Fail I/O with PROTECT bit when protection is unsupported
  target: Check for LBA + sectors wrap-around in sbc_parse_cdb
  target: Add missing WRITE_SAME end-of-device sanity check
  iscsi-target: Avoid IN_LOGOUT failure case for iser-target
  target: Fix PR_APTPL_BUF_LEN buffer size limitation
  iscsi-target: Drop problematic active_ts_list usage
  iscsi/iser-target: Support multi-sequence sendtargets text response
  iser-target: Remove duplicate function names
  vhost/scsi: potential memory corruption
  vhost/scsi: Global tcm_vhost -> vhost_scsi rename
  vhost/scsi: Drop left-over scsi_tcq.h include
  vhost/scsi: Set VIRTIO_F_ANY_LAYOUT + VIRTIO_F_VERSION_1 feature bits
  vhost/scsi: Add ANY_LAYOUT support in vhost_scsi_handle_vq
  vhost/scsi: Add ANY_LAYOUT iov -> sgl mapping prerequisites
  vhost/scsi: Change vhost_scsi_map_to_sgl to accept iov ptr + len
  ...
parents 1acd2de5 aa04dae4
@@ -38,7 +38,7 @@
 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
 ISERT_MAX_CONN)
-int isert_debug_level = 0;
+static int isert_debug_level;
 module_param_named(debug_level, isert_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
@@ -949,7 +949,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
 isert_err("ib_post_recv() failed with ret: %d\n", ret);
 isert_conn->post_recv_buf_count -= count;
 } else {
-isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
+isert_dbg("Posted %d RX buffers\n", count);
 isert_conn->conn_rx_desc_head = rx_head;
 }
 return ret;
@@ -1351,18 +1351,20 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
 struct iscsi_conn *conn = isert_conn->conn;
 u32 payload_length = ntoh24(hdr->dlength);
 int rc;
-unsigned char *text_in;
+unsigned char *text_in = NULL;
 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
 if (rc < 0)
 return rc;
+if (payload_length) {
 text_in = kzalloc(payload_length, GFP_KERNEL);
 if (!text_in) {
 isert_err("Unable to allocate text_in of payload_length: %u\n",
 payload_length);
 return -ENOMEM;
 }
+}
 cmd->text_in_ptr = text_in;
 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
@@ -1434,9 +1436,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
 break;
 case ISCSI_OP_TEXT:
+if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+if (!cmd)
+break;
+} else {
 cmd = isert_allocate_cmd(conn);
 if (!cmd)
 break;
+}
 isert_cmd = iscsit_priv_cmd(cmd);
 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1658,6 +1666,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 struct isert_conn *isert_conn = isert_cmd->conn;
 struct iscsi_conn *conn = isert_conn->conn;
 struct isert_device *device = isert_conn->conn_device;
+struct iscsi_text_rsp *hdr;
 isert_dbg("Cmd %p\n", isert_cmd);
@@ -1698,6 +1707,11 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 case ISCSI_OP_REJECT:
 case ISCSI_OP_NOOP_OUT:
 case ISCSI_OP_TEXT:
+hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+/* If the continue bit is on, keep the command alive */
+if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
+break;
 spin_lock_bh(&conn->cmd_lock);
 if (!list_empty(&cmd->i_conn_node))
 list_del_init(&cmd->i_conn_node);
@@ -1709,8 +1723,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 * associated cmd->se_cmd needs to be released.
 */
 if (cmd->se_cmd.se_tfo != NULL) {
-isert_dbg("Calling transport_generic_free_cmd from"
-" isert_put_cmd for 0x%02x\n",
+isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
 cmd->iscsi_opcode);
 transport_generic_free_cmd(&cmd->se_cmd, 0);
 break;
@@ -2275,7 +2288,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 }
 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
-isert_dbg("conn %p Text Reject\n", isert_conn);
+isert_dbg("conn %p Text Response\n", isert_conn);
 return isert_post_response(isert_conn, isert_cmd);
 }
@@ -3136,7 +3149,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
 spin_lock_bh(&np->np_thread_lock);
 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
 spin_unlock_bh(&np->np_thread_lock);
-isert_dbg("np_thread_state %d for isert_accept_np\n",
+isert_dbg("np_thread_state %d\n",
 np->np_thread_state);
 /**
 * No point in stalling here when np_thread
@@ -3320,7 +3333,8 @@ static int __init isert_init(void)
 {
 int ret;
-isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+isert_comp_wq = alloc_workqueue("isert_comp_wq",
+WQ_UNBOUND | WQ_HIGHPRI, 0);
 if (!isert_comp_wq) {
 isert_err("Unable to allocate isert_comp_wq\n");
 ret = -ENOMEM;
...
@@ -3518,7 +3518,7 @@ static void srpt_close_session(struct se_session *se_sess)
 DECLARE_COMPLETION_ONSTACK(release_done);
 struct srpt_rdma_ch *ch;
 struct srpt_device *sdev;
-int res;
+unsigned long res;
 ch = se_sess->fabric_sess_ptr;
 WARN_ON(ch->sess != se_sess);
@@ -3533,7 +3533,7 @@ static void srpt_close_session(struct se_session *se_sess)
 spin_unlock_irq(&sdev->spinlock);
 res = wait_for_completion_timeout(&release_done, 60 * HZ);
-WARN_ON(res <= 0);
+WARN_ON(res == 0);
 }
 /**
...
@@ -1570,9 +1570,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
 */
 memset(&port_name, 0, 36);
-snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
-fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
 /*
 * Locate our struct se_node_acl either from an explict NodeACL created
 * via ConfigFS, or via running in TPG demo mode.
...
@@ -30,7 +30,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
@@ -45,7 +45,7 @@
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
 #include "iscsi_target_device.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #include <target/iscsi/iscsi_transport.h>
@@ -968,11 +968,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
 if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-spin_lock_bh(&conn->sess->ttt_lock);
-cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-if (cmd->targ_xfer_tag == 0xFFFFFFFF)
-cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-spin_unlock_bh(&conn->sess->ttt_lock);
+cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
 cmd->targ_xfer_tag = 0xFFFFFFFF;
 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
@@ -1998,6 +1994,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
 cmd->data_direction = DMA_NONE;
+cmd->text_in_ptr = NULL;
 return 0;
 }
@@ -2011,10 +2008,14 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 int cmdsn_ret;
 if (!text_in) {
+cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
 pr_err("Unable to locate text_in buffer for sendtargets"
 " discovery\n");
 goto reject;
 }
+goto empty_sendtargets;
+}
 if (strncmp("SendTargets", text_in, 11) != 0) {
 pr_err("Received Text Data that is not"
 " SendTargets, cannot continue.\n");
@@ -2040,6 +2041,7 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 spin_unlock_bh(&conn->cmd_lock);
+empty_sendtargets:
 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -3047,11 +3049,7 @@ static int iscsit_send_r2t(
 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
 (struct scsi_lun *)&hdr->lun);
 hdr->itt = cmd->init_task_tag;
-spin_lock_bh(&conn->sess->ttt_lock);
-r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-if (r2t->targ_xfer_tag == 0xFFFFFFFF)
-r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-spin_unlock_bh(&conn->sess->ttt_lock);
+r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
 hdr->statsn = cpu_to_be32(conn->stat_sn);
 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3393,7 +3391,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 static int
 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
-enum iscsit_transport_type network_transport)
+enum iscsit_transport_type network_transport,
+int skip_bytes, bool *completed)
 {
 char *payload = NULL;
 struct iscsi_conn *conn = cmd->conn;
@@ -3405,7 +3404,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
-buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
 SENDTARGETS_BUF_LIMIT);
 payload = kzalloc(buffer_len, GFP_KERNEL);
@@ -3484,9 +3483,16 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 end_of_buf = 1;
 goto eob;
 }
+if (skip_bytes && len <= skip_bytes) {
+skip_bytes -= len;
+} else {
 memcpy(payload + payload_len, buf, len);
 payload_len += len;
 target_name_printed = 1;
+if (len > skip_bytes)
+skip_bytes = 0;
+}
 }
 len = sprintf(buf, "TargetAddress="
@@ -3502,15 +3508,24 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 end_of_buf = 1;
 goto eob;
 }
+if (skip_bytes && len <= skip_bytes) {
+skip_bytes -= len;
+} else {
 memcpy(payload + payload_len, buf, len);
 payload_len += len;
+if (len > skip_bytes)
+skip_bytes = 0;
+}
 }
 spin_unlock(&tpg->tpg_np_lock);
 }
 spin_unlock(&tiqn->tiqn_tpg_lock);
 eob:
-if (end_of_buf)
+if (end_of_buf) {
+*completed = false;
 break;
+}
 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
 break;
@@ -3528,13 +3543,23 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 enum iscsit_transport_type network_transport)
 {
 int text_length, padding;
+bool completed = true;
-text_length = iscsit_build_sendtargets_response(cmd, network_transport);
+text_length = iscsit_build_sendtargets_response(cmd, network_transport,
+cmd->read_data_done,
+&completed);
 if (text_length < 0)
 return text_length;
-hdr->opcode = ISCSI_OP_TEXT_RSP;
+if (completed) {
 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+} else {
+hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+cmd->read_data_done += text_length;
+if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+}
+hdr->opcode = ISCSI_OP_TEXT_RSP;
 padding = ((-text_length) & 3);
 hton24(hdr->dlength, text_length);
 hdr->itt = cmd->init_task_tag;
@@ -3543,21 +3568,25 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 hdr->statsn = cpu_to_be32(cmd->stat_sn);
 iscsit_increment_maxcmdsn(cmd, conn->sess);
+/*
+* Reset maxcmdsn_inc in multi-part text payload exchanges to
+* correctly increment MaxCmdSN for each response answering a
+* non immediate text request with a valid CmdSN.
+*/
+cmd->maxcmdsn_inc = 0;
 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
-pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
-" Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
-text_length, conn->cid);
+pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
+" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
+cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
+!!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
+!!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
 return text_length + padding;
 }
 EXPORT_SYMBOL(iscsit_build_text_rsp);
-/*
-* FIXME: Add support for F_BIT and C_BIT when the length is longer than
-* MaxRecvDataSegmentLength.
-*/
 static int iscsit_send_text_rsp(
 struct iscsi_cmd *cmd,
 struct iscsi_conn *conn)
@@ -4021,9 +4050,15 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
 break;
 case ISCSI_OP_TEXT:
+if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+if (!cmd)
+goto reject;
+} else {
 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 if (!cmd)
 goto reject;
+}
 ret = iscsit_handle_text_cmd(conn, cmd, buf);
 break;
...
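Editor's note on the hunks above: multi-sequence SendTargets works by emitting a partial payload with the C (continue) bit when the full response no longer fits in one Text Response, remembering the byte offset in cmd->read_data_done, and skipping that many already-sent bytes (skip_bytes) when the initiator re-issues the request with the same ITT/TTT. The following is only a standalone sketch of the offset-based chunking idea, not code from this series; copy_chunk and mrdsl are invented names.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical helper: copy the next chunk of a fully built SendTargets
 * payload, starting at 'offset', into a response buffer limited to 'mrdsl'
 * bytes (standing in for MaxRecvDataSegmentLength). *completed becomes true
 * once the last byte is emitted, which maps to the F bit; otherwise the C
 * bit would be set on the Text Response and the offset carried forward. */
static size_t copy_chunk(const char *full, size_t full_len, size_t offset,
                         char *out, size_t mrdsl, bool *completed)
{
        size_t remain = full_len - offset;
        size_t len = remain < mrdsl ? remain : mrdsl;

        memcpy(out, full + offset, len);
        *completed = (offset + len == full_len);
        return len;
}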
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
...
@@ -28,7 +28,7 @@
 #include <target/configfs_macros.h>
 #include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_erl0.h"
@@ -36,7 +36,7 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_configfs.h"
 struct target_fabric_configfs *lio_target_fabric_configfs;
@@ -674,12 +674,9 @@ static ssize_t lio_target_nacl_show_info(
 rb += sprintf(page+rb, "InitiatorAlias: %s\n",
 sess->sess_ops->InitiatorAlias);
-rb += sprintf(page+rb, "LIO Session ID: %u "
-"ISID: 0x%02x %02x %02x %02x %02x %02x "
-"TSIH: %hu ", sess->sid,
-sess->isid[0], sess->isid[1], sess->isid[2],
-sess->isid[3], sess->isid[4], sess->isid[5],
-sess->tsih);
+rb += sprintf(page+rb,
+"LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ",
+sess->sid, sess->isid, sess->tsih);
 rb += sprintf(page+rb, "SessionType: %s\n",
 (sess->sess_ops->SessionType) ?
 "Discovery" : "Normal");
@@ -1758,9 +1755,7 @@ static u32 lio_sess_get_initiator_sid(
 /*
 * iSCSI Initiator Session Identifier from RFC-3720.
 */
-return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
-sess->isid[0], sess->isid[1], sess->isid[2],
-sess->isid[3], sess->isid[4], sess->isid[5]);
+return snprintf(buf, size, "%6phN", sess->isid);
 }
 static int lio_queue_data_in(struct se_cmd *se_cmd)
...
@@ -18,7 +18,7 @@
 #include <scsi/iscsi_proto.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
 #include "iscsi_target_util.h"
...
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
...
@@ -21,7 +21,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_transport.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
 #include "iscsi_target_erl0.h"
@@ -939,6 +940,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 spin_unlock_bh(&conn->state_lock);
+if (conn->conn_transport->transport_type == ISCSI_TCP)
 iscsit_close_connection(conn);
 return;
 }
...
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
...
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target_erl0.h"
...
@@ -24,14 +24,14 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_nego.h"
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_erl2.h"
 #include "iscsi_target_login.h"
-#include "iscsi_target_stat.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
...
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nego.h"
...
@@ -18,7 +18,7 @@
 #include <target/target_core_base.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
...
@@ -18,7 +18,7 @@
 #include <linux/slab.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
...
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
...
@@ -23,12 +23,12 @@
 #include <target/target_core_base.h>
 #include <target/configfs_macros.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
...
@@ -23,7 +23,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
...
@@ -20,7 +20,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nodeattrib.h"
...
@@ -20,40 +20,26 @@
 #include <linux/list.h>
 #include <linux/bitmap.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target.h"
-static LIST_HEAD(active_ts_list);
 static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
 static DEFINE_SPINLOCK(inactive_ts_lock);
 static DEFINE_SPINLOCK(ts_bitmap_lock);
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
-spin_lock(&active_ts_lock);
-list_add_tail(&ts->ts_list, &active_ts_list);
-iscsit_global->active_ts++;
-spin_unlock(&active_ts_lock);
-}
 static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
 {
+if (!list_empty(&ts->ts_list)) {
+WARN_ON(1);
+return;
+}
 spin_lock(&inactive_ts_lock);
 list_add_tail(&ts->ts_list, &inactive_ts_list);
 iscsit_global->inactive_ts++;
 spin_unlock(&inactive_ts_lock);
 }
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
-spin_lock(&active_ts_lock);
-list_del(&ts->ts_list);
-iscsit_global->active_ts--;
-spin_unlock(&active_ts_lock);
-}
 static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 {
 struct iscsi_thread_set *ts;
@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
-list_del(&ts->ts_list);
+list_del_init(&ts->ts_list);
 iscsit_global->inactive_ts--;
 spin_unlock(&inactive_ts_lock);
@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
 void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
 {
-iscsi_add_ts_to_active_list(ts);
 spin_lock_bh(&ts->ts_state_lock);
 conn->thread_set = ts;
 ts->conn = conn;
@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
 if (ts->delay_inactive && (--ts->thread_count == 0)) {
 spin_unlock_bh(&ts->ts_state_lock);
-iscsi_del_ts_from_active_list(ts);
 if (!iscsit_global->in_shutdown)
 iscsi_deallocate_extra_thread_sets();
@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
 if (ts->delay_inactive && (--ts->thread_count == 0)) {
 spin_unlock_bh(&ts->ts_state_lock);
-iscsi_del_ts_from_active_list(ts);
 if (!iscsit_global->in_shutdown)
 iscsi_deallocate_extra_thread_sets();
...
@@ -25,7 +25,7 @@
 #include <target/target_core_configfs.h>
 #include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
@@ -390,6 +390,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
 init_task_tag, conn->cid);
 return NULL;
 }
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
 struct iscsi_conn *conn,
@@ -939,13 +940,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
 state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
 ISTATE_SEND_NOPIN_NO_RESPONSE;
 cmd->init_task_tag = RESERVED_ITT;
-spin_lock_bh(&conn->sess->ttt_lock);
-cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
-0xFFFFFFFF;
-if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
-cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-spin_unlock_bh(&conn->sess->ttt_lock);
+cmd->targ_xfer_tag = (want_response) ?
+session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
 spin_lock_bh(&conn->cmd_lock);
 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 spin_unlock_bh(&conn->cmd_lock);
...
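Editor's note: several hunks above replace the open-coded lock/increment/skip-0xFFFFFFFF pattern with a session_get_next_ttt() helper whose definition is not part of the hunks shown here. The sketch below is only a plausible reconstruction from the removed logic, for reference; the real helper lives elsewhere in the series and may differ in detail.

#include <target/iscsi/iscsi_target_core.h>	/* struct iscsi_session (per the header move above) */

/* Illustrative reconstruction, not copied from the patch: hand out the next
 * Target Transfer Tag for a session, skipping the reserved 0xFFFFFFFF value
 * and serializing against concurrent allocations with ttt_lock. */
u32 session_get_next_ttt(struct iscsi_session *session)
{
	u32 ttt;

	spin_lock_bh(&session->ttt_lock);
	ttt = session->targ_xfer_tag++;
	if (ttt == 0xFFFFFFFF)
		ttt = session->targ_xfer_tag++;
	spin_unlock_bh(&session->ttt_lock);

	return ttt;
}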
@@ -16,7 +16,6 @@ extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
 extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 unsigned char * ,__be32 cmdsn);
 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
 itt_t, u32);
 extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
...
@@ -494,6 +494,11 @@ fd_execute_write_same(struct se_cmd *cmd)
 target_complete_cmd(cmd, SAM_STAT_GOOD);
 return 0;
 }
+if (cmd->prot_op) {
+pr_err("WRITE_SAME: Protection information with FILEIO"
+" backends not supported\n");
+return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
 sg = &cmd->t_data_sg[0];
 if (cmd->t_data_nents > 1 ||
...
@@ -464,6 +464,11 @@ iblock_execute_write_same(struct se_cmd *cmd)
 sector_t block_lba = cmd->t_task_lba;
 sector_t sectors = sbc_get_write_same_sectors(cmd);
+if (cmd->prot_op) {
+pr_err("WRITE_SAME: Protection information with IBLOCK"
+" backends not supported\n");
+return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
 sg = &cmd->t_data_sg[0];
 if (cmd->t_data_nents > 1 ||
...
@@ -1874,8 +1874,8 @@ static int core_scsi3_update_aptpl_buf(
 }
 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-pr_err("Unable to update renaming"
-" APTPL metadata\n");
+pr_err("Unable to update renaming APTPL metadata,"
+" reallocating larger buffer\n");
 ret = -EMSGSIZE;
 goto out;
 }
@@ -1892,8 +1892,8 @@ static int core_scsi3_update_aptpl_buf(
 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-pr_err("Unable to update renaming"
-" APTPL metadata\n");
+pr_err("Unable to update renaming APTPL metadata,"
+" reallocating larger buffer\n");
 ret = -EMSGSIZE;
 goto out;
 }
@@ -1956,7 +1956,7 @@ static int __core_scsi3_write_aptpl_to_file(
 static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
 {
 unsigned char *buf;
-int rc;
+int rc, len = PR_APTPL_BUF_LEN;
 if (!aptpl) {
 char *null_buf = "No Registrations or Reservations\n";
@@ -1970,25 +1970,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
 return 0;
 }
-buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+buf = vzalloc(len);
 if (!buf)
 return TCM_OUT_OF_RESOURCES;
-rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+rc = core_scsi3_update_aptpl_buf(dev, buf, len);
 if (rc < 0) {
-kfree(buf);
-return TCM_OUT_OF_RESOURCES;
+vfree(buf);
+len *= 2;
+goto retry;
 }
 rc = __core_scsi3_write_aptpl_to_file(dev, buf);
 if (rc != 0) {
 pr_err("SPC-3 PR: Could not update APTPL\n");
-kfree(buf);
+vfree(buf);
 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 dev->t10_pr.pr_aptpl_active = 1;
-kfree(buf);
+vfree(buf);
 pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
 return 0;
 }
...
@@ -36,6 +36,9 @@
 #include "target_core_ua.h"
 #include "target_core_alua.h"
+static sense_reason_t
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
 {
@@ -251,7 +254,10 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 static sense_reason_t
 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
+struct se_device *dev = cmd->se_dev;
+sector_t end_lba = dev->transport->get_blocks(dev) + 1;
 unsigned int sectors = sbc_get_write_same_sectors(cmd);
+sense_reason_t ret;
 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
 pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -264,6 +270,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
 return TCM_INVALID_CDB_FIELD;
 }
+/*
+* Sanity check for LBA wrap and request past end of device.
+*/
+if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+((cmd->t_task_lba + sectors) > end_lba)) {
+pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+(unsigned long long)end_lba, cmd->t_task_lba, sectors);
+return TCM_ADDRESS_OUT_OF_RANGE;
+}
 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
 if (flags[0] & 0x10) {
 pr_warn("WRITE SAME with ANCHOR not supported\n");
@@ -277,12 +293,21 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 if (!ops->execute_write_same_unmap)
 return TCM_UNSUPPORTED_SCSI_OPCODE;
+if (!dev->dev_attrib.emulate_tpws) {
+pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
+" has emulate_tpws disabled\n");
+return TCM_UNSUPPORTED_SCSI_OPCODE;
+}
 cmd->execute_cmd = ops->execute_write_same_unmap;
 return 0;
 }
 if (!ops->execute_write_same)
 return TCM_UNSUPPORTED_SCSI_OPCODE;
+ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+if (ret)
+return ret;
 cmd->execute_cmd = ops->execute_write_same;
 return 0;
 }
@@ -614,14 +639,21 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
 return 0;
 }
-static bool
+static sense_reason_t
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 u32 sectors, bool is_write)
 {
 u8 protect = cdb[1] >> 5;
-if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
-return true;
+if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
+if (protect && !dev->dev_attrib.pi_prot_type) {
+pr_err("CDB contains protect bit, but device does not"
+" advertise PROTECT=1 feature bit\n");
+return TCM_INVALID_CDB_FIELD;
+}
+if (cmd->prot_pto)
+return TCM_NO_SENSE;
+}
 switch (dev->dev_attrib.pi_prot_type) {
 case TARGET_DIF_TYPE3_PROT:
@@ -629,7 +661,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 break;
 case TARGET_DIF_TYPE2_PROT:
 if (protect)
-return false;
+return TCM_INVALID_CDB_FIELD;
 cmd->reftag_seed = cmd->t_task_lba;
 break;
@@ -638,12 +670,12 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 break;
 case TARGET_DIF_TYPE0_PROT:
 default:
-return true;
+return TCM_NO_SENSE;
 }
 if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
 is_write, cmd))
-return false;
+return TCM_INVALID_CDB_FIELD;
 cmd->prot_type = dev->dev_attrib.pi_prot_type;
 cmd->prot_length = dev->prot_length * sectors;
@@ -662,7 +694,30 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
 cmd->prot_op, cmd->prot_checks);
-return true;
+return TCM_NO_SENSE;
+}
+static int
+sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+if (cdb[1] & 0x10) {
+if (!dev->dev_attrib.emulate_dpo) {
+pr_err("Got CDB: 0x%02x with DPO bit set, but device"
+" does not advertise support for DPO\n", cdb[0]);
+return -EINVAL;
+}
+}
+if (cdb[1] & 0x8) {
+if (!dev->dev_attrib.emulate_fua_write ||
+!dev->dev_attrib.emulate_write_cache) {
+pr_err("Got CDB: 0x%02x with FUA bit set, but device"
+" does not advertise support for FUA write\n",
+cdb[0]);
+return -EINVAL;
+}
+cmd->se_cmd_flags |= SCF_FUA;
+}
+return 0;
 }
 sense_reason_t
@@ -686,8 +741,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 sectors = transport_get_sectors_10(cdb);
 cmd->t_task_lba = transport_lba_32(cdb);
-if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-return TCM_UNSUPPORTED_SCSI_OPCODE;
+if (sbc_check_dpofua(dev, cmd, cdb))
+return TCM_INVALID_CDB_FIELD;
+ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+if (ret)
+return ret;
 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 cmd->execute_rw = ops->execute_rw;
@@ -697,8 +756,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 sectors = transport_get_sectors_12(cdb);
 cmd->t_task_lba = transport_lba_32(cdb);
-if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-return TCM_UNSUPPORTED_SCSI_OPCODE;
+if (sbc_check_dpofua(dev, cmd, cdb))
+return TCM_INVALID_CDB_FIELD;
+ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+if (ret)
+return ret;
 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 cmd->execute_rw = ops->execute_rw;
@@ -708,8 +771,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 sectors = transport_get_sectors_16(cdb);
 cmd->t_task_lba = transport_lba_64(cdb);
-if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-return TCM_UNSUPPORTED_SCSI_OPCODE;
+if (sbc_check_dpofua(dev, cmd, cdb))
+return TCM_INVALID_CDB_FIELD;
+ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+if (ret)
+return ret;
 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 cmd->execute_rw = ops->execute_rw;
@@ -727,11 +794,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 sectors = transport_get_sectors_10(cdb);
 cmd->t_task_lba = transport_lba_32(cdb);
-if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-return TCM_UNSUPPORTED_SCSI_OPCODE;
+if (sbc_check_dpofua(dev, cmd, cdb))
+return TCM_INVALID_CDB_FIELD;
+ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+if (ret)
+return ret;
-if (cdb[1] & 0x8)
-cmd->se_cmd_flags |= SCF_FUA;
 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 cmd->execute_rw = ops->execute_rw;
 cmd->execute_cmd = sbc_execute_rw;
@@ -740,11 +809,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 sectors = transport_get_sectors_12(cdb);
 cmd->t_task_lba = transport_lba_32(cdb);
-if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-return TCM_UNSUPPORTED_SCSI_OPCODE;
+if (sbc_check_dpofua(dev, cmd, cdb))
+return TCM_INVALID_CDB_FIELD;
+ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+if (ret)
+return ret;
-if (cdb[1] & 0x8)
-cmd->se_cmd_flags |= SCF_FUA;
 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 cmd->execute_rw = ops->execute_rw;
 cmd->execute_cmd = sbc_execute_rw;
@@ -753,11 +824,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 sectors = transport_get_sectors_16(cdb);
 cmd->t_task_lba = transport_lba_64(cdb);
-if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-return TCM_UNSUPPORTED_SCSI_OPCODE;
+if (sbc_check_dpofua(dev, cmd, cdb))
+return TCM_INVALID_CDB_FIELD;
+ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+if (ret)
+return ret;
-if (cdb[1] & 0x8)
-cmd->se_cmd_flags |= SCF_FUA;
 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 cmd->execute_rw = ops->execute_rw;
 cmd->execute_cmd = sbc_execute_rw;
@@ -768,6 +841,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 return TCM_INVALID_CDB_FIELD;
 sectors = transport_get_sectors_10(cdb);
+if (sbc_check_dpofua(dev, cmd, cdb))
+return TCM_INVALID_CDB_FIELD;
 cmd->t_task_lba = transport_lba_32(cdb);
 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -777,8 +853,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 cmd->execute_rw = ops->execute_rw;
 cmd->execute_cmd = sbc_execute_rw;
 cmd->transport_complete_callback = &xdreadwrite_callback;
-if (cdb[1] & 0x8)
-cmd->se_cmd_flags |= SCF_FUA;
 break;
 case VARIABLE_LENGTH_CMD:
 {
@@ -787,6 +861,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 case XDWRITEREAD_32:
 sectors = transport_get_sectors_32(cdb);
+if (sbc_check_dpofua(dev, cmd, cdb))
+return TCM_INVALID_CDB_FIELD;
 /*
 * Use WRITE_32 and READ_32 opcodes for the emulated
 * XDWRITE_READ_32 logic.
@@ -801,8 +877,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 cmd->execute_rw = ops->execute_rw;
 cmd->execute_cmd = sbc_execute_rw;
 cmd->transport_complete_callback = &xdreadwrite_callback;
-if (cdb[1] & 0x8)
-cmd->se_cmd_flags |= SCF_FUA;
 break;
 case WRITE_SAME_32:
 sectors = transport_get_sectors_32(cdb);
@@ -888,6 +962,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 if (!ops->execute_unmap)
 return TCM_UNSUPPORTED_SCSI_OPCODE;
+if (!dev->dev_attrib.emulate_tpu) {
+pr_err("Got UNMAP, but backend device has"
+" emulate_tpu disabled\n");
+return TCM_UNSUPPORTED_SCSI_OPCODE;
+}
 size = get_unaligned_be16(&cdb[7]);
 cmd->execute_cmd = ops->execute_unmap;
 break;
@@ -955,7 +1034,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 unsigned long long end_lba;
 check_lba:
 end_lba = dev->transport->get_blocks(dev) + 1;
-if (cmd->t_task_lba + sectors > end_lba) {
+if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+((cmd->t_task_lba + sectors) > end_lba)) {
 pr_err("cmd exceeds last lba %llu "
 "(lba %llu, sectors %u)\n",
 end_lba, cmd->t_task_lba, sectors);
...
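Editor's note: both the WRITE_SAME setup and the generic check_lba path above use the same overflow-safe comparison. Because t_task_lba and sectors are unsigned, a wrapped sum is detected by checking whether lba + sectors became smaller than lba. A minimal standalone illustration (not taken from the kernel tree; the helper name is invented):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring the check added in sbc_parse_cdb(): reject a
 * request whose LBA range wraps around or runs past the device. end_lba is
 * last valid LBA + 1, matching the kernel code above. */
static bool lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
{
        /* unsigned wrap-around: the sum comes out smaller than the start LBA */
        if (lba + sectors < lba)
                return false;
        return lba + sectors <= end_lba;
}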
@@ -647,7 +647,7 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 * support the use of the WRITE SAME (16) command to unmap LBAs.
 */
 if (dev->dev_attrib.emulate_tpws != 0)
-buf[5] |= 0x40;
+buf[5] |= 0x40 | 0x20;
 return 0;
 }
...
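Editor's note: this is the "Set LBPWS10 bit in Logical Block Provisioning EVPD" change from the shortlog. In byte 5 of the 0xB2 VPD page, bit 6 (0x40) is LBPWS (WRITE SAME (16) with UNMAP supported) and bit 5 (0x20) is LBPWS10 (WRITE SAME (10) with UNMAP supported). A hedged sketch with named constants, purely to label the magic numbers (the macro and function names below are not from the kernel tree):

#include <stdint.h>

/* Illustrative constants only; the driver writes the raw bit values. */
#define LBP_VPD_LBPU     0x80  /* UNMAP supported */
#define LBP_VPD_LBPWS    0x40  /* WRITE SAME (16) with UNMAP supported */
#define LBP_VPD_LBPWS10  0x20  /* WRITE SAME (10) with UNMAP supported */

/* Sets byte 5 of a Logical Block Provisioning VPD (0xB2) buffer the same way
 * the hunk above now does when emulate_tpws is enabled. */
static void set_lbp_write_same_bits(uint8_t *vpd_buf)
{
        vpd_buf[5] |= LBP_VPD_LBPWS | LBP_VPD_LBPWS10;
}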
@@ -38,7 +38,6 @@
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
-#include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
@@ -52,13 +51,13 @@
 #include "vhost.h"
-#define TCM_VHOST_VERSION "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
-#define TCM_VHOST_DEFAULT_TAGS 256
-#define TCM_VHOST_PREALLOC_SGLS 2048
-#define TCM_VHOST_PREALLOC_UPAGES 2048
-#define TCM_VHOST_PREALLOC_PROT_SGLS 512
+#define VHOST_SCSI_VERSION "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_DEFAULT_TAGS 256
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
 struct vhost_scsi_inflight {
 /* Wait for the flush operation to finish */
@@ -67,11 +66,13 @@ struct vhost_scsi_inflight {
 struct kref kref;
 };
-struct tcm_vhost_cmd {
+struct vhost_scsi_cmd {
 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
 int tvc_vq_desc;
 /* virtio-scsi initiator task attribute */
 int tvc_task_attr;
+/* virtio-scsi response incoming iovecs */
+int tvc_in_iovs;
 /* virtio-scsi initiator data direction */
 enum dma_data_direction tvc_data_direction;
 /* Expected data transfer length from virtio-scsi header */
@@ -81,26 +82,26 @@ struct tcm_vhost_cmd {
 /* The number of scatterlists associated with this cmd */
 u32 tvc_sgl_count;
 u32 tvc_prot_sgl_count;
-/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
 u32 tvc_lun;
 /* Pointer to the SGL formatted memory from virtio-scsi */
 struct scatterlist *tvc_sgl;
 struct scatterlist *tvc_prot_sgl;
 struct page **tvc_upages;
-/* Pointer to response */
-struct virtio_scsi_cmd_resp __user *tvc_resp;
+/* Pointer to response header iovec */
+struct iovec *tvc_resp_iov;
 /* Pointer to vhost_scsi for our device */
 struct vhost_scsi *tvc_vhost;
 /* Pointer to vhost_virtqueue for the cmd */
 struct vhost_virtqueue *tvc_vq;
 /* Pointer to vhost nexus memory */
-struct tcm_vhost_nexus *tvc_nexus;
+struct vhost_scsi_nexus *tvc_nexus;
 /* The TCM I/O descriptor that is accessed via container_of() */
 struct se_cmd tvc_se_cmd;
-/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
 struct work_struct work;
 /* Copy of the incoming SCSI command descriptor block (CDB) */
-unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
 /* Sense buffer that will be mapped into outgoing status */
 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 /* Completed commands list, serviced from vhost worker thread */
@@ -109,53 +110,53 @@ struct tcm_vhost_cmd {
struct vhost_scsi_inflight *inflight; struct vhost_scsi_inflight *inflight;
}; };
struct tcm_vhost_nexus { struct vhost_scsi_nexus {
/* Pointer to TCM session for I_T Nexus */ /* Pointer to TCM session for I_T Nexus */
struct se_session *tvn_se_sess; struct se_session *tvn_se_sess;
}; };
struct tcm_vhost_nacl { struct vhost_scsi_nacl {
/* Binary World Wide unique Port Name for Vhost Initiator port */ /* Binary World Wide unique Port Name for Vhost Initiator port */
u64 iport_wwpn; u64 iport_wwpn;
/* ASCII formatted WWPN for Sas Initiator port */ /* ASCII formatted WWPN for Sas Initiator port */
char iport_name[TCM_VHOST_NAMELEN]; char iport_name[VHOST_SCSI_NAMELEN];
/* Returned by tcm_vhost_make_nodeacl() */ /* Returned by vhost_scsi_make_nodeacl() */
struct se_node_acl se_node_acl; struct se_node_acl se_node_acl;
}; };
struct tcm_vhost_tpg { struct vhost_scsi_tpg {
/* Vhost port target portal group tag for TCM */ /* Vhost port target portal group tag for TCM */
u16 tport_tpgt; u16 tport_tpgt;
/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */ /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */ /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
int tv_tpg_port_count; int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count; int tv_tpg_vhost_count;
/* list for tcm_vhost_list */ /* list for vhost_scsi_list */
struct list_head tv_tpg_list; struct list_head tv_tpg_list;
/* Used to protect access for tpg_nexus */ /* Used to protect access for tpg_nexus */
struct mutex tv_tpg_mutex; struct mutex tv_tpg_mutex;
/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
struct tcm_vhost_nexus *tpg_nexus; struct vhost_scsi_nexus *tpg_nexus;
/* Pointer back to tcm_vhost_tport */ /* Pointer back to vhost_scsi_tport */
struct tcm_vhost_tport *tport; struct vhost_scsi_tport *tport;
/* Returned by tcm_vhost_make_tpg() */ /* Returned by vhost_scsi_make_tpg() */
struct se_portal_group se_tpg; struct se_portal_group se_tpg;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi *vhost_scsi; struct vhost_scsi *vhost_scsi;
}; };
struct tcm_vhost_tport { struct vhost_scsi_tport {
/* SCSI protocol the tport is providing */ /* SCSI protocol the tport is providing */
u8 tport_proto_id; u8 tport_proto_id;
/* Binary World Wide unique Port Name for Vhost Target port */ /* Binary World Wide unique Port Name for Vhost Target port */
u64 tport_wwpn; u64 tport_wwpn;
/* ASCII formatted WWPN for Vhost Target port */ /* ASCII formatted WWPN for Vhost Target port */
char tport_name[TCM_VHOST_NAMELEN]; char tport_name[VHOST_SCSI_NAMELEN];
/* Returned by tcm_vhost_make_tport() */ /* Returned by vhost_scsi_make_tport() */
struct se_wwn tport_wwn; struct se_wwn tport_wwn;
}; };
struct tcm_vhost_evt { struct vhost_scsi_evt {
/* event to be sent to guest */ /* event to be sent to guest */
struct virtio_scsi_event event; struct virtio_scsi_event event;
/* event list, serviced from vhost worker thread */ /* event list, serviced from vhost worker thread */
...@@ -171,7 +172,9 @@ enum { ...@@ -171,7 +172,9 @@ enum {
/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum { enum {
VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
(1ULL << VIRTIO_SCSI_F_T10_PI) (1ULL << VIRTIO_SCSI_F_T10_PI) |
(1ULL << VIRTIO_F_ANY_LAYOUT) |
(1ULL << VIRTIO_F_VERSION_1)
}; };
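A small, hedged illustration of what the feature-mask change above enables: once VIRTIO_F_ANY_LAYOUT and VIRTIO_F_VERSION_1 are offered, the guest may negotiate them and the host must stop assuming a fixed header/payload iovec layout. The helper below is hypothetical and only shows how such bits are tested in a negotiated 64-bit feature mask.

#include <linux/types.h>
#include <linux/virtio_config.h>

/* Hypothetical check: did the guest accept either bit? VERSION_1
 * implies the same flexible descriptor layout as ANY_LAYOUT. */
static bool demo_any_layout_negotiated(u64 features)
{
	return (features & (1ULL << VIRTIO_F_ANY_LAYOUT)) ||
	       (features & (1ULL << VIRTIO_F_VERSION_1));
}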
#define VHOST_SCSI_MAX_TARGET 256 #define VHOST_SCSI_MAX_TARGET 256
...@@ -195,7 +198,7 @@ struct vhost_scsi_virtqueue { ...@@ -195,7 +198,7 @@ struct vhost_scsi_virtqueue {
struct vhost_scsi { struct vhost_scsi {
/* Protected by vhost_scsi->dev.mutex */ /* Protected by vhost_scsi->dev.mutex */
struct tcm_vhost_tpg **vs_tpg; struct vhost_scsi_tpg **vs_tpg;
char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
struct vhost_dev dev; struct vhost_dev dev;
...@@ -212,21 +215,21 @@ struct vhost_scsi { ...@@ -212,21 +215,21 @@ struct vhost_scsi {
}; };
/* Local pointer to allocated TCM configfs fabric module */ /* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs; static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
static struct workqueue_struct *tcm_vhost_workqueue; static struct workqueue_struct *vhost_scsi_workqueue;
/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */ /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex); static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(tcm_vhost_list); static LIST_HEAD(vhost_scsi_list);
static int iov_num_pages(struct iovec *iov) static int iov_num_pages(void __user *iov_base, size_t iov_len)
{ {
return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) - return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT; ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
} }
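A quick worked example of the iov_num_pages() arithmetic shown above, rewritten as a user-space sketch with an assumed 4 KiB page size: aligning the end of the buffer up and its start down, then shifting by PAGE_SHIFT, counts every page the buffer touches, including partial first and last pages.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Stand-alone copy of the page-count calculation, for illustration. */
static unsigned long iov_num_pages_demo(uintptr_t base, size_t len)
{
	return (PAGE_ALIGN(base + len) - (base & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	/* 0x1ff0 + 0x20 crosses a page boundary: expect 2 pages. */
	printf("%lu\n", iov_num_pages_demo(0x1ff0, 0x20));
	/* Fully inside one page: expect 1 page. */
	printf("%lu\n", iov_num_pages_demo(0x2000, 0x100));
	return 0;
}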
static void tcm_vhost_done_inflight(struct kref *kref) static void vhost_scsi_done_inflight(struct kref *kref)
{ {
struct vhost_scsi_inflight *inflight; struct vhost_scsi_inflight *inflight;
...@@ -234,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref) ...@@ -234,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref)
complete(&inflight->comp); complete(&inflight->comp);
} }
static void tcm_vhost_init_inflight(struct vhost_scsi *vs, static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
struct vhost_scsi_inflight *old_inflight[]) struct vhost_scsi_inflight *old_inflight[])
{ {
struct vhost_scsi_inflight *new_inflight; struct vhost_scsi_inflight *new_inflight;
...@@ -262,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs, ...@@ -262,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
} }
static struct vhost_scsi_inflight * static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq) vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{ {
struct vhost_scsi_inflight *inflight; struct vhost_scsi_inflight *inflight;
struct vhost_scsi_virtqueue *svq; struct vhost_scsi_virtqueue *svq;
...@@ -274,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq) ...@@ -274,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
return inflight; return inflight;
} }
static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight) static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{ {
kref_put(&inflight->kref, tcm_vhost_done_inflight); kref_put(&inflight->kref, vhost_scsi_done_inflight);
} }
static int tcm_vhost_check_true(struct se_portal_group *se_tpg) static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{ {
return 1; return 1;
} }
static int tcm_vhost_check_false(struct se_portal_group *se_tpg) static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{ {
return 0; return 0;
} }
static char *tcm_vhost_get_fabric_name(void) static char *vhost_scsi_get_fabric_name(void)
{ {
return "vhost"; return "vhost";
} }
static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
struct tcm_vhost_tport *tport = tpg->tport; struct vhost_scsi_tport *tport = tpg->tport;
switch (tport->tport_proto_id) { switch (tport->tport_proto_id) {
case SCSI_PROTOCOL_SAS: case SCSI_PROTOCOL_SAS:
...@@ -316,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) ...@@ -316,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
return sas_get_fabric_proto_ident(se_tpg); return sas_get_fabric_proto_ident(se_tpg);
} }
static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg) static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
struct tcm_vhost_tport *tport = tpg->tport; struct vhost_scsi_tport *tport = tpg->tport;
return &tport->tport_name[0]; return &tport->tport_name[0];
} }
static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg) static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
return tpg->tport_tpgt; return tpg->tport_tpgt;
} }
static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
{ {
return 1; return 1;
} }
static u32 static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl, struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg, struct t10_pr_registration *pr_reg,
int *format_code, int *format_code,
unsigned char *buf) unsigned char *buf)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
struct tcm_vhost_tport *tport = tpg->tport; struct vhost_scsi_tport *tport = tpg->tport;
switch (tport->tport_proto_id) { switch (tport->tport_proto_id) {
case SCSI_PROTOCOL_SAS: case SCSI_PROTOCOL_SAS:
...@@ -369,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, ...@@ -369,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
} }
static u32 static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl, struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg, struct t10_pr_registration *pr_reg,
int *format_code) int *format_code)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
struct tcm_vhost_tport *tport = tpg->tport; struct vhost_scsi_tport *tport = tpg->tport;
switch (tport->tport_proto_id) { switch (tport->tport_proto_id) {
case SCSI_PROTOCOL_SAS: case SCSI_PROTOCOL_SAS:
...@@ -399,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, ...@@ -399,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
} }
static char * static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
const char *buf, const char *buf,
u32 *out_tid_len, u32 *out_tid_len,
char **port_nexus_ptr) char **port_nexus_ptr)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
struct tcm_vhost_tport *tport = tpg->tport; struct vhost_scsi_tport *tport = tpg->tport;
switch (tport->tport_proto_id) { switch (tport->tport_proto_id) {
case SCSI_PROTOCOL_SAS: case SCSI_PROTOCOL_SAS:
...@@ -429,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, ...@@ -429,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
} }
static struct se_node_acl * static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg) vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
{ {
struct tcm_vhost_nacl *nacl; struct vhost_scsi_nacl *nacl;
nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL); nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
if (!nacl) { if (!nacl) {
pr_err("Unable to allocate struct tcm_vhost_nacl\n"); pr_err("Unable to allocate struct vhost_scsi_nacl\n");
return NULL; return NULL;
} }
...@@ -443,24 +446,24 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg) ...@@ -443,24 +446,24 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
} }
static void static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl) struct se_node_acl *se_nacl)
{ {
struct tcm_vhost_nacl *nacl = container_of(se_nacl, struct vhost_scsi_nacl *nacl = container_of(se_nacl,
struct tcm_vhost_nacl, se_node_acl); struct vhost_scsi_nacl, se_node_acl);
kfree(nacl); kfree(nacl);
} }
static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg) static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{ {
return 1; return 1;
} }
static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{ {
struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
struct tcm_vhost_cmd, tvc_se_cmd); struct vhost_scsi_cmd, tvc_se_cmd);
struct se_session *se_sess = se_cmd->se_sess; struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
int i; int i;
if (tv_cmd->tvc_sgl_count) { if (tv_cmd->tvc_sgl_count) {
...@@ -472,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) ...@@ -472,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
} }
tcm_vhost_put_inflight(tv_cmd->inflight); vhost_scsi_put_inflight(tv_cmd->inflight);
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
} }
static int tcm_vhost_shutdown_session(struct se_session *se_sess) static int vhost_scsi_shutdown_session(struct se_session *se_sess)
{ {
return 0; return 0;
} }
static void tcm_vhost_close_session(struct se_session *se_sess) static void vhost_scsi_close_session(struct se_session *se_sess)
{ {
return; return;
} }
static u32 tcm_vhost_sess_get_index(struct se_session *se_sess) static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{ {
return 0; return 0;
} }
static int tcm_vhost_write_pending(struct se_cmd *se_cmd) static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{ {
/* Go ahead and process the write immediately */ /* Go ahead and process the write immediately */
target_execute_cmd(se_cmd); target_execute_cmd(se_cmd);
return 0; return 0;
} }
static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd) static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
{ {
return 0; return 0;
} }
static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl) static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{ {
return; return;
} }
static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd) static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
{ {
return 0; return 0;
} }
static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{ {
return 0; return 0;
} }
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd) static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
{ {
struct vhost_scsi *vs = cmd->tvc_vhost; struct vhost_scsi *vs = cmd->tvc_vhost;
...@@ -527,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd) ...@@ -527,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
vhost_work_queue(&vs->dev, &vs->vs_completion_work); vhost_work_queue(&vs->dev, &vs->vs_completion_work);
} }
static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{ {
struct tcm_vhost_cmd *cmd = container_of(se_cmd, struct vhost_scsi_cmd *cmd = container_of(se_cmd,
struct tcm_vhost_cmd, tvc_se_cmd); struct vhost_scsi_cmd, tvc_se_cmd);
vhost_scsi_complete_cmd(cmd); vhost_scsi_complete_cmd(cmd);
return 0; return 0;
} }
static int tcm_vhost_queue_status(struct se_cmd *se_cmd) static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{ {
struct tcm_vhost_cmd *cmd = container_of(se_cmd, struct vhost_scsi_cmd *cmd = container_of(se_cmd,
struct tcm_vhost_cmd, tvc_se_cmd); struct vhost_scsi_cmd, tvc_se_cmd);
vhost_scsi_complete_cmd(cmd); vhost_scsi_complete_cmd(cmd);
return 0; return 0;
} }
static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{ {
return; return;
} }
static void tcm_vhost_aborted_task(struct se_cmd *se_cmd) static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{ {
return; return;
} }
static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{ {
vs->vs_events_nr--; vs->vs_events_nr--;
kfree(evt); kfree(evt);
} }
static struct tcm_vhost_evt * static struct vhost_scsi_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs, vhost_scsi_allocate_evt(struct vhost_scsi *vs,
u32 event, u32 reason) u32 event, u32 reason)
{ {
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct tcm_vhost_evt *evt; struct vhost_scsi_evt *evt;
if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
vs->vs_events_missed = true; vs->vs_events_missed = true;
...@@ -573,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs, ...@@ -573,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
evt = kzalloc(sizeof(*evt), GFP_KERNEL); evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt) { if (!evt) {
vq_err(vq, "Failed to allocate tcm_vhost_evt\n"); vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
vs->vs_events_missed = true; vs->vs_events_missed = true;
return NULL; return NULL;
} }
...@@ -585,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs, ...@@ -585,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
return evt; return evt;
} }
static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd) static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
{ {
struct se_cmd *se_cmd = &cmd->tvc_se_cmd; struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
...@@ -600,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd) ...@@ -600,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
} }
static void static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{ {
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct virtio_scsi_event *event = &evt->event; struct virtio_scsi_event *event = &evt->event;
...@@ -646,24 +649,24 @@ tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) ...@@ -646,24 +649,24 @@ tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
if (!ret) if (!ret)
vhost_add_used_and_signal(&vs->dev, vq, head, 0); vhost_add_used_and_signal(&vs->dev, vq, head, 0);
else else
vq_err(vq, "Faulted on tcm_vhost_send_event\n"); vq_err(vq, "Faulted on vhost_scsi_send_event\n");
} }
static void tcm_vhost_evt_work(struct vhost_work *work) static void vhost_scsi_evt_work(struct vhost_work *work)
{ {
struct vhost_scsi *vs = container_of(work, struct vhost_scsi, struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
vs_event_work); vs_event_work);
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct tcm_vhost_evt *evt; struct vhost_scsi_evt *evt;
struct llist_node *llnode; struct llist_node *llnode;
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
llnode = llist_del_all(&vs->vs_event_list); llnode = llist_del_all(&vs->vs_event_list);
while (llnode) { while (llnode) {
evt = llist_entry(llnode, struct tcm_vhost_evt, list); evt = llist_entry(llnode, struct vhost_scsi_evt, list);
llnode = llist_next(llnode); llnode = llist_next(llnode);
tcm_vhost_do_evt_work(vs, evt); vhost_scsi_do_evt_work(vs, evt);
tcm_vhost_free_evt(vs, evt); vhost_scsi_free_evt(vs, evt);
} }
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
...@@ -679,15 +682,16 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) ...@@ -679,15 +682,16 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vs_completion_work); vs_completion_work);
DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
struct virtio_scsi_cmd_resp v_rsp; struct virtio_scsi_cmd_resp v_rsp;
struct tcm_vhost_cmd *cmd; struct vhost_scsi_cmd *cmd;
struct llist_node *llnode; struct llist_node *llnode;
struct se_cmd *se_cmd; struct se_cmd *se_cmd;
struct iov_iter iov_iter;
int ret, vq; int ret, vq;
bitmap_zero(signal, VHOST_SCSI_MAX_VQ); bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
llnode = llist_del_all(&vs->vs_completion_list); llnode = llist_del_all(&vs->vs_completion_list);
while (llnode) { while (llnode) {
cmd = llist_entry(llnode, struct tcm_vhost_cmd, cmd = llist_entry(llnode, struct vhost_scsi_cmd,
tvc_completion_list); tvc_completion_list);
llnode = llist_next(llnode); llnode = llist_next(llnode);
se_cmd = &cmd->tvc_se_cmd; se_cmd = &cmd->tvc_se_cmd;
...@@ -703,8 +707,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) ...@@ -703,8 +707,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
se_cmd->scsi_sense_length); se_cmd->scsi_sense_length);
memcpy(v_rsp.sense, cmd->tvc_sense_buf, memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length); se_cmd->scsi_sense_length);
ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
if (likely(ret == 0)) { iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
struct vhost_scsi_virtqueue *q; struct vhost_scsi_virtqueue *q;
vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
...@@ -722,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) ...@@ -722,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_signal(&vs->dev, &vs->vqs[vq].vq); vhost_signal(&vs->dev, &vs->vqs[vq].vq);
} }
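The completion-path hunk above replaces a single copy_to_user() of the response with a copy through an iov_iter spanning all of the guest's in-iovecs, so an ANY_LAYOUT guest that splits virtio_scsi_cmd_resp across iovecs still receives a complete response. A hedged kernel-style sketch of that pattern follows; demo_send_resp is a hypothetical name.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/virtio_scsi.h>

/* Sketch: write the response header through an iovec array of
 * arbitrary layout instead of assuming it fits the first iovec. */
static int demo_send_resp(struct virtio_scsi_cmd_resp *v_rsp,
			  struct iovec *resp_iov, int in_iovs)
{
	struct iov_iter iter;
	size_t copied;

	/* READ: data flows from the host into guest-visible memory. */
	iov_iter_init(&iter, READ, resp_iov, in_iovs, sizeof(*v_rsp));
	copied = copy_to_iter(v_rsp, sizeof(*v_rsp), &iter);

	return copied == sizeof(*v_rsp) ? 0 : -EFAULT;
}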
static struct tcm_vhost_cmd * static struct vhost_scsi_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
u32 exp_data_len, int data_direction) u32 exp_data_len, int data_direction)
{ {
struct tcm_vhost_cmd *cmd; struct vhost_scsi_cmd *cmd;
struct tcm_vhost_nexus *tv_nexus; struct vhost_scsi_nexus *tv_nexus;
struct se_session *se_sess; struct se_session *se_sess;
struct scatterlist *sg, *prot_sg; struct scatterlist *sg, *prot_sg;
struct page **pages; struct page **pages;
...@@ -736,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, ...@@ -736,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
tv_nexus = tpg->tpg_nexus; tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) { if (!tv_nexus) {
pr_err("Unable to locate active struct tcm_vhost_nexus\n"); pr_err("Unable to locate active struct vhost_scsi_nexus\n");
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
se_sess = tv_nexus->tvn_se_sess; se_sess = tv_nexus->tvn_se_sess;
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0) { if (tag < 0) {
pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
sg = cmd->tvc_sgl; sg = cmd->tvc_sgl;
prot_sg = cmd->tvc_prot_sgl; prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages; pages = cmd->tvc_upages;
memset(cmd, 0, sizeof(struct tcm_vhost_cmd)); memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
cmd->tvc_sgl = sg; cmd->tvc_sgl = sg;
cmd->tvc_prot_sgl = prot_sg; cmd->tvc_prot_sgl = prot_sg;
...@@ -763,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, ...@@ -763,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
cmd->tvc_exp_data_len = exp_data_len; cmd->tvc_exp_data_len = exp_data_len;
cmd->tvc_data_direction = data_direction; cmd->tvc_data_direction = data_direction;
cmd->tvc_nexus = tv_nexus; cmd->tvc_nexus = tv_nexus;
cmd->inflight = tcm_vhost_get_inflight(vq); cmd->inflight = vhost_scsi_get_inflight(vq);
memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE); memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
return cmd; return cmd;
} }
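vhost_scsi_get_tag() above relies on the session's pre-allocated descriptor pool rather than allocating per command. A hedged sketch of that pattern follows; demo_get_slot and slot_size are illustrative, while percpu_ida_alloc() and the se_session fields are the ones used in the hunk.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/percpu_ida.h>
#include <linux/sched.h>
#include <target/target_core_base.h>

/* Sketch: borrow one pre-allocated command slot from the session's
 * tag pool; the tag is later returned with percpu_ida_free(). */
static void *demo_get_slot(struct se_session *se_sess, size_t slot_size, int *out_tag)
{
	int tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);

	if (tag < 0)
		return ERR_PTR(-ENOMEM);

	*out_tag = tag;
	return (char *)se_sess->sess_cmd_map + tag * slot_size;
}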
...@@ -776,29 +783,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, ...@@ -776,29 +783,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
* Returns the number of scatterlist entries used or -errno on error. * Returns the number of scatterlist entries used or -errno on error.
*/ */
static int static int
vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd, vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
void __user *ptr,
size_t len,
struct scatterlist *sgl, struct scatterlist *sgl,
unsigned int sgl_count,
struct iovec *iov,
struct page **pages,
bool write) bool write)
{ {
unsigned int npages = 0, pages_nr, offset, nbytes; unsigned int npages = 0, offset, nbytes;
unsigned int pages_nr = iov_num_pages(ptr, len);
struct scatterlist *sg = sgl; struct scatterlist *sg = sgl;
void __user *ptr = iov->iov_base; struct page **pages = cmd->tvc_upages;
size_t len = iov->iov_len;
int ret, i; int ret, i;
pages_nr = iov_num_pages(iov); if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
if (pages_nr > sgl_count) {
pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
" sgl_count: %u\n", pages_nr, sgl_count);
return -ENOBUFS;
}
if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
" preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n", " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
pages_nr, TCM_VHOST_PREALLOC_UPAGES); pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
return -ENOBUFS; return -ENOBUFS;
} }
...@@ -829,84 +829,94 @@ vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd, ...@@ -829,84 +829,94 @@ vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
} }
static int static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
struct iovec *iov,
int niov,
bool write)
{ {
struct scatterlist *sg = cmd->tvc_sgl; int sgl_count = 0;
unsigned int sgl_count = 0;
int ret, i;
for (i = 0; i < niov; i++) if (!iter || !iter->iov) {
sgl_count += iov_num_pages(&iov[i]); pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
" present\n", __func__, bytes);
return -EINVAL;
}
if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { sgl_count = iov_iter_npages(iter, 0xffff);
pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than" if (sgl_count > max_sgls) {
" preallocated TCM_VHOST_PREALLOC_SGLS: %u\n", pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
sgl_count, TCM_VHOST_PREALLOC_SGLS); " max_sgls: %d\n", __func__, sgl_count, max_sgls);
return -ENOBUFS; return -EINVAL;
} }
return sgl_count;
}
pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count); static int
sg_init_table(sg, sgl_count); vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
cmd->tvc_sgl_count = sgl_count; struct iov_iter *iter,
struct scatterlist *sg, int sg_count)
{
size_t off = iter->iov_offset;
int i, ret;
pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count); for (i = 0; i < iter->nr_segs; i++) {
void __user *base = iter->iov[i].iov_base + off;
size_t len = iter->iov[i].iov_len - off;
for (i = 0; i < niov; i++) { ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
cmd->tvc_upages, write);
if (ret < 0) { if (ret < 0) {
for (i = 0; i < cmd->tvc_sgl_count; i++) for (i = 0; i < sg_count; i++) {
put_page(sg_page(&cmd->tvc_sgl[i])); struct page *page = sg_page(&sg[i]);
if (page)
cmd->tvc_sgl_count = 0; put_page(page);
}
return ret; return ret;
} }
sg += ret; sg += ret;
sgl_count -= ret; off = 0;
} }
return 0; return 0;
} }
static int static int
vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
struct iovec *iov, size_t prot_bytes, struct iov_iter *prot_iter,
int niov, size_t data_bytes, struct iov_iter *data_iter)
bool write)
{ {
struct scatterlist *prot_sg = cmd->tvc_prot_sgl; int sgl_count, ret;
unsigned int prot_sgl_count = 0; bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
int ret, i;
for (i = 0; i < niov; i++)
prot_sgl_count += iov_num_pages(&iov[i]);
if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) { if (prot_bytes) {
pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than" sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
" preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n", VHOST_SCSI_PREALLOC_PROT_SGLS);
prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS); if (sgl_count < 0)
return -ENOBUFS; return sgl_count;
}
sg_init_table(cmd->tvc_prot_sgl, sgl_count);
cmd->tvc_prot_sgl_count = sgl_count;
pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
prot_sg, prot_sgl_count); cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
sg_init_table(prot_sg, prot_sgl_count);
cmd->tvc_prot_sgl_count = prot_sgl_count;
for (i = 0; i < niov; i++) { ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i], cmd->tvc_prot_sgl,
cmd->tvc_upages, write); cmd->tvc_prot_sgl_count);
if (ret < 0) { if (ret < 0) {
for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
put_page(sg_page(&cmd->tvc_prot_sgl[i]));
cmd->tvc_prot_sgl_count = 0; cmd->tvc_prot_sgl_count = 0;
return ret; return ret;
} }
prot_sg += ret; }
prot_sgl_count -= ret; sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
VHOST_SCSI_PREALLOC_SGLS);
if (sgl_count < 0)
return sgl_count;
sg_init_table(cmd->tvc_sgl, sgl_count);
cmd->tvc_sgl_count = sgl_count;
pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
cmd->tvc_sgl, cmd->tvc_sgl_count);
ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
cmd->tvc_sgl, cmd->tvc_sgl_count);
if (ret < 0) {
cmd->tvc_sgl_count = 0;
return ret;
} }
return 0; return 0;
} }
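The restructured mapping code above sizes each scatterlist from the iov_iter itself instead of summing iov_num_pages() per iovec. A minimal, hedged sketch of that sizing step; demo_calc_sgls is a hypothetical name, and max_sgls corresponds to the VHOST_SCSI_PREALLOC_* limits.

#include <linux/errno.h>
#include <linux/uio.h>

/* Sketch: bound the number of scatterlist entries a payload needs
 * by the size of the pre-allocated pool, as vhost_scsi_calc_sgls()
 * does above. */
static int demo_calc_sgls(struct iov_iter *iter, int max_sgls)
{
	int sgl_count = iov_iter_npages(iter, 0xffff);

	return sgl_count > max_sgls ? -EINVAL : sgl_count;
}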
...@@ -928,11 +938,11 @@ static int vhost_scsi_to_tcm_attr(int attr) ...@@ -928,11 +938,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
return TCM_SIMPLE_TAG; return TCM_SIMPLE_TAG;
} }
static void tcm_vhost_submission_work(struct work_struct *work) static void vhost_scsi_submission_work(struct work_struct *work)
{ {
struct tcm_vhost_cmd *cmd = struct vhost_scsi_cmd *cmd =
container_of(work, struct tcm_vhost_cmd, work); container_of(work, struct vhost_scsi_cmd, work);
struct tcm_vhost_nexus *tv_nexus; struct vhost_scsi_nexus *tv_nexus;
struct se_cmd *se_cmd = &cmd->tvc_se_cmd; struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
int rc; int rc;
...@@ -986,19 +996,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs, ...@@ -986,19 +996,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
static void static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{ {
struct tcm_vhost_tpg **vs_tpg; struct vhost_scsi_tpg **vs_tpg, *tpg;
struct virtio_scsi_cmd_req v_req; struct virtio_scsi_cmd_req v_req;
struct virtio_scsi_cmd_req_pi v_req_pi; struct virtio_scsi_cmd_req_pi v_req_pi;
struct tcm_vhost_tpg *tpg; struct vhost_scsi_cmd *cmd;
struct tcm_vhost_cmd *cmd; struct iov_iter out_iter, in_iter, prot_iter, data_iter;
u64 tag; u64 tag;
u32 exp_data_len, data_first, data_num, data_direction, prot_first; u32 exp_data_len, data_direction;
unsigned out, in, i; unsigned out, in;
int head, ret, data_niov, prot_niov, prot_bytes; int head, ret, prot_bytes;
size_t req_size; size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
size_t out_size, in_size;
u16 lun; u16 lun;
u8 *target, *lunp, task_attr; u8 *target, *lunp, task_attr;
bool hdr_pi; bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
void *req, *cdb; void *req, *cdb;
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
...@@ -1029,113 +1040,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) ...@@ -1029,113 +1040,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
} }
break; break;
} }
/* FIXME: BIDI operation */
if (out == 1 && in == 1) {
data_direction = DMA_NONE;
data_first = 0;
data_num = 0;
} else if (out == 1 && in > 1) {
data_direction = DMA_FROM_DEVICE;
data_first = out + 1;
data_num = in - 1;
} else if (out > 1 && in == 1) {
data_direction = DMA_TO_DEVICE;
data_first = 1;
data_num = out - 1;
} else {
vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
out, in);
break;
}
/* /*
* Check for a sane resp buffer so we can report errors to * Check for a sane response buffer so we can report early
* the guest. * errors back to the guest.
*/ */
if (unlikely(vq->iov[out].iov_len != if (unlikely(vq->iov[out].iov_len < rsp_size)) {
sizeof(struct virtio_scsi_cmd_resp))) { vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" " size, got %zu bytes\n", vq->iov[out].iov_len);
" bytes\n", vq->iov[out].iov_len);
break; break;
} }
/*
if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) { * Setup pointers and values based upon different virtio-scsi
* request header if T10_PI is enabled in KVM guest.
*/
if (t10_pi) {
req = &v_req_pi; req = &v_req_pi;
req_size = sizeof(v_req_pi);
lunp = &v_req_pi.lun[0]; lunp = &v_req_pi.lun[0];
target = &v_req_pi.lun[1]; target = &v_req_pi.lun[1];
req_size = sizeof(v_req_pi);
hdr_pi = true;
} else { } else {
req = &v_req; req = &v_req;
req_size = sizeof(v_req);
lunp = &v_req.lun[0]; lunp = &v_req.lun[0];
target = &v_req.lun[1]; target = &v_req.lun[1];
req_size = sizeof(v_req);
hdr_pi = false;
} }
/*
* FIXME: Not correct for BIDI operation
*/
out_size = iov_length(vq->iov, out);
in_size = iov_length(&vq->iov[out], in);
if (unlikely(vq->iov[0].iov_len < req_size)) { /*
pr_err("Expecting virtio-scsi header: %zu, got %zu\n", * Copy over the virtio-scsi request header, which for a
req_size, vq->iov[0].iov_len); * ANY_LAYOUT enabled guest may span multiple iovecs, or a
break; * single iovec may contain both the header + outgoing
} * WRITE payloads.
ret = copy_from_user(req, vq->iov[0].iov_base, req_size); *
if (unlikely(ret)) { * copy_from_iter() will advance out_iter, so that it will
vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); * point at the start of the outgoing WRITE payload, if
break; * DMA_TO_DEVICE is set.
} */
iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
ret = copy_from_iter(req, req_size, &out_iter);
if (unlikely(ret != req_size)) {
vq_err(vq, "Faulted on copy_from_iter\n");
vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
}
/* virtio-scsi spec requires byte 0 of the lun to be 1 */ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
if (unlikely(*lunp != 1)) { if (unlikely(*lunp != 1)) {
vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
vhost_scsi_send_bad_target(vs, vq, head, out); vhost_scsi_send_bad_target(vs, vq, head, out);
continue; continue;
} }
tpg = ACCESS_ONCE(vs_tpg[*target]); tpg = ACCESS_ONCE(vs_tpg[*target]);
/* Target does not exist, fail the request */
if (unlikely(!tpg)) { if (unlikely(!tpg)) {
/* Target does not exist, fail the request */
vhost_scsi_send_bad_target(vs, vq, head, out); vhost_scsi_send_bad_target(vs, vq, head, out);
continue; continue;
} }
data_niov = data_num;
prot_niov = prot_first = prot_bytes = 0;
/* /*
* Determine if any protection information iovecs are preceeding * Determine data_direction by calculating the total outgoing
* the actual data payload, and adjust data_first + data_niov * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
* values accordingly for vhost_scsi_map_iov_to_sgl() below. * response headers respectively.
*
* For DMA_TO_DEVICE this is out_iter, which is already pointing
* to the right place.
*
* For DMA_FROM_DEVICE, the iovec will be just past the end
* of the virtio-scsi response header in either the same
* or immediately following iovec.
* *
* Also extract virtio_scsi header bits for vhost_scsi_get_tag() * Any associated T10_PI bytes for the outgoing / incoming
* payloads are included in calculation of exp_data_len here.
*/ */
if (hdr_pi) { prot_bytes = 0;
if (out_size > req_size) {
data_direction = DMA_TO_DEVICE;
exp_data_len = out_size - req_size;
data_iter = out_iter;
} else if (in_size > rsp_size) {
data_direction = DMA_FROM_DEVICE;
exp_data_len = in_size - rsp_size;
iov_iter_init(&in_iter, READ, &vq->iov[out], in,
rsp_size + exp_data_len);
iov_iter_advance(&in_iter, rsp_size);
data_iter = in_iter;
} else {
data_direction = DMA_NONE;
exp_data_len = 0;
}
/*
* If T10_PI header + payload is present, setup prot_iter values
* and recalculate data_iter for vhost_scsi_mapal() mapping to
* host scatterlists via get_user_pages_fast().
*/
if (t10_pi) {
if (v_req_pi.pi_bytesout) { if (v_req_pi.pi_bytesout) {
if (data_direction != DMA_TO_DEVICE) { if (data_direction != DMA_TO_DEVICE) {
vq_err(vq, "Received non zero do_pi_niov" vq_err(vq, "Received non zero pi_bytesout,"
", but wrong data_direction\n"); " but wrong data_direction\n");
goto err_cmd; vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
} }
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
} else if (v_req_pi.pi_bytesin) { } else if (v_req_pi.pi_bytesin) {
if (data_direction != DMA_FROM_DEVICE) { if (data_direction != DMA_FROM_DEVICE) {
vq_err(vq, "Received non zero di_pi_niov" vq_err(vq, "Received non zero pi_bytesin,"
", but wrong data_direction\n"); " but wrong data_direction\n");
goto err_cmd; vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
} }
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
} }
/*
* Set prot_iter to data_iter, and advance past any
* preceding prot_bytes that may be present.
*
* Also fix up the exp_data_len to reflect only the
* actual data payload length.
*/
if (prot_bytes) { if (prot_bytes) {
int tmp = 0; exp_data_len -= prot_bytes;
prot_iter = data_iter;
for (i = 0; i < data_num; i++) { iov_iter_advance(&data_iter, prot_bytes);
tmp += vq->iov[data_first + i].iov_len;
prot_niov++;
if (tmp >= prot_bytes)
break;
}
prot_first = data_first;
data_first += prot_niov;
data_niov = data_num - prot_niov;
} }
tag = vhost64_to_cpu(vq, v_req_pi.tag); tag = vhost64_to_cpu(vq, v_req_pi.tag);
task_attr = v_req_pi.task_attr; task_attr = v_req_pi.task_attr;
...@@ -1147,83 +1179,65 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) ...@@ -1147,83 +1179,65 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
cdb = &v_req.cdb[0]; cdb = &v_req.cdb[0];
lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
} }
exp_data_len = 0;
for (i = 0; i < data_niov; i++)
exp_data_len += vq->iov[data_first + i].iov_len;
/* /*
* Check that the recieved CDB size does not exceeded our * Check that the received CDB size does not exceed our
* hardcoded max for vhost-scsi * hardcoded max for vhost-scsi, then get a pre-allocated
* cmd descriptor for the new virtio-scsi tag.
* *
* TODO what if cdb was too small for varlen cdb header? * TODO what if cdb was too small for varlen cdb header?
*/ */
if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) { if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
vq_err(vq, "Received SCSI CDB with command_size: %d that" vq_err(vq, "Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
goto err_cmd; vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
} }
cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
exp_data_len + prot_bytes, exp_data_len + prot_bytes,
data_direction); data_direction);
if (IS_ERR(cmd)) { if (IS_ERR(cmd)) {
vq_err(vq, "vhost_scsi_get_tag failed %ld\n", vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
PTR_ERR(cmd)); PTR_ERR(cmd));
goto err_cmd; vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
} }
pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
": %d\n", cmd, exp_data_len, data_direction);
cmd->tvc_vhost = vs; cmd->tvc_vhost = vs;
cmd->tvc_vq = vq; cmd->tvc_vq = vq;
cmd->tvc_resp = vq->iov[out].iov_base; cmd->tvc_resp_iov = &vq->iov[out];
cmd->tvc_in_iovs = in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
cmd->tvc_cdb[0], cmd->tvc_lun); cmd->tvc_cdb[0], cmd->tvc_lun);
pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
" %d\n", cmd, exp_data_len, prot_bytes, data_direction);
if (prot_niov) {
ret = vhost_scsi_map_iov_to_prot(cmd,
&vq->iov[prot_first], prot_niov,
data_direction == DMA_FROM_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to"
" prot_sgl\n");
goto err_free;
}
}
if (data_direction != DMA_NONE) { if (data_direction != DMA_NONE) {
ret = vhost_scsi_map_iov_to_sgl(cmd, ret = vhost_scsi_mapal(cmd,
&vq->iov[data_first], data_niov, prot_bytes, &prot_iter,
data_direction == DMA_FROM_DEVICE); exp_data_len, &data_iter);
if (unlikely(ret)) { if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n"); vq_err(vq, "Failed to map iov to sgl\n");
goto err_free; vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
vhost_scsi_send_bad_target(vs, vq, head, out);
continue;
} }
} }
/* /*
* Save the descriptor from vhost_get_vq_desc() to be used to * Save the descriptor from vhost_get_vq_desc() to be used to
* complete the virtio-scsi request in TCM callback context via * complete the virtio-scsi request in TCM callback context via
* tcm_vhost_queue_data_in() and tcm_vhost_queue_status() * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
*/ */
cmd->tvc_vq_desc = head; cmd->tvc_vq_desc = head;
/* /*
* Dispatch tv_cmd descriptor for cmwq execution in process * Dispatch cmd descriptor for cmwq execution in process
* context provided by tcm_vhost_workqueue. This also ensures * context provided by vhost_scsi_workqueue. This also ensures
* tv_cmd is executed on the same kworker CPU as this vhost * cmd is executed on the same kworker CPU as this vhost
* thread to gain positive L2 cache locality effects.. * thread to gain positive L2 cache locality effects.
*/ */
INIT_WORK(&cmd->work, tcm_vhost_submission_work); INIT_WORK(&cmd->work, vhost_scsi_submission_work);
queue_work(tcm_vhost_workqueue, &cmd->work); queue_work(vhost_scsi_workqueue, &cmd->work);
} }
mutex_unlock(&vq->mutex);
return;
err_free:
vhost_scsi_free_cmd(cmd);
err_cmd:
vhost_scsi_send_bad_target(vs, vq, head, out);
out: out:
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
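Finally, the rewritten vhost_scsi_handle_vq() above infers the data direction purely from byte counts once ANY_LAYOUT removes the old one-header-per-iovec assumption. A hedged stand-alone sketch of that decision; demo_data_direction is an illustrative name, out_size/in_size are iov_length() totals and req_size/rsp_size are the virtio-scsi header sizes.

#include <linux/dma-direction.h>
#include <linux/types.h>

/* Sketch of the ANY_LAYOUT direction calculation used above. */
static enum dma_data_direction
demo_data_direction(size_t out_size, size_t req_size,
		    size_t in_size, size_t rsp_size, size_t *exp_data_len)
{
	if (out_size > req_size) {	/* payload follows the request header */
		*exp_data_len = out_size - req_size;
		return DMA_TO_DEVICE;
	}
	if (in_size > rsp_size) {	/* payload follows the response header */
		*exp_data_len = in_size - rsp_size;
		return DMA_FROM_DEVICE;
	}
	*exp_data_len = 0;
	return DMA_NONE;
}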
...@@ -1234,15 +1248,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) ...@@ -1234,15 +1248,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
} }
static void static void
tcm_vhost_send_evt(struct vhost_scsi *vs, vhost_scsi_send_evt(struct vhost_scsi *vs,
struct tcm_vhost_tpg *tpg, struct vhost_scsi_tpg *tpg,
struct se_lun *lun, struct se_lun *lun,
u32 event, u32 event,
u32 reason) u32 reason)
{ {
struct tcm_vhost_evt *evt; struct vhost_scsi_evt *evt;
evt = tcm_vhost_allocate_evt(vs, event, reason); evt = vhost_scsi_allocate_evt(vs, event, reason);
if (!evt) if (!evt)
return; return;
...@@ -1253,7 +1267,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs, ...@@ -1253,7 +1267,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
* lun[4-7] need to be zero according to virtio-scsi spec. * lun[4-7] need to be zero according to virtio-scsi spec.
*/ */
evt->event.lun[0] = 0x01; evt->event.lun[0] = 0x01;
evt->event.lun[1] = tpg->tport_tpgt & 0xFF; evt->event.lun[1] = tpg->tport_tpgt;
if (lun->unpacked_lun >= 256) if (lun->unpacked_lun >= 256)
evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
evt->event.lun[3] = lun->unpacked_lun & 0xFF; evt->event.lun[3] = lun->unpacked_lun & 0xFF;
...@@ -1274,7 +1288,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work) ...@@ -1274,7 +1288,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
goto out; goto out;
if (vs->vs_events_missed) if (vs->vs_events_missed)
tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out: out:
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
...@@ -1300,7 +1314,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) ...@@ -1300,7 +1314,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
int i; int i;
/* Init new inflight and remember the old inflight */ /* Init new inflight and remember the old inflight */
tcm_vhost_init_inflight(vs, old_inflight); vhost_scsi_init_inflight(vs, old_inflight);
/* /*
* The inflight->kref was initialized to 1. We decrement it here to * The inflight->kref was initialized to 1. We decrement it here to
...@@ -1308,7 +1322,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) ...@@ -1308,7 +1322,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
* when all the reqs are finished. * when all the reqs are finished.
*/ */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight); kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
/* Flush both the vhost poll and vhost work */ /* Flush both the vhost poll and vhost work */
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
...@@ -1323,24 +1337,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) ...@@ -1323,24 +1337,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
/* /*
* Called from vhost_scsi_ioctl() context to walk the list of available * Called from vhost_scsi_ioctl() context to walk the list of available
* tcm_vhost_tpg with an active struct tcm_vhost_nexus * vhost_scsi_tpg with an active struct vhost_scsi_nexus
* *
* The lock nesting rule is: * The lock nesting rule is:
* tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
*/ */
static int static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs, vhost_scsi_set_endpoint(struct vhost_scsi *vs,
struct vhost_scsi_target *t) struct vhost_scsi_target *t)
{ {
struct se_portal_group *se_tpg; struct se_portal_group *se_tpg;
struct tcm_vhost_tport *tv_tport; struct vhost_scsi_tport *tv_tport;
struct tcm_vhost_tpg *tpg; struct vhost_scsi_tpg *tpg;
struct tcm_vhost_tpg **vs_tpg; struct vhost_scsi_tpg **vs_tpg;
struct vhost_virtqueue *vq; struct vhost_virtqueue *vq;
int index, ret, i, len; int index, ret, i, len;
bool match = false; bool match = false;
mutex_lock(&tcm_vhost_mutex); mutex_lock(&vhost_scsi_mutex);
mutex_lock(&vs->dev.mutex); mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */ /* Verify that ring has been setup correctly. */
...@@ -1361,7 +1375,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ...@@ -1361,7 +1375,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
if (vs->vs_tpg) if (vs->vs_tpg)
memcpy(vs_tpg, vs->vs_tpg, len); memcpy(vs_tpg, vs->vs_tpg, len);
list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) { list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
mutex_lock(&tpg->tv_tpg_mutex); mutex_lock(&tpg->tv_tpg_mutex);
if (!tpg->tpg_nexus) { if (!tpg->tpg_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
...@@ -1429,7 +1443,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ...@@ -1429,7 +1443,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
out: out:
mutex_unlock(&vs->dev.mutex); mutex_unlock(&vs->dev.mutex);
mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vhost_scsi_mutex);
return ret; return ret;
} }
...@@ -1438,14 +1452,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, ...@@ -1438,14 +1452,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
struct vhost_scsi_target *t) struct vhost_scsi_target *t)
{ {
struct se_portal_group *se_tpg; struct se_portal_group *se_tpg;
struct tcm_vhost_tport *tv_tport; struct vhost_scsi_tport *tv_tport;
struct tcm_vhost_tpg *tpg; struct vhost_scsi_tpg *tpg;
struct vhost_virtqueue *vq; struct vhost_virtqueue *vq;
bool match = false; bool match = false;
int index, ret, i; int index, ret, i;
u8 target; u8 target;
mutex_lock(&tcm_vhost_mutex); mutex_lock(&vhost_scsi_mutex);
mutex_lock(&vs->dev.mutex); mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */ /* Verify that ring has been setup correctly. */
for (index = 0; index < vs->dev.nvqs; ++index) { for (index = 0; index < vs->dev.nvqs; ++index) {
...@@ -1511,14 +1525,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, ...@@ -1511,14 +1525,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
vs->vs_tpg = NULL; vs->vs_tpg = NULL;
WARN_ON(vs->vs_events_nr); WARN_ON(vs->vs_events_nr);
mutex_unlock(&vs->dev.mutex); mutex_unlock(&vs->dev.mutex);
mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vhost_scsi_mutex);
return 0; return 0;
err_tpg: err_tpg:
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
err_dev: err_dev:
mutex_unlock(&vs->dev.mutex); mutex_unlock(&vs->dev.mutex);
mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vhost_scsi_mutex);
return ret; return ret;
} }
...@@ -1565,7 +1579,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) ...@@ -1565,7 +1579,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
goto err_vqs; goto err_vqs;
vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
vs->vs_events_nr = 0; vs->vs_events_nr = 0;
vs->vs_events_missed = false; vs->vs_events_missed = false;
...@@ -1580,7 +1594,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) ...@@ -1580,7 +1594,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
} }
vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
tcm_vhost_init_inflight(vs, NULL); vhost_scsi_init_inflight(vs, NULL);
f->private_data = vs; f->private_data = vs;
return 0; return 0;
...@@ -1712,7 +1726,7 @@ static int vhost_scsi_deregister(void) ...@@ -1712,7 +1726,7 @@ static int vhost_scsi_deregister(void)
return misc_deregister(&vhost_scsi_misc); return misc_deregister(&vhost_scsi_misc);
} }
static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{ {
switch (tport->tport_proto_id) { switch (tport->tport_proto_id) {
case SCSI_PROTOCOL_SAS: case SCSI_PROTOCOL_SAS:
...@@ -1729,7 +1743,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) ...@@ -1729,7 +1743,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
} }
static void static void
tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
struct se_lun *lun, bool plug) struct se_lun *lun, bool plug)
{ {
...@@ -1750,71 +1764,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, ...@@ -1750,71 +1764,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
tcm_vhost_send_evt(vs, tpg, lun, vhost_scsi_send_evt(vs, tpg, lun,
VIRTIO_SCSI_T_TRANSPORT_RESET, reason); VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
mutex_unlock(&vs->dev.mutex); mutex_unlock(&vs->dev.mutex);
} }
static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{ {
tcm_vhost_do_plug(tpg, lun, true); vhost_scsi_do_plug(tpg, lun, true);
} }
static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{ {
tcm_vhost_do_plug(tpg, lun, false); vhost_scsi_do_plug(tpg, lun, false);
} }
static int tcm_vhost_port_link(struct se_portal_group *se_tpg, static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
struct se_lun *lun) struct se_lun *lun)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
mutex_lock(&tcm_vhost_mutex); mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex); mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count++; tpg->tv_tpg_port_count++;
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
tcm_vhost_hotplug(tpg, lun); vhost_scsi_hotplug(tpg, lun);
mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vhost_scsi_mutex);
return 0; return 0;
} }
static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
struct se_lun *lun) struct se_lun *lun)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
mutex_lock(&tcm_vhost_mutex); mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex); mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count--; tpg->tv_tpg_port_count--;
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
tcm_vhost_hotunplug(tpg, lun); vhost_scsi_hotunplug(tpg, lun);
mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vhost_scsi_mutex);
} }
static struct se_node_acl * static struct se_node_acl *
tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
struct config_group *group, struct config_group *group,
const char *name) const char *name)
{ {
struct se_node_acl *se_nacl, *se_nacl_new; struct se_node_acl *se_nacl, *se_nacl_new;
struct tcm_vhost_nacl *nacl; struct vhost_scsi_nacl *nacl;
u64 wwpn = 0; u64 wwpn = 0;
u32 nexus_depth; u32 nexus_depth;
/* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL); */ return ERR_PTR(-EINVAL); */
se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
if (!se_nacl_new) if (!se_nacl_new)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -1826,37 +1840,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, ...@@ -1826,37 +1840,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
name, nexus_depth); name, nexus_depth);
if (IS_ERR(se_nacl)) { if (IS_ERR(se_nacl)) {
tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
return se_nacl; return se_nacl;
} }
/* /*
* Locate our struct tcm_vhost_nacl and set the FC Nport WWPN * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
*/ */
nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
nacl->iport_wwpn = wwpn; nacl->iport_wwpn = wwpn;
return se_nacl; return se_nacl;
} }
static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
{ {
struct tcm_vhost_nacl *nacl = container_of(se_acl, struct vhost_scsi_nacl *nacl = container_of(se_acl,
struct tcm_vhost_nacl, se_node_acl); struct vhost_scsi_nacl, se_node_acl);
core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
kfree(nacl); kfree(nacl);
} }
static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
struct se_session *se_sess) struct se_session *se_sess)
{ {
struct tcm_vhost_cmd *tv_cmd; struct vhost_scsi_cmd *tv_cmd;
unsigned int i; unsigned int i;
if (!se_sess->sess_cmd_map) if (!se_sess->sess_cmd_map)
return; return;
for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
kfree(tv_cmd->tvc_sgl); kfree(tv_cmd->tvc_sgl);
kfree(tv_cmd->tvc_prot_sgl); kfree(tv_cmd->tvc_prot_sgl);
...@@ -1864,13 +1878,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, ...@@ -1864,13 +1878,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
} }
} }
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name) const char *name)
{ {
struct se_portal_group *se_tpg; struct se_portal_group *se_tpg;
struct se_session *se_sess; struct se_session *se_sess;
struct tcm_vhost_nexus *tv_nexus; struct vhost_scsi_nexus *tv_nexus;
struct tcm_vhost_cmd *tv_cmd; struct vhost_scsi_cmd *tv_cmd;
unsigned int i; unsigned int i;
mutex_lock(&tpg->tv_tpg_mutex); mutex_lock(&tpg->tv_tpg_mutex);
...@@ -1881,19 +1895,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, ...@@ -1881,19 +1895,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
} }
se_tpg = &tpg->se_tpg; se_tpg = &tpg->se_tpg;
tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
if (!tv_nexus) { if (!tv_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate struct tcm_vhost_nexus\n"); pr_err("Unable to allocate struct vhost_scsi_nexus\n");
return -ENOMEM; return -ENOMEM;
} }
/* /*
* Initialize the struct se_session pointer and setup tagpool * Initialize the struct se_session pointer and setup tagpool
* for struct tcm_vhost_cmd descriptors * for struct vhost_scsi_cmd descriptors
*/ */
tv_nexus->tvn_se_sess = transport_init_session_tags( tv_nexus->tvn_se_sess = transport_init_session_tags(
TCM_VHOST_DEFAULT_TAGS, VHOST_SCSI_DEFAULT_TAGS,
sizeof(struct tcm_vhost_cmd), sizeof(struct vhost_scsi_cmd),
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
if (IS_ERR(tv_nexus->tvn_se_sess)) { if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
...@@ -1901,11 +1915,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, ...@@ -1901,11 +1915,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
return -ENOMEM; return -ENOMEM;
} }
se_sess = tv_nexus->tvn_se_sess; se_sess = tv_nexus->tvn_se_sess;
for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL); VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
if (!tv_cmd->tvc_sgl) { if (!tv_cmd->tvc_sgl) {
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
...@@ -1913,7 +1927,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, ...@@ -1913,7 +1927,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
} }
tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL); VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
if (!tv_cmd->tvc_upages) { if (!tv_cmd->tvc_upages) {
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_upages\n"); pr_err("Unable to allocate tv_cmd->tvc_upages\n");
...@@ -1921,7 +1935,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, ...@@ -1921,7 +1935,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
} }
tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL); VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
if (!tv_cmd->tvc_prot_sgl) { if (!tv_cmd->tvc_prot_sgl) {
mutex_unlock(&tpg->tv_tpg_mutex); mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
...@@ -1930,7 +1944,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, ...@@ -1930,7 +1944,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
} }
/* /*
* Since we are running in 'demo mode' this call with generate a * Since we are running in 'demo mode' this call with generate a
* struct se_node_acl for the tcm_vhost struct se_portal_group with * struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'. * the SCSI Initiator port name of the passed configfs group 'name'.
*/ */
tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
...@@ -1953,16 +1967,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, ...@@ -1953,16 +1967,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
return 0; return 0;
out: out:
tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
transport_free_session(se_sess); transport_free_session(se_sess);
kfree(tv_nexus); kfree(tv_nexus);
return -ENOMEM; return -ENOMEM;
} }
static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{ {
struct se_session *se_sess; struct se_session *se_sess;
struct tcm_vhost_nexus *tv_nexus; struct vhost_scsi_nexus *tv_nexus;
mutex_lock(&tpg->tv_tpg_mutex); mutex_lock(&tpg->tv_tpg_mutex);
tv_nexus = tpg->tpg_nexus; tv_nexus = tpg->tpg_nexus;
...@@ -1994,10 +2008,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) ...@@ -1994,10 +2008,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
} }
pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname); tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
/* /*
* Release the SCSI I_T Nexus to the emulated vhost Target Port * Release the SCSI I_T Nexus to the emulated vhost Target Port
*/ */
...@@ -2009,12 +2023,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) ...@@ -2009,12 +2023,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
return 0; return 0;
} }
static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
char *page) char *page)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
struct tcm_vhost_nexus *tv_nexus; struct vhost_scsi_nexus *tv_nexus;
ssize_t ret; ssize_t ret;
mutex_lock(&tpg->tv_tpg_mutex); mutex_lock(&tpg->tv_tpg_mutex);
...@@ -2030,40 +2044,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, ...@@ -2030,40 +2044,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
return ret; return ret;
} }
static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
const char *page, const char *page,
size_t count) size_t count)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
struct tcm_vhost_tport *tport_wwn = tpg->tport; struct vhost_scsi_tport *tport_wwn = tpg->tport;
unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
int ret; int ret;
/* /*
* Shutdown the active I_T nexus if 'NULL' is passed.. * Shutdown the active I_T nexus if 'NULL' is passed..
*/ */
if (!strncmp(page, "NULL", 4)) { if (!strncmp(page, "NULL", 4)) {
ret = tcm_vhost_drop_nexus(tpg); ret = vhost_scsi_drop_nexus(tpg);
return (!ret) ? count : ret; return (!ret) ? count : ret;
} }
/* /*
* Otherwise make sure the passed virtual Initiator port WWN matches * Otherwise make sure the passed virtual Initiator port WWN matches
* the fabric protocol_id set in tcm_vhost_make_tport(), and call * the fabric protocol_id set in vhost_scsi_make_tport(), and call
* tcm_vhost_make_nexus(). * vhost_scsi_make_nexus().
*/ */
if (strlen(page) >= TCM_VHOST_NAMELEN) { if (strlen(page) >= VHOST_SCSI_NAMELEN) {
pr_err("Emulated NAA Sas Address: %s, exceeds" pr_err("Emulated NAA Sas Address: %s, exceeds"
" max: %d\n", page, TCM_VHOST_NAMELEN); " max: %d\n", page, VHOST_SCSI_NAMELEN);
return -EINVAL; return -EINVAL;
} }
snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
ptr = strstr(i_port, "naa."); ptr = strstr(i_port, "naa.");
if (ptr) { if (ptr) {
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
pr_err("Passed SAS Initiator Port %s does not" pr_err("Passed SAS Initiator Port %s does not"
" match target port protoid: %s\n", i_port, " match target port protoid: %s\n", i_port,
tcm_vhost_dump_proto_id(tport_wwn)); vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL; return -EINVAL;
} }
port_ptr = &i_port[0]; port_ptr = &i_port[0];
...@@ -2074,7 +2088,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, ...@@ -2074,7 +2088,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
pr_err("Passed FCP Initiator Port %s does not" pr_err("Passed FCP Initiator Port %s does not"
" match target port protoid: %s\n", i_port, " match target port protoid: %s\n", i_port,
tcm_vhost_dump_proto_id(tport_wwn)); vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL; return -EINVAL;
} }
port_ptr = &i_port[3]; /* Skip over "fc." */ port_ptr = &i_port[3]; /* Skip over "fc." */
...@@ -2085,7 +2099,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, ...@@ -2085,7 +2099,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
pr_err("Passed iSCSI Initiator Port %s does not" pr_err("Passed iSCSI Initiator Port %s does not"
" match target port protoid: %s\n", i_port, " match target port protoid: %s\n", i_port,
tcm_vhost_dump_proto_id(tport_wwn)); vhost_scsi_dump_proto_id(tport_wwn));
return -EINVAL; return -EINVAL;
} }
port_ptr = &i_port[0]; port_ptr = &i_port[0];
...@@ -2101,40 +2115,40 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, ...@@ -2101,40 +2115,40 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
if (i_port[strlen(i_port)-1] == '\n') if (i_port[strlen(i_port)-1] == '\n')
i_port[strlen(i_port)-1] = '\0'; i_port[strlen(i_port)-1] = '\0';
ret = tcm_vhost_make_nexus(tpg, port_ptr); ret = vhost_scsi_make_nexus(tpg, port_ptr);
if (ret < 0) if (ret < 0)
return ret; return ret;
return count; return count;
} }
TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
&tcm_vhost_tpg_nexus.attr, &vhost_scsi_tpg_nexus.attr,
NULL, NULL,
}; };
static struct se_portal_group * static struct se_portal_group *
tcm_vhost_make_tpg(struct se_wwn *wwn, vhost_scsi_make_tpg(struct se_wwn *wwn,
struct config_group *group, struct config_group *group,
const char *name) const char *name)
{ {
struct tcm_vhost_tport *tport = container_of(wwn, struct vhost_scsi_tport *tport = container_of(wwn,
struct tcm_vhost_tport, tport_wwn); struct vhost_scsi_tport, tport_wwn);
struct tcm_vhost_tpg *tpg; struct vhost_scsi_tpg *tpg;
unsigned long tpgt; u16 tpgt;
int ret; int ret;
if (strstr(name, "tpgt_") != name) if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
if (!tpg) { if (!tpg) {
pr_err("Unable to allocate struct tcm_vhost_tpg"); pr_err("Unable to allocate struct vhost_scsi_tpg");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
mutex_init(&tpg->tv_tpg_mutex); mutex_init(&tpg->tv_tpg_mutex);
...@@ -2142,31 +2156,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn, ...@@ -2142,31 +2156,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
tpg->tport = tport; tpg->tport = tport;
tpg->tport_tpgt = tpgt; tpg->tport_tpgt = tpgt;
ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) { if (ret < 0) {
kfree(tpg); kfree(tpg);
return NULL; return NULL;
} }
mutex_lock(&tcm_vhost_mutex); mutex_lock(&vhost_scsi_mutex);
list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vhost_scsi_mutex);
return &tpg->se_tpg; return &tpg->se_tpg;
} }
static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{ {
struct tcm_vhost_tpg *tpg = container_of(se_tpg, struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg); struct vhost_scsi_tpg, se_tpg);
mutex_lock(&tcm_vhost_mutex); mutex_lock(&vhost_scsi_mutex);
list_del(&tpg->tv_tpg_list); list_del(&tpg->tv_tpg_list);
mutex_unlock(&tcm_vhost_mutex); mutex_unlock(&vhost_scsi_mutex);
/* /*
* Release the virtual I_T Nexus for this vhost TPG * Release the virtual I_T Nexus for this vhost TPG
*/ */
tcm_vhost_drop_nexus(tpg); vhost_scsi_drop_nexus(tpg);
/* /*
* Deregister the se_tpg from TCM.. * Deregister the se_tpg from TCM..
*/ */
...@@ -2175,21 +2189,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) ...@@ -2175,21 +2189,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
} }
static struct se_wwn * static struct se_wwn *
tcm_vhost_make_tport(struct target_fabric_configfs *tf, vhost_scsi_make_tport(struct target_fabric_configfs *tf,
struct config_group *group, struct config_group *group,
const char *name) const char *name)
{ {
struct tcm_vhost_tport *tport; struct vhost_scsi_tport *tport;
char *ptr; char *ptr;
u64 wwpn = 0; u64 wwpn = 0;
int off = 0; int off = 0;
/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL); */ return ERR_PTR(-EINVAL); */
tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
if (!tport) { if (!tport) {
pr_err("Unable to allocate struct tcm_vhost_tport"); pr_err("Unable to allocate struct vhost_scsi_tport");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
tport->tport_wwpn = wwpn; tport->tport_wwpn = wwpn;
...@@ -2220,102 +2234,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf, ...@@ -2220,102 +2234,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
check_len: check_len:
if (strlen(name) >= TCM_VHOST_NAMELEN) { if (strlen(name) >= VHOST_SCSI_NAMELEN) {
pr_err("Emulated %s Address: %s, exceeds" pr_err("Emulated %s Address: %s, exceeds"
" max: %d\n", name, tcm_vhost_dump_proto_id(tport), " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
TCM_VHOST_NAMELEN); VHOST_SCSI_NAMELEN);
kfree(tport); kfree(tport);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
return &tport->tport_wwn; return &tport->tport_wwn;
} }
static void tcm_vhost_drop_tport(struct se_wwn *wwn) static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{ {
struct tcm_vhost_tport *tport = container_of(wwn, struct vhost_scsi_tport *tport = container_of(wwn,
struct tcm_vhost_tport, tport_wwn); struct vhost_scsi_tport, tport_wwn);
pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
tport->tport_name); tport->tport_name);
kfree(tport); kfree(tport);
} }
static ssize_t static ssize_t
tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf, vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
char *page) char *page)
{ {
return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
"on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine); utsname()->machine);
} }
TF_WWN_ATTR_RO(tcm_vhost, version); TF_WWN_ATTR_RO(vhost_scsi, version);
static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
&tcm_vhost_wwn_version.attr, &vhost_scsi_wwn_version.attr,
NULL, NULL,
}; };
static struct target_core_fabric_ops tcm_vhost_ops = { static struct target_core_fabric_ops vhost_scsi_ops = {
.get_fabric_name = tcm_vhost_get_fabric_name, .get_fabric_name = vhost_scsi_get_fabric_name,
.get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident,
.tpg_get_wwn = tcm_vhost_get_fabric_wwn, .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
.tpg_get_tag = tcm_vhost_get_tag, .tpg_get_tag = vhost_scsi_get_tpgt,
.tpg_get_default_depth = tcm_vhost_get_default_depth, .tpg_get_default_depth = vhost_scsi_get_default_depth,
.tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, .tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id,
.tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, .tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len,
.tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, .tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id,
.tpg_check_demo_mode = tcm_vhost_check_true, .tpg_check_demo_mode = vhost_scsi_check_true,
.tpg_check_demo_mode_cache = tcm_vhost_check_true, .tpg_check_demo_mode_cache = vhost_scsi_check_true,
.tpg_check_demo_mode_write_protect = tcm_vhost_check_false, .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
.tpg_check_prod_mode_write_protect = tcm_vhost_check_false, .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
.tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl,
.tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
.release_cmd = tcm_vhost_release_cmd, .release_cmd = vhost_scsi_release_cmd,
.check_stop_free = vhost_scsi_check_stop_free, .check_stop_free = vhost_scsi_check_stop_free,
.shutdown_session = tcm_vhost_shutdown_session, .shutdown_session = vhost_scsi_shutdown_session,
.close_session = tcm_vhost_close_session, .close_session = vhost_scsi_close_session,
.sess_get_index = tcm_vhost_sess_get_index, .sess_get_index = vhost_scsi_sess_get_index,
.sess_get_initiator_sid = NULL, .sess_get_initiator_sid = NULL,
.write_pending = tcm_vhost_write_pending, .write_pending = vhost_scsi_write_pending,
.write_pending_status = tcm_vhost_write_pending_status, .write_pending_status = vhost_scsi_write_pending_status,
.set_default_node_attributes = tcm_vhost_set_default_node_attrs, .set_default_node_attributes = vhost_scsi_set_default_node_attrs,
.get_task_tag = tcm_vhost_get_task_tag, .get_task_tag = vhost_scsi_get_task_tag,
.get_cmd_state = tcm_vhost_get_cmd_state, .get_cmd_state = vhost_scsi_get_cmd_state,
.queue_data_in = tcm_vhost_queue_data_in, .queue_data_in = vhost_scsi_queue_data_in,
.queue_status = tcm_vhost_queue_status, .queue_status = vhost_scsi_queue_status,
.queue_tm_rsp = tcm_vhost_queue_tm_rsp, .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
.aborted_task = tcm_vhost_aborted_task, .aborted_task = vhost_scsi_aborted_task,
/* /*
* Setup callers for generic logic in target_core_fabric_configfs.c * Setup callers for generic logic in target_core_fabric_configfs.c
*/ */
.fabric_make_wwn = tcm_vhost_make_tport, .fabric_make_wwn = vhost_scsi_make_tport,
.fabric_drop_wwn = tcm_vhost_drop_tport, .fabric_drop_wwn = vhost_scsi_drop_tport,
.fabric_make_tpg = tcm_vhost_make_tpg, .fabric_make_tpg = vhost_scsi_make_tpg,
.fabric_drop_tpg = tcm_vhost_drop_tpg, .fabric_drop_tpg = vhost_scsi_drop_tpg,
.fabric_post_link = tcm_vhost_port_link, .fabric_post_link = vhost_scsi_port_link,
.fabric_pre_unlink = tcm_vhost_port_unlink, .fabric_pre_unlink = vhost_scsi_port_unlink,
.fabric_make_np = NULL, .fabric_make_np = NULL,
.fabric_drop_np = NULL, .fabric_drop_np = NULL,
.fabric_make_nodeacl = tcm_vhost_make_nodeacl, .fabric_make_nodeacl = vhost_scsi_make_nodeacl,
.fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl,
}; };
static int tcm_vhost_register_configfs(void) static int vhost_scsi_register_configfs(void)
{ {
struct target_fabric_configfs *fabric; struct target_fabric_configfs *fabric;
int ret; int ret;
pr_debug("TCM_VHOST fabric module %s on %s/%s" pr_debug("vhost-scsi fabric module %s on %s/%s"
" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine); utsname()->machine);
/* /*
* Register the top level struct config_item_type with TCM core * Register the top level struct config_item_type with TCM core
...@@ -2326,14 +2340,14 @@ static int tcm_vhost_register_configfs(void) ...@@ -2326,14 +2340,14 @@ static int tcm_vhost_register_configfs(void)
return PTR_ERR(fabric); return PTR_ERR(fabric);
} }
/* /*
* Setup fabric->tf_ops from our local tcm_vhost_ops * Setup fabric->tf_ops from our local vhost_scsi_ops
*/ */
fabric->tf_ops = tcm_vhost_ops; fabric->tf_ops = vhost_scsi_ops;
/* /*
* Setup default attribute lists for various fabric->tf_cit_tmpl * Setup default attribute lists for various fabric->tf_cit_tmpl
*/ */
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
...@@ -2353,37 +2367,37 @@ static int tcm_vhost_register_configfs(void) ...@@ -2353,37 +2367,37 @@ static int tcm_vhost_register_configfs(void)
/* /*
* Setup our local pointer to *fabric * Setup our local pointer to *fabric
*/ */
tcm_vhost_fabric_configfs = fabric; vhost_scsi_fabric_configfs = fabric;
pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
return 0; return 0;
}; };
static void tcm_vhost_deregister_configfs(void) static void vhost_scsi_deregister_configfs(void)
{ {
if (!tcm_vhost_fabric_configfs) if (!vhost_scsi_fabric_configfs)
return; return;
target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
tcm_vhost_fabric_configfs = NULL; vhost_scsi_fabric_configfs = NULL;
pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
}; };
static int __init tcm_vhost_init(void) static int __init vhost_scsi_init(void)
{ {
int ret = -ENOMEM; int ret = -ENOMEM;
/* /*
* Use our own dedicated workqueue for submitting I/O into * Use our own dedicated workqueue for submitting I/O into
* target core to avoid contention within system_wq. * target core to avoid contention within system_wq.
*/ */
tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
if (!tcm_vhost_workqueue) if (!vhost_scsi_workqueue)
goto out; goto out;
ret = vhost_scsi_register(); ret = vhost_scsi_register();
if (ret < 0) if (ret < 0)
goto out_destroy_workqueue; goto out_destroy_workqueue;
ret = tcm_vhost_register_configfs(); ret = vhost_scsi_register_configfs();
if (ret < 0) if (ret < 0)
goto out_vhost_scsi_deregister; goto out_vhost_scsi_deregister;
...@@ -2392,20 +2406,20 @@ static int __init tcm_vhost_init(void) ...@@ -2392,20 +2406,20 @@ static int __init tcm_vhost_init(void)
out_vhost_scsi_deregister: out_vhost_scsi_deregister:
vhost_scsi_deregister(); vhost_scsi_deregister();
out_destroy_workqueue: out_destroy_workqueue:
destroy_workqueue(tcm_vhost_workqueue); destroy_workqueue(vhost_scsi_workqueue);
out: out:
return ret; return ret;
}; };
static void tcm_vhost_exit(void) static void vhost_scsi_exit(void)
{ {
tcm_vhost_deregister_configfs(); vhost_scsi_deregister_configfs();
vhost_scsi_deregister(); vhost_scsi_deregister();
destroy_workqueue(tcm_vhost_workqueue); destroy_workqueue(vhost_scsi_workqueue);
}; };
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost"); MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
module_init(tcm_vhost_init); module_init(vhost_scsi_init);
module_exit(tcm_vhost_exit); module_exit(vhost_scsi_exit);
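The rename above leaves the driver's registration order intact: the dedicated workqueue is created first, the misc device is registered next via vhost_scsi_register(), and configfs registration comes last, with the error path unwinding in reverse through gotos. A minimal, self-contained sketch of that pattern follows; every identifier in it (example_wq, example_register(), and so on) is illustrative and stands in for, rather than reproduces, the driver's real symbols.

	/* Sketch of the goto-based init/teardown ordering used by vhost_scsi_init();
	 * all names here are illustrative, not the driver's actual symbols. */
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int example_register(void) { return 0; }            /* stands in for vhost_scsi_register() */
	static void example_deregister(void) { }                   /* stands in for vhost_scsi_deregister() */
	static int example_register_configfs(void) { return 0; }   /* stands in for vhost_scsi_register_configfs() */

	static int __init example_init(void)
	{
		int ret = -ENOMEM;

		example_wq = alloc_workqueue("example", 0, 0);
		if (!example_wq)
			goto out;

		ret = example_register();
		if (ret < 0)
			goto out_destroy_workqueue;

		ret = example_register_configfs();
		if (ret < 0)
			goto out_deregister;

		return 0;

	out_deregister:
		example_deregister();
	out_destroy_workqueue:
		destroy_workqueue(example_wq);
	out:
		return ret;
	}

	static void __exit example_exit(void)
	{
		/* teardown mirrors init in reverse order */
		example_deregister();
		destroy_workqueue(example_wq);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");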
...@@ -880,4 +880,18 @@ struct iscsit_global { ...@@ -880,4 +880,18 @@ struct iscsit_global {
struct iscsi_portal_group *discovery_tpg; struct iscsi_portal_group *discovery_tpg;
}; };
static inline u32 session_get_next_ttt(struct iscsi_session *session)
{
u32 ttt;
spin_lock_bh(&session->ttt_lock);
ttt = session->targ_xfer_tag++;
if (ttt == 0xFFFFFFFF)
ttt = session->targ_xfer_tag++;
spin_unlock_bh(&session->ttt_lock);
return ttt;
}
extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
#endif /* ISCSI_TARGET_CORE_H */ #endif /* ISCSI_TARGET_CORE_H */
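The new session_get_next_ttt() inline helper above hands out Target Transfer Tags under ttt_lock and skips the reserved value 0xFFFFFFFF. A hedged usage sketch follows; the caller and the choice of a NOP-In PDU are purely illustrative and are not part of this patch.

	/* Illustrative caller only: shows how a response PDU could pick up a
	 * fresh TTT from the helper, which already guarantees 0xFFFFFFFF is
	 * never returned. */
	static void example_set_ttt(struct iscsi_conn *conn, struct iscsi_nopin *hdr)
	{
		hdr->ttt = cpu_to_be32(session_get_next_ttt(conn->sess));
	}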
#include <linux/module.h> #include <linux/module.h>
#include <linux/list.h> #include <linux/list.h>
#include "../../../drivers/target/iscsi/iscsi_target_core.h" #include "iscsi_target_core.h"
struct iscsit_transport { struct iscsit_transport {
#define ISCSIT_TRANSPORT_NAME 16 #define ISCSIT_TRANSPORT_NAME 16
......
...@@ -407,7 +407,7 @@ struct t10_reservation { ...@@ -407,7 +407,7 @@ struct t10_reservation {
/* Activate Persistence across Target Power Loss enabled /* Activate Persistence across Target Power Loss enabled
* for SCSI device */ * for SCSI device */
int pr_aptpl_active; int pr_aptpl_active;
#define PR_APTPL_BUF_LEN 8192 #define PR_APTPL_BUF_LEN 262144
u32 pr_generation; u32 pr_generation;
spinlock_t registration_lock; spinlock_t registration_lock;
spinlock_t aptpl_reg_lock; spinlock_t aptpl_reg_lock;
......
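The final hunk raises PR_APTPL_BUF_LEN from 8 KiB to 256 KiB, so any buffer sized by the define has to live on the heap rather than the stack. A minimal sketch under that assumption; the function name and surrounding error handling are hypothetical, not the target core's actual code.

	/* Hypothetical illustration only: with PR_APTPL_BUF_LEN now 262144 bytes,
	 * the APTPL metadata buffer must be heap-allocated; a vmalloc-backed
	 * allocation would also be a reasonable choice at this size. */
	static unsigned char *example_alloc_aptpl_buf(void)
	{
		return kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);	/* caller kfree()s when done */
	}

The target core sizes, fills, and frees this buffer elsewhere; the helper above only illustrates the heap-allocation consequence of the larger define.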