Commit 025def92 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:

 "There has been work in a number of different areas over the last
  weeks, including:

   - Fix target-core-user (TCMU) back-end bi-directional handling (Xiubo
     Li + Mike Christie + Ilias Tsitsimpis)

   - Fix iscsi-target TMR reference leak during session shutdown (Rob
     Millner + Chu Yuan Lin)

   - Fix target_core_fabric_configfs.c race between LUN shutdown +
     mapped LUN creation (James Shen)

   - Fix target-core unknown fabric callback queue-full errors (Potnuri
     Bharat Teja)

   - Fix iscsi-target + iser-target queue-full handling in order to
     support iw_cxgb4 RNICs. (Potnuri Bharat Teja + Sagi Grimberg)

   - Fix ALUA transition state race between multiple initiators (Mike
     Christie)

   - Drop work-around for legacy GlobalSAN initiator, to allow QLogic
     57840S + 579xx offload HBAs to work out-of-the-box in MSFT
     environments. (Martin Svec + Arun Easi)

  Note that a number of these are CC'ed for stable, and although the
  queue-full bug-fixes required for iser-target to work with iw_cxgb4
  aren't CC'ed here, they'll be posted to Greg-KH separately"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  tcmu: Skip Data-Out blocks before gathering Data-In buffer for BIDI case
  iscsi-target: Drop work-around for legacy GlobalSAN initiator
  target: Fix ALUA transition state race between multiple initiators
  iser-target: avoid posting a recv buffer twice
  iser-target: Fix queue-full response handling
  iscsi-target: Propigate queue_data_in + queue_status errors
  target: Fix unknown fabric callback queue-full errors
  tcmu: Fix wrongly calculating of the base_command_size
  tcmu: Fix possible overwrite of t_data_sg's last iov[]
  target: Avoid mappedlun symlink creation during lun shutdown
  iscsi-target: Fix TMR reference leak during session shutdown
  usb: gadget: Correct usb EP argument for BOT status request
  tcmu: Allow cmd_time_out to be set to zero (disabled)
parents 06ea4c38 a5d68ba8
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
+		rx_desc->in_use = false;
 	}
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
 	struct ib_recv_wr *rx_wr_failed, rx_wr;
 	int ret;
 
+	if (!rx_desc->in_use) {
+		/*
+		 * if the descriptor is not in-use we already reposted it
+		 * for recv, so just silently return
+		 */
+		return 0;
+	}
+
+	rx_desc->in_use = false;
 	rx_wr.wr_cqe = &rx_desc->rx_cqe;
 	rx_wr.sg_list = &rx_desc->rx_sg;
 	rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
+	rx_desc->in_use = true;
+
 	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
 			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 	ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
 	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
-	if (ret)
-		transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
-	else
-		isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+	if (ret) {
+		/*
+		 * transport_generic_request_failure() expects to have
+		 * plus two references to handle queue-full, so re-add
+		 * one here as target-core will have already dropped
+		 * it after the first isert_put_datain() callback.
+		 */
+		kref_get(&cmd->cmd_kref);
+		transport_generic_request_failure(cmd, cmd->pi_err);
+	} else {
+		/*
+		 * XXX: isert_put_response() failure is not retried.
+		 */
+		ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+		if (ret)
+			pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+	}
 }
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
 	spin_unlock_bh(&cmd->istate_lock);
 
-	if (ret) {
-		target_put_sess_cmd(se_cmd);
-		transport_send_check_condition_and_sense(se_cmd,
-							 se_cmd->pi_err, 0);
-	} else {
+	/*
+	 * transport_generic_request_failure() will drop the extra
+	 * se_cmd->cmd_kref reference after T10-PI error, and handle
+	 * any non-zero ->queue_status() callback error retries.
+	 */
+	if (ret)
+		transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+	else
 		target_execute_cmd(se_cmd);
-	}
 }
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		chain_wr = &isert_cmd->tx_desc.send_wr;
 	}
 
-	isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
-	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
-	return 1;
+	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+		  isert_cmd, rc);
+	return rc;
 }
 
 static int
 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	int ret;
 
 	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
 		 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
 
 	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-	isert_rdma_rw_ctx_post(isert_cmd, conn->context,
-			&isert_cmd->tx_desc.tx_cqe, NULL);
+	ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+				     &isert_cmd->tx_desc.tx_cqe, NULL);
 
-	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
-		 isert_cmd);
-	return 0;
+	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+		  isert_cmd, ret);
+	return ret;
 }
 
 static int
......
@@ -60,7 +60,7 @@
 #define ISER_RX_PAD_SIZE	(ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
 		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-		 sizeof(struct ib_cqe)))
+		 sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE		256
@@ -85,6 +85,7 @@ struct iser_rx_desc {
 	u64		dma_addr;
 	struct ib_sge	rx_sg;
 	struct ib_cqe	rx_cqe;
+	bool		in_use;
 	char		pad[ISER_RX_PAD_SIZE];
 } __packed;
......
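The in_use flag added across the two files above is a one-shot repost guard: isert_recv_done() arms the descriptor, and isert_post_recv() silently ignores a second post until the next completion re-arms it. Note that sizeof(bool) also joins the ISER_RX_PAD_SIZE subtraction so iser_rx_desc keeps its fixed, padded footprint. A minimal stand-alone C sketch of the guard, with hypothetical names in place of the kernel API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct iser_rx_desc; only the guard is shown. */
struct rx_desc {
	bool in_use;	/* armed on completion, cleared on (re)post */
};

/* Plays the role of isert_recv_done(): completion re-arms the descriptor. */
static void recv_done(struct rx_desc *d)
{
	d->in_use = true;
}

/* Plays the role of isert_post_recv(): a double post is absorbed. */
static int post_recv(struct rx_desc *d)
{
	if (!d->in_use)
		return 0;	/* already reposted, nothing to do */
	d->in_use = false;
	/* ...the real code would build and post an ib_recv_wr here... */
	return 1;	/* demo-only marker meaning "really posted" */
}

int main(void)
{
	struct rx_desc d = { .in_use = false };

	recv_done(&d);
	printf("first post:  %d\n", post_recv(&d));	/* 1 */
	printf("second post: %d\n", post_recv(&d));	/* 0, guarded */
	return 0;
}
```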
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
 
 int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-	return 0;
+	return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 EXPORT_SYMBOL(iscsit_queue_rsp);
......
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
 static int lio_queue_data_in(struct se_cmd *se_cmd)
 {
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_conn *conn = cmd->conn;
 
 	cmd->i_state = ISTATE_SEND_DATAIN;
-	cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
-
-	return 0;
+	return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
 }
 
 static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
 static int lio_queue_status(struct se_cmd *se_cmd)
 {
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_conn *conn = cmd->conn;
 
 	cmd->i_state = ISTATE_SEND_STATUS;
 	if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
-		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-		return 0;
+		return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 	}
-	cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
-
-	return 0;
+	return conn->conn_transport->iscsit_queue_status(conn, cmd);
 }
 
 static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
......
@@ -781,22 +781,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
 	} else if (IS_TYPE_NUMBER(param)) {
 		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
 			SET_PSTATE_REPLY_OPTIONAL(param);
-		/*
-		 * The GlobalSAN iSCSI Initiator for MacOSX does
-		 * not respond to MaxBurstLength, FirstBurstLength,
-		 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
-		 * So, we set them to 'reply optional' here, and assume the
-		 * the defaults from iscsi_parameters.h if the initiator
-		 * is not RFC compliant and the keys are not negotiated.
-		 */
-		if (!strcmp(param->name, MAXBURSTLENGTH))
-			SET_PSTATE_REPLY_OPTIONAL(param);
-		if (!strcmp(param->name, FIRSTBURSTLENGTH))
-			SET_PSTATE_REPLY_OPTIONAL(param);
-		if (!strcmp(param->name, DEFAULTTIME2WAIT))
-			SET_PSTATE_REPLY_OPTIONAL(param);
-		if (!strcmp(param->name, DEFAULTTIME2RETAIN))
-			SET_PSTATE_REPLY_OPTIONAL(param);
 		/*
 		 * Required for gPXE iSCSI boot client
 		 */
......
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
 	}
 }
 
-void iscsit_add_cmd_to_response_queue(
+int iscsit_add_cmd_to_response_queue(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn,
 	u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
 	if (!qr) {
 		pr_err("Unable to allocate memory for"
 			" struct iscsi_queue_req\n");
-		return;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&qr->qr_list);
 	qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
 	spin_unlock_bh(&conn->response_queue_lock);
 
 	wake_up(&conn->queues_wq);
+	return 0;
 }
 
 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
 	struct se_cmd *se_cmd = NULL;
 	int rc;
+	bool op_scsi = false;
 	/*
 	 * Determine if a struct se_cmd is associated with
 	 * this struct iscsi_cmd.
 	 */
 	switch (cmd->iscsi_opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		se_cmd = &cmd->se_cmd;
-		__iscsit_free_cmd(cmd, true, shutdown);
+		op_scsi = true;
 		/*
 		 * Fallthrough
 		 */
 	case ISCSI_OP_SCSI_TMFUNC:
-		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
-		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
-			__iscsit_free_cmd(cmd, true, shutdown);
+		se_cmd = &cmd->se_cmd;
+		__iscsit_free_cmd(cmd, op_scsi, shutdown);
+		rc = transport_generic_free_cmd(se_cmd, shutdown);
+		if (!rc && shutdown && se_cmd->se_sess) {
+			__iscsit_free_cmd(cmd, op_scsi, shutdown);
 			target_put_sess_cmd(se_cmd);
 		}
 		break;
......
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
 			struct iscsi_conn_recovery **, itt_t);
 extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
 extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
......
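Taken together, the iscsi-target hunks above convert the response-queue path from fire-and-forget into a propagated return code, so an allocation failure can reach target-core's queue-full handling instead of being swallowed. A simplified stand-alone sketch of the propagation chain (hypothetical helpers, not the kernel functions):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stands in for iscsit_add_cmd_to_response_queue(): void -> int. */
static int add_to_response_queue(void)
{
	void *qr = malloc(64);	/* the iscsi_queue_req allocation */
	if (!qr)
		return -ENOMEM;	/* previously a bare "return;" */
	free(qr);
	return 0;
}

/* Stands in for lio_queue_status(): forward instead of "return 0;". */
static int queue_status(void)
{
	return add_to_response_queue();
}

int main(void)
{
	int ret = queue_status();

	if (ret)	/* the caller can finally see the failure */
		fprintf(stderr, "queue-full path, ret=%d\n", ret);
	return 0;
}
```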
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
 		/*
 		 * Set the ASYMMETRIC ACCESS State
 		 */
-		buf[off++] |= (atomic_read(
-			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
 		/*
 		 * Set supported ASYMMETRIC ACCESS State bits
 		 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
 	spin_lock(&lun->lun_tg_pt_gp_lock);
 	tg_pt_gp = lun->lun_tg_pt_gp;
-	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
 	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 
 	// XXX: keeps using tg_pt_gp witout reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
 }
 
 /*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
 */
 static int core_alua_update_tpg_primary_metadata(
 	struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
 		"alua_access_state=0x%02x\n"
 		"alua_access_status=0x%02x\n",
 		tg_pt_gp->tg_pt_gp_id,
-		tg_pt_gp->tg_pt_gp_alua_pending_state,
+		tg_pt_gp->tg_pt_gp_alua_access_state,
 		tg_pt_gp->tg_pt_gp_alua_access_status);
 
 	snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 }
 
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
-{
-	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
-	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
-			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
-
-	/*
-	 * Update the ALUA metadata buf that has been allocated in
-	 * core_alua_do_port_transition(), this metadata will be written
-	 * to struct file.
-	 *
-	 * Note that there is the case where we do not want to update the
-	 * metadata when the saved metadata is being parsed in userspace
-	 * when setting the existing port access state and access status.
-	 *
-	 * Also note that the failure to write out the ALUA metadata to
-	 * struct file does NOT affect the actual ALUA transition.
-	 */
-	if (tg_pt_gp->tg_pt_gp_write_metadata) {
-		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
-		core_alua_update_tpg_primary_metadata(tg_pt_gp);
-		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
-	}
-	/*
-	 * Set the current primary ALUA access state to the requested new state
-	 */
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-		   tg_pt_gp->tg_pt_gp_alua_pending_state);
-
-	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
-		" from primary access state %s to %s\n", (explicit) ? "explicit" :
-		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
-		tg_pt_gp->tg_pt_gp_id,
-		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
-		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
-
-	core_alua_queue_state_change_ua(tg_pt_gp);
-
-	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-
-	if (tg_pt_gp->tg_pt_gp_transition_complete)
-		complete(tg_pt_gp->tg_pt_gp_transition_complete);
-}
-
 static int core_alua_do_transition_tg_pt(
 	struct t10_alua_tg_pt_gp *tg_pt_gp,
 	int new_state,
 	int explicit)
 {
-	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-	DECLARE_COMPLETION_ONSTACK(wait);
+	int prev_state;
 
+	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 	/* Nothing to be done here */
-	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 		return 0;
+	}
 
-	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
+	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 		return -EAGAIN;
-
-	/*
-	 * Flush any pending transitions
-	 */
-	if (!explicit)
-		flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	}
 
 	/*
 	 * Save the old primary ALUA access state, and set the current state
 	 * to ALUA_ACCESS_STATE_TRANSITION.
 	 */
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-			ALUA_ACCESS_STATE_TRANSITION);
+	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
 	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
 				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
 				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
 
 	core_alua_queue_state_change_ua(tg_pt_gp);
 
-	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 		return 0;
-
-	tg_pt_gp->tg_pt_gp_alua_previous_state =
-		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+	}
 
 	/*
 	 * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
 		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
 
 	/*
-	 * Take a reference for workqueue item
+	 * Set the current primary ALUA access state to the requested new state
 	 */
-	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
 
-	schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
-	if (explicit) {
-		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		wait_for_completion(&wait);
-		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+	/*
+	 * Update the ALUA metadata buf that has been allocated in
+	 * core_alua_do_port_transition(), this metadata will be written
+	 * to struct file.
+	 *
+	 * Note that there is the case where we do not want to update the
+	 * metadata when the saved metadata is being parsed in userspace
+	 * when setting the existing port access state and access status.
+	 *
+	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
+	 */
+	if (tg_pt_gp->tg_pt_gp_write_metadata) {
+		core_alua_update_tpg_primary_metadata(tg_pt_gp);
 	}
 
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" from primary access state %s to %s\n", (explicit) ? "explicit" :
+		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id,
+		core_alua_dump_state(prev_state),
+		core_alua_dump_state(new_state));
+
+	core_alua_queue_state_change_ua(tg_pt_gp);
+
+	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 	return 0;
 }
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
 	}
 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
-	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-		  core_alua_do_transition_tg_pt_work);
 	tg_pt_gp->tg_pt_gp_dev = dev;
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-		   ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
+	tg_pt_gp->tg_pt_gp_alua_access_state =
+			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
 	/*
 	 * Enable both explicit and implicit ALUA support by default
 	 */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
 	dev->t10_alua.alua_tg_pt_gps_counter--;
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
-
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
 	 * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
 			"Primary Access Status: %s\nTG Port Secondary Access"
 			" State: %s\nTG Port Secondary Access Status: %s\n",
 			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
-			core_alua_dump_state(atomic_read(
-				&tg_pt_gp->tg_pt_gp_alua_access_state)),
+			core_alua_dump_state(
+				tg_pt_gp->tg_pt_gp_alua_access_state),
 			core_alua_dump_status(
 				tg_pt_gp->tg_pt_gp_alua_access_status),
 			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
......
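The net effect of the ALUA rework above is that a transition no longer bounces through a workqueue plus completion; one mutex serializes the whole read-modify-write of a plain int state, which is what closes the race between initiators. A stand-alone pthreads sketch of that serialization model (hypothetical names, not the kernel API; build with -pthread):

```c
#include <pthread.h>
#include <stdio.h>

enum { OPTIMIZED, TRANSITION, STANDBY };

static pthread_mutex_t transition_mutex = PTHREAD_MUTEX_INITIALIZER;
static int access_state = OPTIMIZED;	/* plain int, guarded by the mutex */

static int do_transition(int new_state)
{
	pthread_mutex_lock(&transition_mutex);
	if (access_state == new_state) {	/* nothing to be done here */
		pthread_mutex_unlock(&transition_mutex);
		return 0;
	}
	access_state = TRANSITION;	/* intermediate state, as in the patch */
	/* ...metadata write and unit-attention would happen here... */
	access_state = new_state;
	pthread_mutex_unlock(&transition_mutex);
	return 0;
}

static void *initiator(void *arg)
{
	do_transition((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* Two "initiators" can no longer interleave a single transition. */
	pthread_create(&a, NULL, initiator, (void *)(long)STANDBY);
	pthread_create(&b, NULL, initiator, (void *)(long)OPTIMIZED);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("final state: %d\n", access_state);
	return 0;
}
```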
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
 		char *page)
 {
 	return sprintf(page, "%d\n",
-		atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
+		to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
 }
 
 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
......
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
 		pr_err("Source se_lun->lun_se_dev does not exist\n");
 		return -EINVAL;
 	}
+	if (lun->lun_shutdown) {
+		pr_err("Unable to create mappedlun symlink because"
+			" lun->lun_shutdown=true\n");
+		return -EINVAL;
+	}
 	se_tpg = lun->lun_tpg;
 
 	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
......
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
 	 */
 	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
+	lun->lun_shutdown = true;
+
 	core_clear_lun_from_tpg(lun, tpg);
 	/*
 	 * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
 	}
 	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		hlist_del_rcu(&lun->link);
+
+	lun->lun_shutdown = false;
 	mutex_unlock(&tpg->tpg_lun_mutex);
 
 	percpu_ref_exit(&lun->lun_ref);
......
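The mappedlun fix is a two-sided handshake across the two files above: core_tpg_remove_lun() raises lun_shutdown for the teardown window and target_fabric_mappedlun_link() refuses to create symlinks while it is set. A minimal user-space sketch of the pattern (hypothetical simplified types; the kernel serializes these paths under tpg_lun_mutex):

```c
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct lun {
	pthread_mutex_t lock;	/* plays the role of tpg_lun_mutex */
	bool lun_shutdown;
};

/* Mirrors target_fabric_mappedlun_link(): no new links during teardown. */
static int mappedlun_link(struct lun *l)
{
	int ret = 0;

	pthread_mutex_lock(&l->lock);
	if (l->lun_shutdown)
		ret = -EINVAL;
	/* ...otherwise the configfs symlink would be created here... */
	pthread_mutex_unlock(&l->lock);
	return ret;
}

int main(void)
{
	struct lun l = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("normal link: %d\n", mappedlun_link(&l));	/* 0 */
	l.lun_shutdown = true;	/* the window core_tpg_remove_lun() opens */
	printf("during shutdown: %d\n", mappedlun_link(&l));	/* -22 */
	return 0;
}
```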
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-		struct se_device *dev);
+		struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
 		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
 			transport_write_pending_qf(cmd);
-		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
 			transport_complete_qf(cmd);
 	}
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 		}
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		goto check_stop;
 	default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	}
 
 	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	return;
 
 queue_full:
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
 	int ret = 0;
 
 	transport_complete_task_attr(cmd);
+	/*
+	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
+	 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
+	 * the same callbacks should not be retried.  Return CHECK_CONDITION
+	 * if a scsi_status is not already set.
+	 *
+	 * If a fabric driver ->queue_status() has returned non zero, always
+	 * keep retrying no matter what..
+	 */
+	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+		if (cmd->scsi_status)
+			goto queue_status;
 
-	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-		trace_target_cmd_complete(cmd);
-		ret = cmd->se_tfo->queue_status(cmd);
-		goto out;
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+		goto queue_status;
 	}
 
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+		goto queue_status;
+
 	switch (cmd->data_direction) {
 	case DMA_FROM_DEVICE:
 		if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ static void transport_complete_qf(struct se_cmd *cmd)
 		break;
 	}
 
-out:
 	if (ret < 0) {
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 		return;
 	}
 	transport_lun_remove_cmd(cmd);
 	transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-	struct se_cmd *cmd,
-	struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
					int err, bool write_pending)
 {
+	/*
+	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+	 * ->queue_data_in() callbacks from new process context.
+	 *
+	 * Otherwise for other errors, transport_complete_qf() will send
+	 * CHECK_CONDITION via ->queue_status() instead of attempting to
+	 * retry associated fabric driver data-transfer callbacks.
+	 */
+	if (err == -EAGAIN || err == -ENOMEM) {
+		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+						 TRANSPORT_COMPLETE_QF_OK;
+	} else {
+		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+	}
+
 	spin_lock_irq(&dev->qf_cmd_lock);
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
 	atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
 			WARN_ON(!cmd->scsi_status);
 			ret = transport_send_check_condition_and_sense(
 					cmd, 0, 1);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
 	} else if (rc) {
 		ret = transport_send_check_condition_and_sense(cmd,
 				rc, 0);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 
 		transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ static void target_complete_ok_work(struct work_struct *work)
 		if (target_read_prot_action(cmd)) {
 			ret = transport_send_check_condition_and_sense(cmd,
 					cmd->pi_err, 0);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ static void target_complete_ok_work(struct work_struct *work)
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ static void target_complete_ok_work(struct work_struct *work)
 			atomic_long_add(cmd->data_length,
 					&cmd->se_lun->lun_stats.tx_data_octets);
 			ret = cmd->se_tfo->queue_data_in(cmd);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 			break;
 		}
@@ -2166,7 +2197,7 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_status:
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	default:
@@ -2180,8 +2211,8 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_full:
 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
 		" data_direction: %d\n", cmd, cmd->data_direction);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
-	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-	WARN_ON(ret);
-
-	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
 
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
 	int ret;
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM) {
+	if (ret) {
 		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
 			 cmd);
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	}
 }
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	__releases(&cmd->t_state_lock)
 	__acquires(&cmd->t_state_lock)
 {
+	int ret;
+
 	assert_spin_locked(&cmd->t_state_lock);
 	WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 		trace_target_cmd_complete(cmd);
 
 		spin_unlock_irq(&cmd->t_state_lock);
-		cmd->se_tfo->queue_status(cmd);
+		ret = cmd->se_tfo->queue_status(cmd);
+		if (ret)
+			transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 		spin_lock_irq(&cmd->t_state_lock);
 
 		return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ void transport_send_task_abort(struct se_cmd *cmd)
 		 cmd->t_task_cdb[0], cmd->tag);
 
 	trace_target_cmd_complete(cmd);
-	cmd->se_tfo->queue_status(cmd);
+	ret = cmd->se_tfo->queue_status(cmd);
+	if (ret)
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)
......
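transport_handle_queue_full() now centralizes the retry decision its new comment describes: -EAGAIN/-ENOMEM mean "retry the same fabric callback from process context", anything else means "stop retrying data-transfer callbacks and escalate to ->queue_status()". A compact user-space rendering of that classification (plain C; the enum is a stand-in for the TRANSPORT_COMPLETE_QF_* t_state values):

```c
#include <errno.h>
#include <stdio.h>

enum qf_state { QF_WP, QF_OK, QF_ERR };

static enum qf_state classify(int err, int write_pending)
{
	if (err == -EAGAIN || err == -ENOMEM)	/* retryable queue-full */
		return write_pending ? QF_WP : QF_OK;
	return QF_ERR;	/* unknown fabric error: CHECK_CONDITION path */
}

int main(void)
{
	printf("%d\n", classify(-ENOMEM, 1));	/* 0 = QF_WP: retry write_pending */
	printf("%d\n", classify(-EAGAIN, 0));	/* 1 = QF_OK: retry data-in/status */
	printf("%d\n", classify(-EIO, 0));	/* 2 = QF_ERR: send CHECK_CONDITION */
	return 0;
}
```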
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
 		   DATA_BLOCK_BITS);
 }
 
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
-		struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+			     bool bidi)
 {
+	struct se_cmd *se_cmd = cmd->se_cmd;
 	int i, block;
 	int block_remaining = 0;
 	void *from, *to;
 	size_t copy_bytes, from_offset;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *data_sg;
+	unsigned int data_nents;
+	DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+	bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+	if (!bidi) {
+		data_sg = se_cmd->t_data_sg;
+		data_nents = se_cmd->t_data_nents;
+	} else {
+		uint32_t count;
+
+		/*
+		 * For bidi case, the first count blocks are for Data-Out
+		 * buffer blocks, and before gathering the Data-In buffer
+		 * the Data-Out buffer blocks should be discarded.
+		 */
+		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+		while (count--) {
+			block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+			clear_bit(block, bitmap);
+		}
+
+		data_sg = se_cmd->t_bidi_data_sg;
+		data_nents = se_cmd->t_bidi_data_nents;
+	}
 
 	for_each_sg(data_sg, sg, data_nents, i) {
 		int sg_remaining = sg->length;
 		to = kmap_atomic(sg_page(sg)) + sg->offset;
 		while (sg_remaining > 0) {
 			if (block_remaining == 0) {
-				block = find_first_bit(cmd_bitmap,
+				block = find_first_bit(bitmap,
 						DATA_BLOCK_BITS);
 				block_remaining = DATA_BLOCK_SIZE;
-				clear_bit(block, cmd_bitmap);
+				clear_bit(block, bitmap);
 			}
 			copy_bytes = min_t(size_t, sg_remaining,
 					block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
 	return true;
 }
 
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+		data_length += round_up(se_cmd->t_bidi_data_sg->length,
				DATA_BLOCK_SIZE);
+	}
+
+	return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+	return data_length / DATA_BLOCK_SIZE;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	uint32_t cmd_head;
 	uint64_t cdb_off;
 	bool copy_to_data_area;
-	size_t data_length;
+	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
 	DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	 * expensive to tell how many regions are freed in the bitmap
 	*/
 	base_command_size = max(offsetof(struct tcmu_cmd_entry,
-				req.iov[se_cmd->t_bidi_data_nents +
-					se_cmd->t_data_nents]),
+				req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
 				sizeof(struct tcmu_cmd_entry));
 	command_size = base_command_size
 		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	mb = udev->mb_addr;
 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
 
-	data_length = se_cmd->data_length;
-	if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-		data_length += se_cmd->t_bidi_data_sg->length;
-	}
 	if ((command_size > (udev->cmdr_size / 2)) ||
 	    data_length > udev->data_size) {
 		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	entry->req.iov_dif_cnt = 0;
 
 	/* Handle BIDI commands */
-	iov_cnt = 0;
-	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-	entry->req.iov_bidi_cnt = iov_cnt;
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		iov_cnt = 0;
+		iov++;
+		alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+					    se_cmd->t_bidi_data_nents, &iov,
+					    &iov_cnt, false);
+		entry->req.iov_bidi_cnt = iov_cnt;
+	}
 
 	/* cmd's data_bitmap is what changed in process */
 	bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
 			DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 			se_cmd->scsi_sense_length);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
 		/* Get Data-In buffer before clean up */
-		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-		gather_data_area(udev, bitmap,
-			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+		gather_data_area(udev, cmd, true);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
-		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-		gather_data_area(udev, bitmap,
-			se_cmd->t_data_sg, se_cmd->t_data_nents);
+		gather_data_area(udev, cmd, false);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
 		free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
 	if (ret < 0)
 		return ret;
 
-	if (!val) {
-		pr_err("Illegal value for cmd_time_out\n");
-		return -EINVAL;
-	}
-
 	udev->cmd_time_out = val * MSEC_PER_SEC;
 	return count;
 }
......
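The tcmu sizing helpers round each direction up to whole DATA_BLOCK_SIZE blocks, which is also why gather_data_area() must skip the Data-Out block count before copying Data-In. A worked example, assuming DATA_BLOCK_SIZE is 4096 as in this version of the file:

```c
#include <stdint.h>
#include <stdio.h>

#define DATA_BLOCK_SIZE 4096

static size_t round_up_block(size_t len)
{
	return (len + DATA_BLOCK_SIZE - 1) / DATA_BLOCK_SIZE * DATA_BLOCK_SIZE;
}

int main(void)
{
	size_t data_out = 1000;	/* se_cmd->data_length */
	size_t data_in = 5000;	/* first t_bidi_data_sg entry length */

	/* mirrors tcmu_cmd_get_data_length(): 4096 + 8192 = 12288 */
	size_t total = round_up_block(data_out) + round_up_block(data_in);
	/* mirrors tcmu_cmd_get_block_cnt(): iov[] sized by blocks, not nents */
	uint32_t blocks = total / DATA_BLOCK_SIZE;
	/* Data-Out blocks to discard before gathering Data-In */
	uint32_t skip = (data_out + DATA_BLOCK_SIZE - 1) / DATA_BLOCK_SIZE;

	printf("total=%zu blocks=%u skip=%u\n", total, blocks, skip);
	/* total=12288 blocks=3 skip=1 */
	return 0;
}
```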
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
 	usb_ep_free_request(fu->ep_in, fu->bot_req_in);
 	usb_ep_free_request(fu->ep_out, fu->bot_req_out);
 	usb_ep_free_request(fu->ep_out, fu->cmd.req);
-	usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+	usb_ep_free_request(fu->ep_in, fu->bot_status.req);
 
 	kfree(fu->cmd.buf);
......
@@ -117,6 +117,7 @@ enum transport_state_table {
 	TRANSPORT_ISTATE_PROCESSING = 11,
 	TRANSPORT_COMPLETE_QF_WP = 18,
 	TRANSPORT_COMPLETE_QF_OK = 19,
+	TRANSPORT_COMPLETE_QF_ERR = 20,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
 	u16	tg_pt_gp_id;
 	int	tg_pt_gp_valid_id;
 	int	tg_pt_gp_alua_supported_states;
-	int	tg_pt_gp_alua_pending_state;
-	int	tg_pt_gp_alua_previous_state;
 	int	tg_pt_gp_alua_access_status;
 	int	tg_pt_gp_alua_access_type;
 	int	tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
 	int	tg_pt_gp_pref;
 	int	tg_pt_gp_write_metadata;
 	u32	tg_pt_gp_members;
-	atomic_t tg_pt_gp_alua_access_state;
+	int	tg_pt_gp_alua_access_state;
 	atomic_t tg_pt_gp_ref_cnt;
 	spinlock_t tg_pt_gp_lock;
-	struct mutex tg_pt_gp_md_mutex;
+	struct mutex tg_pt_gp_transition_mutex;
 	struct se_device *tg_pt_gp_dev;
 	struct config_group tg_pt_gp_group;
 	struct list_head tg_pt_gp_list;
 	struct list_head tg_pt_gp_lun_list;
 	struct se_lun *tg_pt_gp_alua_lun;
 	struct se_node_acl *tg_pt_gp_alua_nacl;
-	struct work_struct tg_pt_gp_transition_work;
-	struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
 	u64			unpacked_lun;
 #define SE_LUN_LINK_MAGIC	0xffff7771
 	u32			lun_link_magic;
+	bool			lun_shutdown;
 	bool			lun_access_ro;
 	u32			lun_index;
......