Commit 7d680f3b authored by Christoph Hellwig, committed by Nicholas Bellinger

target: replace various cmd flags with a transport state

Replace the various atomic_t variables used as flags in struct se_cmd with a single
transport_state bitmap that requires t_state_lock to be held for modifications.

In the target core that assumption generally holds true, but some recently added
code in the SRP target had to grow new lock calls.  I can't say I like the way
it messes with the command state directly, but let's leave that for later.

(Re-add missing ib_srpt.c changes that nab dropped..)
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent b01543df
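
For readers unfamiliar with the pattern, the sketch below is illustrative only (the function name is made up; the field, flag, and lock names come from this commit and include/target/target_core_base.h). It shows how a caller now sets and clears bits in cmd->transport_state under t_state_lock, where the old code used lock-free atomic_set() on per-flag atomic_t members:

	/* Illustrative sketch, not part of the commit: toggling transport_state
	 * bits on a struct se_cmd under t_state_lock, as the new scheme requires.
	 */
	static void example_mark_cmd_aborted(struct se_cmd *cmd)
	{
		unsigned long flags;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->transport_state |= CMD_T_ABORTED;	/* was atomic_set(&cmd->t_transport_aborted, 1) */
		cmd->transport_state &= ~CMD_T_ACTIVE;	/* was atomic_set(&cmd->t_transport_active, 0) */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}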
@@ -1378,7 +1378,9 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		break;
 	case SRPT_STATE_NEED_DATA:
 		/* DMA_TO_DEVICE (write) - RDMA read error. */
-		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		transport_generic_handle_data(&ioctx->cmd);
 		break;
 	case SRPT_STATE_CMD_RSP_SENT:
@@ -1387,7 +1389,9 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		 * not been received in time.
 		 */
 		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
 		break;
 	case SRPT_STATE_MGMT_RSP_SENT:
@@ -1494,6 +1498,7 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
 {
 	struct se_cmd *cmd;
 	enum srpt_command_state state;
+	unsigned long flags;

 	cmd = &ioctx->cmd;
 	state = srpt_get_cmd_state(ioctx);
@@ -1513,7 +1518,9 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
 			__func__, __LINE__, state);
 		break;
 	case SRPT_RDMA_WRITE_LAST:
-		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
+		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		break;
 	default:
 		printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
...
@@ -1363,7 +1363,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 	 * outstanding_r2ts reaches zero, go ahead and send the delayed
 	 * TASK_ABORTED status.
 	 */
-	if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
+	if (se_cmd->transport_state & CMD_T_ABORTED) {
 		if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
 			if (--cmd->outstanding_r2ts < 1) {
 				iscsit_stop_dataout_timer(cmd);
...
@@ -416,7 +416,7 @@ static int iscsit_handle_recovery_datain(
 	struct iscsi_datain_req *dr;
 	struct se_cmd *se_cmd = &cmd->se_cmd;

-	if (!atomic_read(&se_cmd->t_transport_complete)) {
+	if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
 		pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
 			cmd->init_task_tag);
 		return 0;
...
@@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write(
 	 * so if we have received all DataOUT we can safety ignore Initiator.
 	 */
 	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
-		if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
+		if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
 			pr_debug("WRITE ITT: 0x%08x: t_state: %d"
 				" never sent to transport\n",
 				cmd->init_task_tag, cmd->se_cmd.t_state);
@@ -314,7 +314,7 @@ static int iscsit_task_reassign_complete_read(
 		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
 	}

-	if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
+	if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
 		pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
 			" transport\n", cmd->init_task_tag,
 			cmd->se_cmd.t_state);
@@ -322,7 +322,7 @@ static int iscsit_task_reassign_complete_read(
 		return 0;
 	}

-	if (!atomic_read(&se_cmd->t_transport_complete)) {
+	if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
 		pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
 			" from transport\n", cmd->init_task_tag,
 			cmd->se_cmd.t_state);
...
@@ -150,7 +150,7 @@ static void core_tmr_drain_tmr_list(
 			continue;

 		spin_lock(&cmd->t_state_lock);
-		if (!atomic_read(&cmd->t_transport_active)) {
+		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
 			spin_unlock(&cmd->t_state_lock);
 			continue;
 		}
@@ -255,15 +255,15 @@ static void core_tmr_drain_task_list(
 			cmd->t_task_cdb[0]);
 		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
 			" t_task_cdbs: %d t_task_cdbs_left: %d"
-			" t_task_cdbs_sent: %d -- t_transport_active: %d"
-			" t_transport_stop: %d t_transport_sent: %d\n",
+			" t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d"
+			" CMD_T_STOP: %d CMD_T_SENT: %d\n",
 			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
 			cmd->t_task_list_num,
 			atomic_read(&cmd->t_task_cdbs_left),
 			atomic_read(&cmd->t_task_cdbs_sent),
-			atomic_read(&cmd->t_transport_active),
-			atomic_read(&cmd->t_transport_stop),
-			atomic_read(&cmd->t_transport_sent));
+			(cmd->transport_state & CMD_T_ACTIVE) != 0,
+			(cmd->transport_state & CMD_T_STOP) != 0,
+			(cmd->transport_state & CMD_T_SENT) != 0);

 		/*
 		 * If the command may be queued onto a workqueue cancel it now.
@@ -287,19 +287,19 @@ static void core_tmr_drain_task_list(
 		}
 		fe_count = atomic_read(&cmd->t_fe_count);

-		if (atomic_read(&cmd->t_transport_active)) {
-			pr_debug("LUN_RESET: got t_transport_active = 1 for"
+		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+			pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
-			atomic_set(&cmd->t_transport_aborted, 1);
+			cmd->transport_state |= CMD_T_ABORTED;
 			spin_unlock_irqrestore(&cmd->t_state_lock, flags);

 			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 			continue;
 		}
-		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+		pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
-		atomic_set(&cmd->t_transport_aborted, 1);
+		cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
@@ -339,7 +339,7 @@ static void core_tmr_drain_cmd_list(
 		if (prout_cmd == cmd)
 			continue;

-		atomic_set(&cmd->t_transport_queue_active, 0);
+		cmd->transport_state &= ~CMD_T_QUEUED;
 		atomic_dec(&qobj->queue_cnt);
 		list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
 	}
...
@@ -437,7 +437,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 /*	transport_cmd_check_stop():
  *
- *	'transport_off = 1' determines if t_transport_active should be cleared.
+ *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
  *	'transport_off = 2' determines if task_dev_state should be removed.
  *
  *	A non-zero u8 t_state sets cmd->t_state.
@@ -455,12 +455,11 @@ static int transport_cmd_check_stop(
 	 * Determine if IOCTL context caller in requesting the stopping of this
 	 * command for LUN shutdown purposes.
 	 */
-	if (atomic_read(&cmd->transport_lun_stop)) {
-		pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
-			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
-			cmd->se_tfo->get_task_tag(cmd));
+	if (cmd->transport_state & CMD_T_LUN_STOP) {
+		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

-		atomic_set(&cmd->t_transport_active, 0);
+		cmd->transport_state &= ~CMD_T_ACTIVE;
 		if (transport_off == 2)
 			transport_all_task_dev_remove_state(cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -472,9 +471,9 @@ static int transport_cmd_check_stop(
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
 	 */
-	if (atomic_read(&cmd->t_transport_stop)) {
-		pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
-			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+	if (cmd->transport_state & CMD_T_STOP) {
+		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+			__func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));

 		if (transport_off == 2)
@@ -492,7 +491,7 @@ static int transport_cmd_check_stop(
 		return 1;
 	}
 	if (transport_off) {
-		atomic_set(&cmd->t_transport_active, 0);
+		cmd->transport_state &= ~CMD_T_ACTIVE;
 		if (transport_off == 2) {
 			transport_all_task_dev_remove_state(cmd);
 			/*
@@ -540,16 +539,12 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 		return;

 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!atomic_read(&cmd->transport_dev_active)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		goto check_lun;
+	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
+		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
+		transport_all_task_dev_remove_state(cmd);
 	}
-	atomic_set(&cmd->transport_dev_active, 0);
-	transport_all_task_dev_remove_state(cmd);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

-check_lun:
 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
 	if (atomic_read(&cmd->transport_lun_active)) {
 		list_del(&cmd->se_lun_node);
@@ -585,7 +580,7 @@ static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
 	if (t_state) {
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		cmd->t_state = t_state;
-		atomic_set(&cmd->t_transport_active, 1);
+		cmd->transport_state |= CMD_T_ACTIVE;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	}
@@ -601,7 +596,7 @@ static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
 		list_add(&cmd->se_queue_node, &qobj->qobj_list);
 	else
 		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-	atomic_set(&cmd->t_transport_queue_active, 1);
+	cmd->transport_state |= CMD_T_QUEUED;
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

 	wake_up_interruptible(&qobj->thread_wq);
@@ -620,8 +615,7 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 	}
 	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

-	atomic_set(&cmd->t_transport_queue_active, 0);
-
+	cmd->transport_state &= ~CMD_T_QUEUED;
 	list_del_init(&cmd->se_queue_node);
 	atomic_dec(&qobj->queue_cnt);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -635,20 +629,14 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
 	unsigned long flags;

 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	if (!atomic_read(&cmd->t_transport_queue_active)) {
+	if (!(cmd->transport_state & CMD_T_QUEUED)) {
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return;
 	}
-	atomic_set(&cmd->t_transport_queue_active, 0);
+	cmd->transport_state &= ~CMD_T_QUEUED;
 	atomic_dec(&qobj->queue_cnt);
 	list_del_init(&cmd->se_queue_node);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-	if (atomic_read(&cmd->t_transport_queue_active)) {
-		pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
-			cmd->se_tfo->get_task_tag(cmd),
-			atomic_read(&cmd->t_transport_queue_active));
-	}
 }

 /*
@@ -719,7 +707,7 @@ void transport_complete_task(struct se_task *task, int success)
 	}

 	if (!success)
-		cmd->t_tasks_failed = 1;
+		cmd->transport_state |= CMD_T_FAILED;

 	/*
 	 * Decrement the outstanding t_task_cdbs_left count. The last
@@ -731,16 +719,16 @@ void transport_complete_task(struct se_task *task, int success)
 		return;
 	}

-	if (cmd->t_tasks_failed) {
+	if (cmd->transport_state & CMD_T_FAILED) {
 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		INIT_WORK(&cmd->work, target_complete_failure_work);
 	} else {
-		atomic_set(&cmd->t_transport_complete, 1);
+		cmd->transport_state |= CMD_T_COMPLETE;
 		INIT_WORK(&cmd->work, target_complete_ok_work);
 	}

 	cmd->t_state = TRANSPORT_COMPLETE;
-	atomic_set(&cmd->t_transport_active, 1);
+	cmd->transport_state |= CMD_T_ACTIVE;
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

 	queue_work(target_completion_wq, &cmd->work);
@@ -1488,7 +1476,7 @@ void transport_init_se_cmd(
 	init_completion(&cmd->t_transport_stop_comp);
 	init_completion(&cmd->cmd_wait_comp);
 	spin_lock_init(&cmd->t_state_lock);
-	atomic_set(&cmd->transport_dev_active, 1);
+	cmd->transport_state = CMD_T_DEV_ACTIVE;

 	cmd->se_tfo = tfo;
 	cmd->se_sess = se_sess;
@@ -1618,7 +1606,7 @@ int transport_handle_cdb_direct(
 		return -EINVAL;
 	}
 	/*
-	 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
+	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
 	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
 	 * in existing usage to ensure that outstanding descriptors are handled
 	 * correctly during shutdown via transport_wait_for_tasks()
@@ -1627,7 +1615,8 @@ int transport_handle_cdb_direct(
 	 * this to be called for initial descriptor submission.
 	 */
 	cmd->t_state = TRANSPORT_NEW_CMD;
-	atomic_set(&cmd->t_transport_active, 1);
+	cmd->transport_state |= CMD_T_ACTIVE;
+
 	/*
 	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
 	 * so follow TRANSPORT_NEW_CMD processing thread context usage
@@ -1859,14 +1848,14 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
 		cmd->t_state, cmd->scsi_sense_reason);
 	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
-		" t_transport_active: %d t_transport_stop: %d"
-		" t_transport_sent: %d\n", cmd->t_task_list_num,
+		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
+		cmd->t_task_list_num,
 		atomic_read(&cmd->t_task_cdbs_left),
 		atomic_read(&cmd->t_task_cdbs_sent),
 		atomic_read(&cmd->t_task_cdbs_ex_left),
-		atomic_read(&cmd->t_transport_active),
-		atomic_read(&cmd->t_transport_stop),
-		atomic_read(&cmd->t_transport_sent));
+		(cmd->transport_state & CMD_T_ACTIVE) != 0,
+		(cmd->transport_state & CMD_T_STOP) != 0,
+		(cmd->transport_state & CMD_T_SENT) != 0);

 	/*
 	 * For SAM Task Attribute emulation for failed struct se_cmd
@@ -2125,7 +2114,7 @@ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_c
 		if (atomic_read(&cmd->t_task_cdbs_sent) ==
 		    cmd->t_task_list_num)
-			atomic_set(&cmd->t_transport_sent, 1);
+			cmd->transport_state |= CMD_T_SENT;

 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2136,8 +2125,9 @@ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_c
 		if (error != 0) {
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			task->task_flags &= ~TF_ACTIVE;
+			cmd->transport_state &= ~CMD_T_SENT;
 			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			atomic_set(&cmd->t_transport_sent, 0);

 			transport_stop_tasks_for_cmd(cmd);
 			transport_generic_request_failure(cmd);
 		}
@@ -3420,8 +3410,8 @@ static void transport_put_cmd(struct se_cmd *cmd)
 		goto out_busy;
 	}

-	if (atomic_read(&cmd->transport_dev_active)) {
-		atomic_set(&cmd->transport_dev_active, 0);
+	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
+		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
 		transport_all_task_dev_remove_state(cmd);
 		free_tasks = 1;
 	}
@@ -3859,8 +3849,10 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	if (task_cdbs < 0)
 		goto out_fail;
 	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+		spin_lock_irq(&cmd->t_state_lock);
 		cmd->t_state = TRANSPORT_COMPLETE;
-		atomic_set(&cmd->t_transport_active, 1);
+		cmd->transport_state |= CMD_T_ACTIVE;
+		spin_unlock_irq(&cmd->t_state_lock);

 		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
 			u8 ua_asc = 0, ua_ascq = 0;
@@ -3941,9 +3933,9 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	/*
 	 * Clear the se_cmd for WRITE_PENDING status in order to set
-	 * cmd->t_transport_active=0 so that transport_generic_handle_data
-	 * can be called from HW target mode interrupt code. This is safe
-	 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
+	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
+	 * from HW target mode interrupt code. This is safe to be called
+	 * with transport_off=1 before the cmd->se_tfo->write_pending
 	 * because the se_cmd->se_lun pointer is not being cleared.
 	 */
 	transport_cmd_check_stop(cmd, 1, 0);
@@ -4129,15 +4121,16 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	 * be stopped, we can safely ignore this struct se_cmd.
 	 */
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (atomic_read(&cmd->t_transport_stop)) {
-		atomic_set(&cmd->transport_lun_stop, 0);
-		pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
-			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
+	if (cmd->transport_state & CMD_T_STOP) {
+		cmd->transport_state &= ~CMD_T_LUN_STOP;
+
+		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
+			cmd->se_tfo->get_task_tag(cmd));
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		transport_cmd_check_stop(cmd, 1, 0);
 		return -EPERM;
 	}
-	atomic_set(&cmd->transport_lun_fe_stop, 1);
+	cmd->transport_state |= CMD_T_LUN_FE_STOP;
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
@@ -4183,7 +4176,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 			"_lun_stop for ITT: 0x%08x\n",
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
-		atomic_set(&cmd->transport_lun_stop, 1);
+		cmd->transport_state |= CMD_T_LUN_STOP;
 		spin_unlock(&cmd->t_state_lock);

 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -4213,11 +4206,11 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 			cmd->se_tfo->get_task_tag(cmd));

 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-		if (!atomic_read(&cmd->transport_dev_active)) {
+		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 			goto check_cond;
 		}
-		atomic_set(&cmd->transport_dev_active, 0);
+		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
 		transport_all_task_dev_remove_state(cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
@@ -4237,7 +4230,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 		 * finished accessing it.
 		 */
 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-		if (atomic_read(&cmd->transport_lun_fe_stop)) {
+		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
 			pr_debug("SE_LUN[%d] - Detected FE stop for"
 				" struct se_cmd: %p ITT: 0x%08x\n",
 				lun->unpacked_lun,
@@ -4315,8 +4308,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
 	 * has completed its operation on the struct se_cmd.
 	 */
-	if (atomic_read(&cmd->transport_lun_stop)) {
-
+	if (cmd->transport_state & CMD_T_LUN_STOP) {
 		pr_debug("wait_for_tasks: Stopping"
 			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
 			"_stop_comp); for ITT: 0x%08x\n",
@@ -4344,18 +4336,19 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 			"stop_comp); for ITT: 0x%08x\n",
 			cmd->se_tfo->get_task_tag(cmd));

-		atomic_set(&cmd->transport_lun_stop, 0);
+		cmd->transport_state &= ~CMD_T_LUN_STOP;
 	}
-	if (!atomic_read(&cmd->t_transport_active) ||
-	    atomic_read(&cmd->t_transport_aborted)) {
+
+	if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+	    (cmd->transport_state & CMD_T_ABORTED)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return false;
 	}

-	atomic_set(&cmd->t_transport_stop, 1);
+	cmd->transport_state |= CMD_T_STOP;

 	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
-		" i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
+		" i_state: %d, t_state: %d, CMD_T_STOP\n",
 		cmd, cmd->se_tfo->get_task_tag(cmd),
 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
@@ -4366,8 +4359,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 	wait_for_completion(&cmd->t_transport_stop_comp);

 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	atomic_set(&cmd->t_transport_active, 0);
-	atomic_set(&cmd->t_transport_stop, 0);
+	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

 	pr_debug("wait_for_tasks: Stopped wait_for_compltion("
 		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
@@ -4596,7 +4588,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
 	int ret = 0;

-	if (atomic_read(&cmd->t_transport_aborted) != 0) {
+	if (cmd->transport_state & CMD_T_ABORTED) {
 		if (!send_status ||
 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
 			return 1;
@@ -4633,7 +4625,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	 */
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-			atomic_inc(&cmd->t_transport_aborted);
+			cmd->transport_state |= CMD_T_ABORTED;
 			smp_mb__after_atomic_inc();
 		}
 	}
...
@@ -555,23 +555,24 @@ struct se_cmd {
 	unsigned char		*t_task_cdb;
 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
 	unsigned long long	t_task_lba;
-	int			t_tasks_failed;
 	u32			t_tasks_sg_chained_no;
 	atomic_t		t_fe_count;
 	atomic_t		t_se_count;
 	atomic_t		t_task_cdbs_left;
 	atomic_t		t_task_cdbs_ex_left;
 	atomic_t		t_task_cdbs_sent;
-	atomic_t		t_transport_aborted;
-	atomic_t		t_transport_active;
-	atomic_t		t_transport_complete;
-	atomic_t		t_transport_queue_active;
-	atomic_t		t_transport_sent;
-	atomic_t		t_transport_stop;
-	atomic_t		transport_dev_active;
+	unsigned int		transport_state;
+#define CMD_T_ABORTED		(1 << 0)
+#define CMD_T_ACTIVE		(1 << 1)
+#define CMD_T_COMPLETE		(1 << 2)
+#define CMD_T_QUEUED		(1 << 3)
+#define CMD_T_SENT		(1 << 4)
+#define CMD_T_STOP		(1 << 5)
+#define CMD_T_FAILED		(1 << 6)
+#define CMD_T_LUN_STOP		(1 << 7)
+#define CMD_T_LUN_FE_STOP	(1 << 8)
+#define CMD_T_DEV_ACTIVE	(1 << 9)
 	atomic_t		transport_lun_active;
-	atomic_t		transport_lun_fe_stop;
-	atomic_t		transport_lun_stop;
 	spinlock_t		t_state_lock;
 	struct completion	t_transport_stop_comp;
 	struct completion	transport_lun_fe_stop_comp;
...