Commit b1a2ecda authored by Bart Van Assche, committed by Nicholas Bellinger

target: Inline transport_cmd_check_stop()

The function transport_cmd_check_stop() has two callers. These callers
invoke this function as follows:
* transport_cmd_check_stop(cmd, true, false)
* transport_cmd_check_stop(cmd, false, true)
Hence inline this function into its callers.

This patch does not change any functionality but improves source
code readability.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Andy Grover <agrover@redhat.com>
Cc: David Disseldorp <ddiss@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 580ab13a
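
For readers skimming the diff below, here is a self-contained sketch of the pattern being applied, using invented names (check_stop(), check_stop_to_fabric(), write_pending_check_stop()) rather than the real kernel functions: once every call site passes compile-time constants for the boolean parameters, inlining lets each caller keep only the branches it actually exercises.

/*
 * Hypothetical stand-alone illustration of the refactoring; none of these
 * helpers are the kernel functions touched by the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct cmd {
	bool on_state_list;
	bool write_pending;
};

/* Before: one helper steered by two boolean flags. */
static int check_stop(struct cmd *c, bool remove_from_lists, bool write_pending)
{
	if (remove_from_lists)
		c->on_state_list = false;
	if (write_pending)
		c->write_pending = true;
	return 0;
}

/* After: first caller, formerly check_stop(c, true, false). */
static int check_stop_to_fabric(struct cmd *c)
{
	c->on_state_list = false;	/* only the remove_from_lists branch survives */
	return 0;
}

/* After: second caller, formerly check_stop(c, false, true). */
static int write_pending_check_stop(struct cmd *c)
{
	c->write_pending = true;	/* only the write_pending branch survives */
	return 0;
}

int main(void)
{
	struct cmd a = { .on_state_list = true }, b = { .on_state_list = true };

	check_stop(&a, true, false);	/* old calling convention */
	check_stop_to_fabric(&b);	/* inlined equivalent */
	write_pending_check_stop(&b);
	printf("%d %d %d\n", a.on_state_list, b.on_state_list, b.write_pending);
	return 0;
}
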
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -604,24 +604,18 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
-static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
-				    bool write_pending)
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
-	if (remove_from_lists) {
-		target_remove_from_state_list(cmd);
+	target_remove_from_state_list(cmd);
 
-		/*
-		 * Clear struct se_cmd->se_lun before the handoff to FE.
-		 */
-		cmd->se_lun = NULL;
-	}
+	/*
+	 * Clear struct se_cmd->se_lun before the handoff to FE.
+	 */
+	cmd->se_lun = NULL;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (write_pending)
-		cmd->t_state = TRANSPORT_WRITE_PENDING;
-
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
@@ -635,31 +629,18 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 		complete_all(&cmd->t_transport_stop_comp);
 		return 1;
 	}
-
 	cmd->transport_state &= ~CMD_T_ACTIVE;
-	if (remove_from_lists) {
-		/*
-		 * Some fabric modules like tcm_loop can release
-		 * their internally allocated I/O reference now and
-		 * struct se_cmd now.
-		 *
-		 * Fabric modules are expected to return '1' here if the
-		 * se_cmd being passed is released at this point,
-		 * or zero if not being released.
-		 */
-		if (cmd->se_tfo->check_stop_free != NULL) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			return cmd->se_tfo->check_stop_free(cmd);
-		}
-	}
-
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-	return 0;
-}
 
-static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
-{
-	return transport_cmd_check_stop(cmd, true, false);
+	/*
+	 * Some fabric modules like tcm_loop can release their internally
+	 * allocated I/O reference and struct se_cmd now.
+	 *
+	 * Fabric modules are expected to return '1' here if the se_cmd being
+	 * passed is released at this point, or zero if not being released.
+	 */
+	return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
+					    : 0;
 }
 
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
@@ -2385,6 +2366,7 @@ EXPORT_SYMBOL(target_alloc_sgl);
 sense_reason_t
 transport_generic_new_cmd(struct se_cmd *cmd)
 {
+	unsigned long flags;
 	int ret = 0;
 	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
 
@@ -2450,8 +2432,24 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 		target_execute_cmd(cmd);
 		return 0;
 	}
-	if (transport_cmd_check_stop(cmd, false, true))
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->t_state = TRANSPORT_WRITE_PENDING;
+	/*
+	 * Determine if frontend context caller is requesting the stopping of
+	 * this command for frontend exceptions.
+	 */
+	if (cmd->transport_state & CMD_T_STOP) {
+		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+			 __func__, __LINE__, cmd->tag);
+
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		complete_all(&cmd->t_transport_stop_comp);
 		return 0;
+	}
+	cmd->transport_state &= ~CMD_T_ACTIVE;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	ret = cmd->se_tfo->write_pending(cmd);
 	if (ret == -EAGAIN || ret == -ENOMEM)
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -47,7 +47,7 @@ struct target_core_fabric_ops {
 	u32 (*tpg_get_inst_index)(struct se_portal_group *);
 	/*
 	 * Optional to release struct se_cmd and fabric dependent allocated
-	 * I/O descriptor in transport_cmd_check_stop().
+	 * I/O descriptor after command execution has finished.
 	 *
 	 * Returning 1 will signal a descriptor has been released.
 	 * Returning 0 will signal a descriptor has not been released.
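
As a side note on the ->check_stop_free() contract documented in the hunk above: a minimal fabric-side implementation could simply drop the fabric's own command reference and report whether that released the descriptor. The sketch below is hypothetical (the function name is invented, and real fabric drivers such as tcm_loop add their own bookkeeping); it assumes the fabric obtained its reference via target_get_sess_cmd().

/*
 * Hypothetical example of a fabric ->check_stop_free() callback; not taken
 * verbatim from any in-tree driver.
 */
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static int example_fabric_check_stop_free(struct se_cmd *se_cmd)
{
	/*
	 * target_put_sess_cmd() returns 1 when the last reference was dropped
	 * and the command has been released, 0 otherwise, which matches the
	 * return-value contract described in the comment above.
	 */
	return target_put_sess_cmd(se_cmd);
}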