Commit 34a99158 authored by Dan Williams

isci: kill 'get/set' macros

Most of these simple dereference macros are longer than their open-coded
equivalents.  Deleting enum sci_controller_mode is thrown in for good
measure.
Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 89a7301f
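To make the conversion pattern concrete before the hunks, here is a minimal, self-contained sketch of the before/after shape of this change. The macro and field names (sci_port_get_controller, owning_controller) are taken from the diff below; the cut-down struct definitions and the main() harness are illustrative assumptions, not driver code.

#include <stdio.h>

/* Cut-down stand-ins for the driver structures (illustrative only). */
struct isci_host { int id; };
struct isci_port { struct isci_host *owning_controller; };

/* Before: a "get" macro that is longer than the dereference it hides. */
#define sci_port_get_controller(this_port) \
	((this_port)->owning_controller)

int main(void)
{
	struct isci_host host = { .id = 0 };
	struct isci_port port = { .owning_controller = &host };

	/* Old style: go through the accessor macro. */
	struct isci_host *a = sci_port_get_controller(&port);

	/* New style: the open-coded equivalent used throughout this patch. */
	struct isci_host *b = port.owning_controller;

	printf("same controller: %d\n", a == b);	/* prints 1 */
	return 0;
}

The same substitution is applied mechanically to the other accessors (sci_phy_get_index, sci_remote_device_get_controller, and so on) in the hunks that follow.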
@@ -2627,7 +2627,7 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost,
 return status;
 set_bit(IREQ_ACTIVE, &ireq->flags);
-sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
+sci_controller_post_request(ihost, ireq->post_context);
 return SCI_SUCCESS;
 }
@@ -2707,7 +2707,7 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
 }
 set_bit(IREQ_ACTIVE, &ireq->flags);
-sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
+sci_controller_post_request(ihost, ireq->post_context);
 return SCI_SUCCESS;
 }
@@ -2747,9 +2747,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
 return SCI_SUCCESS;
 case SCI_SUCCESS:
 set_bit(IREQ_ACTIVE, &ireq->flags);
-sci_controller_post_request(ihost,
-sci_request_get_post_context(ireq));
+sci_controller_post_request(ihost, ireq->post_context);
 break;
 default:
 break;
......
@@ -172,6 +172,7 @@ struct isci_host {
 /* XXX kill */
 bool phy_startup_timer_pending;
 u32 next_phy_to_start;
+/* XXX convert to unsigned long and use bitops */
 u8 invalid_phy_mask;
 /* TODO attempt dynamic interrupt coalescing scheme */
@@ -359,13 +360,8 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
 return dev->port->ha->lldd_ha;
 }
-/**
- * sci_controller_get_protocol_engine_group() -
- *
- * This macro returns the protocol engine group for this controller object.
- * Presently we only support protocol engine group 0 so just return that
- */
-#define sci_controller_get_protocol_engine_group(controller) 0
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
 /* see sci_controller_io_tag_allocate|free for how seq and tci are built */
 #define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
@@ -385,16 +381,6 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
 return SCU_SSP_REMOTE_NODE_COUNT;
 }
-/**
- * sci_controller_set_invalid_phy() -
- *
- * This macro will set the bit in the invalid phy mask for this controller
- * object. This is used to control messages reported for invalid link up
- * notifications.
- */
-#define sci_controller_set_invalid_phy(controller, phy) \
-((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))
 /**
  * sci_controller_clear_invalid_phy() -
  *
......
@@ -73,11 +73,6 @@
 #define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
-enum sci_controller_mode {
-SCI_MODE_SPEED,
-SCI_MODE_SIZE /* deprecated */
-};
 #define SCI_MAX_PHYS (4UL)
 #define SCI_MAX_PORTS SCI_MAX_PHYS
 #define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
......
@@ -265,10 +265,11 @@ static void phy_sata_timeout(unsigned long data)
  * port (i.e. it's contained in the dummy port). !NULL All other
  * values indicate a handle/pointer to the port containing the phy.
  */
-struct isci_port *phy_get_non_dummy_port(
-struct isci_phy *iphy)
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
 {
-if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT)
+struct isci_port *iport = iphy->owning_port;
+if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
 return NULL;
 return iphy->owning_port;
@@ -858,10 +859,9 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
 struct dev_to_host_fis *frame_header;
 u32 *fis_frame_data;
-result = sci_unsolicited_frame_control_get_header(
-&(sci_phy_get_controller(iphy)->uf_control),
-frame_index,
-(void **)&frame_header);
+result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+frame_index,
+(void **)&frame_header);
 if (result != SCI_SUCCESS)
 return result;
@@ -1090,6 +1090,8 @@ static void scu_link_layer_tx_hard_reset(
 static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
 {
 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+struct isci_port *iport = iphy->owning_port;
+struct isci_host *ihost = iport->owning_controller;
 /*
  * @todo We need to get to the controller to place this PE in a
@@ -1100,14 +1102,14 @@ static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
 scu_link_layer_stop_protocol_engine(iphy);
 if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
-sci_controller_link_down(sci_phy_get_controller(iphy),
-phy_get_non_dummy_port(iphy),
-iphy);
+sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
 }
 static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
 {
 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+struct isci_port *iport = iphy->owning_port;
+struct isci_host *ihost = iport->owning_controller;
 scu_link_layer_stop_protocol_engine(iphy);
 scu_link_layer_start_oob(iphy);
@@ -1117,9 +1119,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
 iphy->bcn_received_while_port_unassigned = false;
 if (iphy->sm.previous_state_id == SCI_PHY_READY)
-sci_controller_link_down(sci_phy_get_controller(iphy),
-phy_get_non_dummy_port(iphy),
-iphy);
+sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
 sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
 }
@@ -1127,11 +1127,10 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
 static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
 {
 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+struct isci_port *iport = iphy->owning_port;
+struct isci_host *ihost = iport->owning_controller;
-sci_controller_link_up(sci_phy_get_controller(iphy),
-phy_get_non_dummy_port(iphy),
-iphy);
+sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
 }
 static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
......
@@ -440,23 +440,6 @@ enum sci_phy_states {
 SCI_PHY_FINAL,
 };
-/**
- * sci_phy_get_index() -
- *
- * This macro returns the phy index for the specified phy
- */
-#define sci_phy_get_index(phy) \
-((phy)->phy_index)
-/**
- * sci_phy_get_controller() - This macro returns the controller for this
- * phy
- *
- *
- */
-#define sci_phy_get_controller(phy) \
-(sci_port_get_controller((phy)->owning_port))
 void sci_phy_construct(
 struct isci_phy *iphy,
 struct isci_port *iport,
......
@@ -654,7 +654,7 @@ static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy
 void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
 bool do_notify_user)
 {
-struct isci_host *ihost = sci_port_get_controller(iport);
+struct isci_host *ihost = iport->owning_controller;
 iport->active_phy_mask &= ~(1 << iphy->phy_index);
@@ -678,7 +678,7 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
  * invalid link.
  */
 if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
-sci_controller_set_invalid_phy(ihost, iphy);
+ihost->invalid_phy_mask |= 1 << iphy->phy_index;
 dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
 }
 }
......
@@ -210,23 +210,6 @@ enum sci_port_states {
 };
-/**
- * sci_port_get_controller() -
- *
- * Helper macro to get the owning controller of this port
- */
-#define sci_port_get_controller(this_port) \
-((this_port)->owning_controller)
-/**
- * sci_port_get_index() -
- *
- * This macro returns the physical port index for this port object
- */
-#define sci_port_get_index(this_port) \
-((this_port)->physical_port_index)
 static inline void sci_port_decrement_request_count(struct isci_port *iport)
 {
 if (WARN_ONCE(iport->started_request_count == 0,
......
@@ -367,10 +367,10 @@ static void sci_mpc_agent_link_up(struct isci_host *ihost,
 if (!iport)
 return;
-port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy));
+port_agent->phy_ready_mask |= (1 << iphy->phy_index);
 sci_port_link_up(iport, iphy);
-if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy))))
-port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy));
+if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+port_agent->phy_configured_mask |= (1 << iphy->phy_index);
 }
 /**
@@ -404,10 +404,8 @@ static void sci_mpc_agent_link_down(
  * rebuilding the port with the phys that remain in the ready
  * state.
  */
-port_agent->phy_ready_mask &=
-~(1 << sci_phy_get_index(iphy));
-port_agent->phy_configured_mask &=
-~(1 << sci_phy_get_index(iphy));
+port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
 /*
  * Check to see if there are more phys waiting to be
@@ -643,7 +641,7 @@ static void sci_apc_agent_link_down(
 struct isci_port *iport,
 struct isci_phy *iphy)
 {
-port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy));
+port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
 if (!iport)
 return;
......
@@ -456,7 +456,7 @@ static void sci_remote_device_start_request(struct isci_remote_device *idev,
 sci_port_complete_io(iport, idev, ireq);
 else {
 kref_get(&idev->kref);
-sci_remote_device_increment_request_count(idev);
+idev->started_request_count++;
 }
 }
@@ -636,7 +636,7 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
  * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
  */
 sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
-} else if (sci_remote_device_get_request_count(idev) == 0)
+} else if (idev->started_request_count == 0)
 sci_change_state(sm, SCI_STP_DEV_IDLE);
 break;
 case SCI_SMP_DEV_CMD:
@@ -650,10 +650,10 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
 if (status != SCI_SUCCESS)
 break;
-if (sci_remote_device_get_request_count(idev) == 0)
+if (idev->started_request_count == 0)
 sci_remote_node_context_destruct(&idev->rnc,
 rnc_destruct_done,
 idev);
 break;
 }
@@ -761,26 +761,17 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
 return status;
 }
-/**
- *
- * @sci_dev:
- * @request:
- *
- * This method takes the request and bulids an appropriate SCU context for the
- * request and then requests the controller to post the request. none
- */
-void sci_remote_device_post_request(
-struct isci_remote_device *idev,
-u32 request)
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
 {
+struct isci_port *iport = idev->owning_port;
 u32 context;
-context = sci_remote_device_build_command_context(idev, request);
+context = request |
+(ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+(iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+idev->rnc.remote_node_index;
-sci_controller_post_request(
-sci_remote_device_get_controller(idev),
-context
-);
+sci_controller_post_request(iport->owning_controller, context);
 }
 /* called once the remote node context has transisitioned to a
@@ -893,7 +884,7 @@ static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine
 static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
 {
 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-struct isci_host *ihost = sci_remote_device_get_controller(idev);
+struct isci_host *ihost = idev->owning_port->owning_controller;
 isci_remote_device_not_ready(ihost, idev,
 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
@@ -961,7 +952,7 @@ static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_stat
 static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
 {
 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-struct isci_host *ihost = sci_remote_device_get_controller(idev);
+struct isci_host *ihost = idev->owning_port->owning_controller;
 BUG_ON(idev->working_request == NULL);
@@ -972,7 +963,7 @@ static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state
 static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
 {
 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-struct isci_host *ihost = sci_remote_device_get_controller(idev);
+struct isci_host *ihost = idev->owning_port->owning_controller;
 if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
 isci_remote_device_not_ready(ihost, idev,
@@ -982,7 +973,7 @@ static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base
 static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
 {
 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-struct isci_host *ihost = sci_remote_device_get_controller(idev);
+struct isci_host *ihost = idev->owning_port->owning_controller;
 isci_remote_device_ready(ihost, idev);
 }
@@ -990,7 +981,7 @@ static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_stat
 static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
 {
 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
-struct isci_host *ihost = sci_remote_device_get_controller(idev);
+struct isci_host *ihost = idev->owning_port->owning_controller;
 BUG_ON(idev->working_request == NULL);
......
@@ -305,91 +305,18 @@ static inline bool dev_is_expander(struct domain_device *dev)
 return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
 }
-/**
- * sci_remote_device_increment_request_count() -
- *
- * This macro incrments the request count for this device
- */
-#define sci_remote_device_increment_request_count(idev) \
-((idev)->started_request_count++)
-/**
- * sci_remote_device_decrement_request_count() -
- *
- * This macro decrements the request count for this device. This count will
- * never decrment past 0.
- */
-#define sci_remote_device_decrement_request_count(idev) \
-((idev)->started_request_count > 0 ? \
-(idev)->started_request_count-- : 0)
-/**
- * sci_remote_device_get_request_count() -
- *
- * This is a helper macro to return the current device request count.
- */
-#define sci_remote_device_get_request_count(idev) \
-((idev)->started_request_count)
-/**
- * sci_remote_device_get_controller() -
- *
- * This macro returns the controller object that contains this device object
- */
-#define sci_remote_device_get_controller(idev) \
-sci_port_get_controller(sci_remote_device_get_port(idev))
-/**
- * sci_remote_device_get_port() -
- *
- * This macro returns the owning port of this device
- */
-#define sci_remote_device_get_port(idev) \
-((idev)->owning_port)
-/**
- * sci_remote_device_get_controller_peg() -
- *
- * This macro returns the controllers protocol engine group
- */
-#define sci_remote_device_get_controller_peg(idev) \
-(\
-sci_controller_get_protocol_engine_group(\
-sci_port_get_controller(\
-sci_remote_device_get_port(idev) \
-) \
-) \
-)
-/**
- * sci_remote_device_get_index() -
- *
- * This macro returns the remote node index for this device object
- */
-#define sci_remote_device_get_index(idev) \
-((idev)->rnc.remote_node_index)
-/**
- * sci_remote_device_build_command_context() -
- *
- * This macro builds a remote device context for the SCU post request operation
- */
-#define sci_remote_device_build_command_context(device, command) \
-((command) \
-| (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \
-| ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \
-| (sci_remote_device_get_index((device))) \
-)
-/**
- * sci_remote_device_set_working_request() -
- *
- * This macro makes the working request assingment for the remote device
- * object. To clear the working request use this macro with a NULL request
- * object.
- */
-#define sci_remote_device_set_working_request(device, request) \
-((device)->working_request = (request))
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+/* XXX delete this voodoo when converting to the top-level device
+ * reference count
+ */
+if (WARN_ONCE(idev->started_request_count == 0,
+"%s: tried to decrement started_request_count past 0!?",
+__func__))
+/* pass */;
+else
+idev->started_request_count--;
+}
 enum sci_status sci_remote_device_frame_handler(
 struct isci_remote_device *idev,
......
@@ -111,7 +111,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
 struct isci_host *ihost;
 __le64 sas_addr;
-ihost = sci_remote_device_get_controller(idev);
+ihost = idev->owning_port->owning_controller;
 rnc = sci_rnc_by_id(ihost, rni);
 memset(rnc, 0, sizeof(union scu_remote_node_context)
......
@@ -204,9 +204,6 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
 bool sci_remote_node_context_is_ready(
 struct sci_remote_node_context *sci_rnc);
-#define sci_remote_node_context_get_remote_node_index(rcn) \
-((rnc)->remote_node_index)
 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
 u32 event_code);
 enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
......
@@ -211,22 +211,21 @@ static void scu_ssp_reqeust_construct_task_context(
 struct isci_remote_device *idev;
 struct isci_port *iport;
-idev = sci_request_get_device(ireq);
-iport = sci_request_get_port(ireq);
+idev = ireq->target_device;
+iport = idev->owning_port;
 /* Fill in the TC with the its required data */
 task_context->abort = 0;
 task_context->priority = 0;
 task_context->initiator_request = 1;
 task_context->connection_rate = idev->connection_rate;
-task_context->protocol_engine_index =
-sci_controller_get_protocol_engine_group(controller);
-task_context->logical_port_index = sci_port_get_index(iport);
+task_context->protocol_engine_index = ISCI_PEG;
+task_context->logical_port_index = iport->physical_port_index;
 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
 task_context->valid = SCU_TASK_CONTEXT_VALID;
 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
-task_context->remote_node_index = sci_remote_device_get_index(idev);
+task_context->remote_node_index = idev->rnc.remote_node_index;
 task_context->command_code = 0;
 task_context->link_layer_control = 0;
@@ -242,9 +241,8 @@ static void scu_ssp_reqeust_construct_task_context(
 task_context->task_phase = 0x01;
 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-(sci_controller_get_protocol_engine_group(controller) <<
-SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-(sci_port_get_index(iport) <<
+(ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+(iport->physical_port_index <<
 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
 ISCI_TAG_TCI(ireq->io_tag));
@@ -349,23 +347,21 @@ static void scu_sata_reqeust_construct_task_context(
 struct isci_remote_device *idev;
 struct isci_port *iport;
-idev = sci_request_get_device(ireq);
-iport = sci_request_get_port(ireq);
+idev = ireq->target_device;
+iport = idev->owning_port;
 /* Fill in the TC with the its required data */
 task_context->abort = 0;
 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
 task_context->initiator_request = 1;
 task_context->connection_rate = idev->connection_rate;
-task_context->protocol_engine_index =
-sci_controller_get_protocol_engine_group(controller);
-task_context->logical_port_index =
-sci_port_get_index(iport);
+task_context->protocol_engine_index = ISCI_PEG;
+task_context->logical_port_index = iport->physical_port_index;
 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
 task_context->valid = SCU_TASK_CONTEXT_VALID;
 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
-task_context->remote_node_index = sci_remote_device_get_index(idev);
+task_context->remote_node_index = idev->rnc.remote_node_index;
 task_context->command_code = 0;
 task_context->link_layer_control = 0;
@@ -385,11 +381,10 @@ static void scu_sata_reqeust_construct_task_context(
 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
-(sci_controller_get_protocol_engine_group(controller) <<
-SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
-(sci_port_get_index(iport) <<
-SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
-ISCI_TAG_TCI(ireq->io_tag));
+(ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+(iport->physical_port_index <<
+SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ISCI_TAG_TCI(ireq->io_tag));
 /*
  * Copy the physical address for the command buffer to the SCU Task
  * Context. We must offset the command buffer by 4 bytes because the
@@ -716,10 +711,8 @@ sci_io_request_terminate(struct isci_request *ireq)
 switch (state) {
 case SCI_REQ_CONSTRUCTED:
-sci_request_set_status(ireq,
-SCU_TASK_DONE_TASK_ABORT,
-SCI_FAILURE_IO_TERMINATED);
+ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 return SCI_SUCCESS;
 case SCI_REQ_STARTED:
@@ -848,9 +841,8 @@ request_started_state_tc_event(struct isci_request *ireq,
 */
 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-sci_request_set_status(ireq,
-SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
 break;
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
 /* There are times when the SCU hardware will return an early
@@ -868,13 +860,11 @@ request_started_state_tc_event(struct isci_request *ireq,
 word_cnt);
 if (resp->status == 0) {
-sci_request_set_status(ireq,
-SCU_TASK_DONE_GOOD,
-SCI_SUCCESS_IO_DONE_EARLY);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
 } else {
-sci_request_set_status(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
 }
 break;
 }
@@ -885,9 +875,8 @@ request_started_state_tc_event(struct isci_request *ireq,
 &ireq->ssp.rsp,
 word_cnt);
-sci_request_set_status(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
 break;
 }
@@ -900,13 +889,12 @@ request_started_state_tc_event(struct isci_request *ireq,
 datapres = resp_iu->datapres;
 if (datapres == 1 || datapres == 2) {
-sci_request_set_status(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
-} else
-sci_request_set_status(ireq,
-SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+} else {
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
+}
 break;
 /* only stp device gets suspended. */
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
@@ -921,15 +909,13 @@ request_started_state_tc_event(struct isci_request *ireq,
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
 if (ireq->protocol == SCIC_STP_PROTOCOL) {
-sci_request_set_status(ireq,
-SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
-SCU_COMPLETION_TL_STATUS_SHIFT,
-SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
+ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+SCU_COMPLETION_TL_STATUS_SHIFT;
+ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
 } else {
-sci_request_set_status(ireq,
-SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
-SCU_COMPLETION_TL_STATUS_SHIFT,
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+SCU_COMPLETION_TL_STATUS_SHIFT;
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 }
 break;
@@ -944,10 +930,9 @@ request_started_state_tc_event(struct isci_request *ireq,
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
-sci_request_set_status(ireq,
-SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
-SCU_COMPLETION_TL_STATUS_SHIFT,
-SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
+ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+SCU_COMPLETION_TL_STATUS_SHIFT;
+ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
 break;
 /* neither ssp nor stp gets suspended. */
@@ -967,11 +952,9 @@ request_started_state_tc_event(struct isci_request *ireq,
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
 default:
-sci_request_set_status(
-ireq,
-SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
-SCU_COMPLETION_TL_STATUS_SHIFT,
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+SCU_COMPLETION_TL_STATUS_SHIFT;
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 break;
 }
@@ -991,9 +974,8 @@ request_aborting_state_tc_event(struct isci_request *ireq,
 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
-sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
-SCI_FAILURE_IO_TERMINATED);
+ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
@@ -1012,9 +994,8 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq
 {
 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
 break;
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
@@ -1036,10 +1017,8 @@ static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq
  * If a NAK was received, then it is up to the user to retry
  * the request.
  */
-sci_request_set_status(ireq,
-SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 }
@@ -1057,12 +1036,10 @@ smp_request_await_response_tc_event(struct isci_request *ireq,
  * unexpected. but if the TC has success status, we
  * complete the IO anyway.
  */
-sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
@@ -1074,20 +1051,16 @@ smp_request_await_response_tc_event(struct isci_request *ireq,
  * these SMP_XXX_XX_ERR status. For these type of error,
  * we ask ihost user to retry the request.
  */
-sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
-SCI_FAILURE_RETRY_REQUIRED);
+ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
+ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 default:
 /* All other completion status cause the IO to be complete. If a NAK
  * was received, then it is up to the user to retry the request
  */
-sci_request_set_status(ireq,
-SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 }
@@ -1101,9 +1074,8 @@ smp_request_await_tc_event(struct isci_request *ireq,
 {
 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 default:
@@ -1111,10 +1083,8 @@ smp_request_await_tc_event(struct isci_request *ireq,
  * complete. If a NAK was received, then it is up to
  * the user to retry the request.
  */
-sci_request_set_status(ireq,
-SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 }
@@ -1171,9 +1141,8 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
 {
 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
 break;
@@ -1182,10 +1151,8 @@ stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
  * complete. If a NAK was received, then it is up to
  * the user to retry the request.
  */
-sci_request_set_status(ireq,
-SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 }
@@ -1363,10 +1330,8 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-sci_request_set_status(ireq,
-SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
 break;
@@ -1375,10 +1340,8 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
  * complete. If a NAK was received, then it is up to
  * the user to retry the request.
  */
-sci_request_set_status(ireq,
-SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 }
@@ -1426,11 +1389,8 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq,
  * If a NAK was received, then it is up to the user to retry
  * the request.
  */
-sci_request_set_status(
-ireq,
-SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 }
@@ -1438,15 +1398,6 @@ pio_data_out_tx_done_tc_event(struct isci_request *ireq,
 return status;
 }
-static void sci_stp_request_udma_complete_request(
-struct isci_request *ireq,
-u32 scu_status,
-enum sci_status sci_status)
-{
-sci_request_set_status(ireq, scu_status, sci_status);
-sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
-}
 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
 u32 frame_index)
 {
@@ -1512,13 +1463,12 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 if (resp_iu->datapres == 0x01 ||
 resp_iu->datapres == 0x02) {
-sci_request_set_status(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
-} else
-sci_request_set_status(ireq,
-SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+} else {
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
+}
 } else {
 /* not a response frame, why did it get forwarded? */
 dev_err(&ihost->pdev->dev,
@@ -1567,9 +1517,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
 smp_resp, word_cnt);
-sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
 } else {
 /*
@@ -1584,10 +1533,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 frame_index,
 rsp_hdr->frame_type);
-sci_request_set_status(ireq,
-SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 }
@@ -1602,16 +1549,14 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 case SCI_REQ_STP_UDMA_WAIT_D2H:
 /* Use the general frame handler to copy the resposne data */
-status = sci_stp_request_udma_general_frame_handler(ireq,
-frame_index);
+status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
 if (status != SCI_SUCCESS)
 return status;
-sci_stp_request_udma_complete_request(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 return SCI_SUCCESS;
 case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
@@ -1645,8 +1590,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 frame_buffer);
 /* The command has completed with error */
-sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
 break;
 default:
@@ -1655,8 +1600,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 "violation occurred\n", __func__, stp_req,
 frame_index);
-sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
-SCI_FAILURE_PROTOCOL_VIOLATION);
+ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
 break;
 }
@@ -1753,10 +1698,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 frame_header,
 frame_buffer);
-sci_request_set_status(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
@@ -1800,10 +1743,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 frame_index,
 frame_header->fis_type);
-sci_request_set_status(ireq,
-SCU_TASK_DONE_GOOD,
-SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 /* Frame is decoded return it to the controller */
@@ -1833,10 +1774,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 return status;
 if ((stp_req->status & ATA_BUSY) == 0) {
-sci_request_set_status(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 } else {
 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
@@ -1873,9 +1812,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 frame_buffer);
 /* The command has completed with error */
-sci_request_set_status(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
 break;
 default:
@@ -1886,9 +1824,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 stp_req,
 frame_index);
-sci_request_set_status(ireq,
-SCU_TASK_DONE_UNEXP_FIS,
-SCI_FAILURE_PROTOCOL_VIOLATION);
+ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
 break;
 }
@@ -1927,9 +1864,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
-sci_stp_request_udma_complete_request(ireq,
-SCU_TASK_DONE_GOOD,
-SCI_SUCCESS);
+ireq->scu_status = SCU_TASK_DONE_GOOD;
+ireq->sci_status = SCI_SUCCESS;
+sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
@@ -1941,9 +1878,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
 sci_remote_device_suspend(ireq->target_device,
 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
-sci_stp_request_udma_complete_request(ireq,
-SCU_TASK_DONE_CHECK_RESPONSE,
-SCI_FAILURE_IO_RESPONSE_VALID);
+ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 } else {
 /* If we have an error completion status for the
  * TC then we can expect a D2H register FIS from
@@ -1970,9 +1907,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
 /* Fall through to the default case */
 default:
 /* All other completion status cause the IO to be complete. */
-sci_stp_request_udma_complete_request(ireq,
-SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
-SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
+ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 break;
 }
...@@ -1985,9 +1922,8 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, ...@@ -1985,9 +1922,8 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
{ {
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, ireq->scu_status = SCU_TASK_DONE_GOOD;
SCI_SUCCESS); ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG); sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
break; break;
...@@ -1997,10 +1933,8 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq, ...@@ -1997,10 +1933,8 @@ stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
* If a NAK was received, then it is up to the user to retry * If a NAK was received, then it is up to the user to retry
* the request. * the request.
*/ */
sci_request_set_status(ireq, ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
SCU_NORMALIZE_COMPLETION_STATUS(completion_code), ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break; break;
} }
...@@ -2014,9 +1948,8 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, ...@@ -2014,9 +1948,8 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
{ {
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, ireq->scu_status = SCU_TASK_DONE_GOOD;
SCI_SUCCESS); ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H); sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
break; break;
...@@ -2025,10 +1958,8 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq, ...@@ -2025,10 +1958,8 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
* a NAK was received, then it is up to the user to retry the * a NAK was received, then it is up to the user to retry the
* request. * request.
*/ */
sci_request_set_status(ireq, ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
SCU_NORMALIZE_COMPLETION_STATUS(completion_code), ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break; break;
} }
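Taken together, the two soft-reset TC-event handlers above move the request through the H2D assert phase, then the H2D diagnostic phase, before waiting on the D2H FIS. Condensed from the hunks (success path only, sketch):

	/* H2D assert phase completed */
	ireq->scu_status = SCU_TASK_DONE_GOOD;
	ireq->sci_status = SCI_SUCCESS;
	sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);

	/* H2D diagnostic phase completed */
	ireq->scu_status = SCU_TASK_DONE_GOOD;
	ireq->sci_status = SCI_SUCCESS;
	sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);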
...@@ -2504,7 +2435,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, ...@@ -2504,7 +2435,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
completion_status); completion_status);
spin_lock(&request->state_lock); spin_lock(&request->state_lock);
request_status = isci_request_get_state(request); request_status = request->status;
/* Decode the request status. Note that if the request has been /* Decode the request status. Note that if the request has been
* aborted by a task management function, we don't care * aborted by a task management function, we don't care
...@@ -2904,24 +2835,21 @@ static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct s ...@@ -2904,24 +2835,21 @@ static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct s
{ {
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
sci_remote_device_set_working_request(ireq->target_device, ireq->target_device->working_request = ireq;
ireq);
} }
static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{ {
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
sci_remote_device_set_working_request(ireq->target_device, ireq->target_device->working_request = ireq;
ireq);
} }
static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm) static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
{ {
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
sci_remote_device_set_working_request(ireq->target_device, ireq->target_device->working_request = ireq;
ireq);
} }
static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm) static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
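All three STP "started" entry handlers above now record the in-flight request with a plain assignment. A minimal sketch of one resulting handler, using only fields visible in this diff:

	static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
	{
		struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);

		/* remember this request as the device's current ("working") request */
		ireq->target_device->working_request = ireq;
	}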
...@@ -3141,8 +3069,8 @@ sci_io_request_construct_smp(struct device *dev, ...@@ -3141,8 +3069,8 @@ sci_io_request_construct_smp(struct device *dev,
task_context = ireq->tc; task_context = ireq->tc;
idev = sci_request_get_device(ireq); idev = ireq->target_device;
iport = sci_request_get_port(ireq); iport = idev->owning_port;
/* /*
* Fill in the TC with the its required data * Fill in the TC with the its required data
...@@ -3151,9 +3079,8 @@ sci_io_request_construct_smp(struct device *dev, ...@@ -3151,9 +3079,8 @@ sci_io_request_construct_smp(struct device *dev,
task_context->priority = 0; task_context->priority = 0;
task_context->initiator_request = 1; task_context->initiator_request = 1;
task_context->connection_rate = idev->connection_rate; task_context->connection_rate = idev->connection_rate;
task_context->protocol_engine_index = task_context->protocol_engine_index = ISCI_PEG;
sci_controller_get_protocol_engine_group(ihost); task_context->logical_port_index = iport->physical_port_index;
task_context->logical_port_index = sci_port_get_index(iport);
task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
task_context->abort = 0; task_context->abort = 0;
task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->valid = SCU_TASK_CONTEXT_VALID;
...@@ -3195,11 +3122,10 @@ sci_io_request_construct_smp(struct device *dev, ...@@ -3195,11 +3122,10 @@ sci_io_request_construct_smp(struct device *dev,
task_context->task_phase = 0; task_context->task_phase = 0;
ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
(sci_controller_get_protocol_engine_group(ihost) << (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (iport->physical_port_index <<
(sci_port_get_index(iport) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | ISCI_TAG_TCI(ireq->io_tag));
ISCI_TAG_TCI(ireq->io_tag));
/* /*
* Copy the physical address for the command buffer to the SCU Task * Copy the physical address for the command buffer to the SCU Task
* Context command buffer should not contain command header. * Context command buffer should not contain command header.
......
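Laid out on its own, the post-context word built at the end of sci_io_request_construct_smp combines the post-TC request type, the protocol engine group (ISCI_PEG), the logical port index, and the task context index. A readable restatement of the lines above, nothing more:

	ireq->post_context = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			     (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			     (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			     ISCI_TAG_TCI(ireq->io_tag);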
...@@ -300,58 +300,6 @@ enum sci_base_request_states { ...@@ -300,58 +300,6 @@ enum sci_base_request_states {
SCI_REQ_FINAL, SCI_REQ_FINAL,
}; };
/**
* sci_request_get_controller() -
*
* This macro will return the controller for this io request object
*/
#define sci_request_get_controller(ireq) \
((ireq)->owning_controller)
/**
* sci_request_get_device() -
*
* This macro will return the device for this io request object
*/
#define sci_request_get_device(ireq) \
((ireq)->target_device)
/**
* sci_request_get_port() -
*
* This macro will return the port for this io request object
*/
#define sci_request_get_port(ireq) \
sci_remote_device_get_port(sci_request_get_device(ireq))
/**
* sci_request_get_post_context() -
*
* This macro returns the constructed post context result for the io request.
*/
#define sci_request_get_post_context(ireq) \
((ireq)->post_context)
/**
* sci_request_get_task_context() -
*
* This is a helper macro to return the os handle for this request object.
*/
#define sci_request_get_task_context(request) \
((request)->task_context_buffer)
/**
* sci_request_set_status() -
*
* This macro will set the scu hardware status and sci request completion
* status for an io request.
*/
#define sci_request_set_status(request, scu_status_code, sci_status_code) \
{ \
(request)->scu_status = (scu_status_code); \
(request)->sci_status = (sci_status_code); \
}
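The accessors deleted above just wrap field reads and writes, so callers open-code them. The replacements used throughout this commit amount to the following (sketch; local variable names and types are illustrative):

	struct isci_host *ihost = ireq->owning_controller;	/* sci_request_get_controller() */
	struct isci_remote_device *idev = ireq->target_device;	/* sci_request_get_device() */
	struct isci_port *iport = idev->owning_port;		/* sci_request_get_port() */
	u32 post_context = ireq->post_context;			/* sci_request_get_post_context() */

	/* sci_request_set_status() */
	ireq->scu_status = scu_status_code;
	ireq->sci_status = sci_status_code;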
enum sci_status sci_request_start(struct isci_request *ireq); enum sci_status sci_request_start(struct isci_request *ireq);
enum sci_status sci_io_request_terminate(struct isci_request *ireq); enum sci_status sci_io_request_terminate(struct isci_request *ireq);
enum sci_status enum sci_status
...@@ -381,27 +329,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) ...@@ -381,27 +329,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
return ireq->request_daddr + (requested_addr - base_addr); return ireq->request_daddr + (requested_addr - base_addr);
} }
/**
* This function gets the status of the request object.
* @request: This parameter points to the isci_request object
*
* status of the object as a isci_request_status enum.
*/
static inline enum isci_request_status
isci_request_get_state(struct isci_request *isci_request)
{
BUG_ON(isci_request == NULL);
/*probably a bad sign... */
if (isci_request->status == unallocated)
dev_warn(&isci_request->isci_host->pdev->dev,
"%s: isci_request->status == unallocated\n",
__func__);
return isci_request->status;
}
/** /**
* isci_request_change_state() - This function sets the status of the request * isci_request_change_state() - This function sets the status of the request
* object. * object.
......
...@@ -654,7 +654,7 @@ static void isci_terminate_request_core(struct isci_host *ihost, ...@@ -654,7 +654,7 @@ static void isci_terminate_request_core(struct isci_host *ihost,
* needs to be detached and freed here. * needs to be detached and freed here.
*/ */
spin_lock_irqsave(&isci_request->state_lock, flags); spin_lock_irqsave(&isci_request->state_lock, flags);
request_status = isci_request_get_state(isci_request); request_status = isci_request->status;
if ((isci_request->ttype == io_task) /* TMFs are in their own thread */ if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
&& ((request_status == aborted) && ((request_status == aborted)
......
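In both the completion path (request.c) and the termination path above, the inline helper that read the request state is gone; the state is read directly while the request's state lock is held. Condensed from the hunks in this commit (sketch based on the task.c variant):

	unsigned long flags;
	enum isci_request_status request_status;

	spin_lock_irqsave(&isci_request->state_lock, flags);
	request_status = isci_request->status;	/* was isci_request_get_state(isci_request) */
	/* ... decide how to complete or terminate based on request_status ... */
	spin_unlock_irqrestore(&isci_request->state_lock, flags);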