Commit e90dd264 authored by Mark Rustad, committed by Jeff Kirsher

ixgbe: Make return values more direct

Make return values more direct, eliminating some gotos and
otherwise unneeded conditionals. This also eliminates some
local variables. Also make a few minor cleanups in the affected
code so that checkpatch won't complain.
Signed-off-by: Mark Rustad <mark.d.rustad@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 9f1fb8ac
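
The change applies one pattern throughout: instead of loading an error code into a local status variable and jumping to a label that returns it, each function now returns directly at the point where the result is known. A minimal sketch of the before/after shape, using hypothetical names (example_hw, example_configure, EXAMPLE_ERR_CONFIG) rather than the actual driver code:

#include <stdbool.h>

typedef int s32;			/* stand-in for the kernel's s32 typedef */
#define EXAMPLE_ERR_CONFIG	(-1)	/* hypothetical error code, not a real IXGBE_ERR_* value */

struct example_hw {
	bool ready;
};

static s32 example_configure(struct example_hw *hw)
{
	return 0;			/* hypothetical helper */
}

/* Before: status local plus a goto to a common exit label */
static s32 example_init_old(struct example_hw *hw)
{
	s32 ret_val = 0;

	if (!hw->ready) {
		ret_val = EXAMPLE_ERR_CONFIG;
		goto out;
	}

	ret_val = example_configure(hw);
out:
	return ret_val;
}

/* After: return at the point of failure; the local and the label go away */
static s32 example_init_new(struct example_hw *hw)
{
	if (!hw->ready)
		return EXAMPLE_ERR_CONFIG;

	return example_configure(hw);
}

Direct returns work here because none of these early-exit paths share cleanup work; where a function does hold resources that must be released on every path, the goto-to-label form remains the usual kernel idiom.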
......@@ -122,7 +122,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
s32 ret_val = 0;
s32 ret_val;
u16 list_offset, data_offset;
/* Identify the PHY */
......@@ -147,28 +147,23 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Call SFP+ identify routine to get the SFP+ module type */
ret_val = phy->ops.identify_sfp(hw);
if (ret_val != 0)
goto out;
else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
}
if (ret_val)
return ret_val;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
return IXGBE_ERR_SFP_NOT_SUPPORTED;
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
&list_offset,
&data_offset);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
}
if (ret_val)
return IXGBE_ERR_SFP_NOT_SUPPORTED;
break;
default:
break;
}
out:
return ret_val;
return 0;
}
/**
......@@ -183,7 +178,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
u32 regval;
u32 i;
s32 ret_val = 0;
s32 ret_val;
ret_val = ixgbe_start_hw_generic(hw);
......@@ -203,11 +198,13 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
if (ret_val)
return ret_val;
/* set the completion timeout for interface */
if (ret_val == 0)
ixgbe_set_pcie_completion_timeout(hw);
ixgbe_set_pcie_completion_timeout(hw);
return ret_val;
return 0;
}
/**
......@@ -222,7 +219,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
s32 status = 0;
u32 autoc = 0;
/*
......@@ -262,11 +258,10 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
break;
default:
status = IXGBE_ERR_LINK_SETUP;
break;
return IXGBE_ERR_LINK_SETUP;
}
return status;
return 0;
}
/**
......@@ -277,14 +272,12 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
**/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
enum ixgbe_media_type media_type;
/* Detect if there is a copper PHY attached. */
switch (hw->phy.type) {
case ixgbe_phy_cu_unknown:
case ixgbe_phy_tn:
media_type = ixgbe_media_type_copper;
goto out;
return ixgbe_media_type_copper;
default:
break;
}
......@@ -294,30 +287,27 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82598_BX:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
return ixgbe_media_type_backplane;
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
case IXGBE_DEV_ID_82598EB_XF_LR:
case IXGBE_DEV_ID_82598EB_SFP_LOM:
media_type = ixgbe_media_type_fiber;
break;
return ixgbe_media_type_fiber;
case IXGBE_DEV_ID_82598EB_CX4:
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
media_type = ixgbe_media_type_cx4;
break;
return ixgbe_media_type_cx4;
case IXGBE_DEV_ID_82598AT:
case IXGBE_DEV_ID_82598AT2:
media_type = ixgbe_media_type_copper;
break;
return ixgbe_media_type_copper;
default:
media_type = ixgbe_media_type_unknown;
break;
return ixgbe_media_type_unknown;
}
out:
return media_type;
}
/**
......@@ -328,7 +318,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
**/
static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
......@@ -338,10 +327,8 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
bool link_up;
/* Validate the water mark configuration */
if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
if (!hw->fc.pause_time)
return IXGBE_ERR_INVALID_LINK_SETTINGS;
/* Low water mark of zero causes XOFF floods */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
......@@ -350,8 +337,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
if (!hw->fc.low_water[i] ||
hw->fc.low_water[i] >= hw->fc.high_water[i]) {
hw_dbg(hw, "Invalid water mark configuration\n");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
return IXGBE_ERR_INVALID_LINK_SETTINGS;
}
}
}
......@@ -428,8 +414,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
break;
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
ret_val = IXGBE_ERR_CONFIG;
goto out;
return IXGBE_ERR_CONFIG;
}
/* Set 802.3x based flow control settings. */
......@@ -460,8 +445,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
out:
return ret_val;
return 0;
}
/**
......@@ -597,7 +581,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
}
if (!*link_up)
goto out;
return 0;
}
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
......@@ -628,7 +612,6 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
(ixgbe_validate_link_ready(hw) != 0))
*link_up = false;
out:
return 0;
}
......@@ -645,7 +628,6 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
bool autoneg = false;
s32 status = 0;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 autoc = curr_autoc;
......@@ -656,7 +638,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
status = IXGBE_ERR_LINK_SETUP;
return IXGBE_ERR_LINK_SETUP;
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
......@@ -670,17 +652,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
}
if (status == 0) {
/*
* Setup and restart the link based on the new values in
* ixgbe_hw This will write the AUTOC register based on the new
* stored values
*/
status = ixgbe_start_mac_link_82598(hw,
autoneg_wait_to_complete);
}
return status;
/* Setup and restart the link based on the new values in
* ixgbe_hw This will write the AUTOC register based on the new
* stored values
*/
return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
}
......@@ -717,7 +693,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
**/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
s32 status = 0;
s32 status;
s32 phy_status = 0;
u32 ctrl;
u32 gheccr;
......@@ -727,8 +703,8 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
if (status != 0)
goto reset_hw_out;
if (status)
return status;
/*
* Power up the Atlas Tx lanes if they are currently powered down.
......@@ -770,7 +746,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
/* Init PHY and function pointers, perform SFP setup */
phy_status = hw->phy.ops.init(hw);
if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
goto reset_hw_out;
return phy_status;
if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
goto mac_reset_top;
......@@ -836,7 +812,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
*/
hw->mac.ops.init_rx_addrs(hw);
reset_hw_out:
if (phy_status)
status = phy_status;
......
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2013 Intel Corporation.
Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
......@@ -87,7 +87,6 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
int min_credit;
int min_multiplier;
int min_percent = 100;
s32 ret_val = 0;
/* Initialization values default for Tx settings */
u32 credit_refill = 0;
u32 credit_max = 0;
......@@ -95,10 +94,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
u8 bw_percent = 0;
u8 i;
if (dcb_config == NULL) {
ret_val = DCB_ERR_CONFIG;
goto out;
}
if (!dcb_config)
return DCB_ERR_CONFIG;
min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
DCB_CREDIT_QUANTUM;
......@@ -174,8 +171,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
p->data_credits_max = (u16)credit_max;
}
out:
return ret_val;
return 0;
}
void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
......@@ -236,7 +232,7 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
/* If tc is 0 then DCB is likely not enabled or supported */
if (!tc)
goto out;
return 0;
/*
* Test from maximum TC to 1 and report the first match we find. If
......@@ -247,7 +243,7 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
break;
}
out:
return tc;
}
......@@ -269,7 +265,6 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
u8 pfc_en;
u8 ptype[MAX_TRAFFIC_CLASS];
u8 bwgid[MAX_TRAFFIC_CLASS];
......@@ -287,37 +282,31 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
bwgid, ptype);
break;
return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
bwgid, ptype);
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
break;
return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
default:
break;
}
return ret;
return 0;
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
int ret = -EINVAL;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
break;
return ixgbe_dcb_config_pfc_82598(hw, pfc_en);
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
break;
return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
default:
break;
}
return ret;
return -EINVAL;
}
s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
......
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2013 Intel Corporation.
Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
......@@ -153,7 +153,6 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int err = 0;
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
......@@ -161,12 +160,10 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
/* verify there is something to do, if not then exit */
if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
goto out;
return 0;
err = ixgbe_setup_tc(netdev,
state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
out:
return !!err;
return !!ixgbe_setup_tc(netdev,
state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
}
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
......@@ -331,12 +328,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ret;
return DCB_NO_HW_CHG;
adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
MAX_TRAFFIC_CLASS);
if (!adapter->dcb_set_bitmap)
return ret;
return DCB_NO_HW_CHG;
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
......@@ -536,7 +533,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err = 0;
int i, err;
__u8 max_tc = 0;
__u8 map_chg = 0;
......@@ -573,17 +570,15 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
return -EINVAL;
if (max_tc != netdev_get_num_tc(dev))
if (max_tc != netdev_get_num_tc(dev)) {
err = ixgbe_setup_tc(dev, max_tc);
else if (map_chg)
if (err)
return err;
} else if (map_chg) {
ixgbe_dcbnl_devreset(dev);
}
if (err)
goto err_out;
err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
err_out:
return err;
return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
}
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
......@@ -647,10 +642,10 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int err = -EINVAL;
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return err;
return -EINVAL;
err = dcb_ieee_setapp(dev, app);
if (err)
......@@ -662,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
if (app_mask & (1 << adapter->fcoe.up))
return err;
return 0;
adapter->fcoe.up = app->priority;
ixgbe_dcbnl_devreset(dev);
......@@ -705,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
if (app_mask & (1 << adapter->fcoe.up))
return err;
return 0;
adapter->fcoe.up = app_mask ?
ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
......
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2013 Intel Corporation.
Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
......@@ -67,23 +67,23 @@ static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
*/
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
int len = 0;
int len;
struct ixgbe_fcoe *fcoe;
struct ixgbe_adapter *adapter;
struct ixgbe_fcoe_ddp *ddp;
u32 fcbuff;
if (!netdev)
goto out_ddp_put;
return 0;
if (xid >= IXGBE_FCOE_DDP_MAX)
goto out_ddp_put;
return 0;
adapter = netdev_priv(netdev);
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
goto out_ddp_put;
return 0;
len = ddp->len;
/* if there is an error, force to invalidate ddp context */
......@@ -114,7 +114,6 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
ixgbe_fcoe_clear_ddp(ddp);
out_ddp_put:
return len;
}
......@@ -394,17 +393,17 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
xid = be16_to_cpu(fh->fh_rx_id);
if (xid >= IXGBE_FCOE_DDP_MAX)
goto ddp_out;
return -EINVAL;
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
goto ddp_out;
return -EINVAL;
ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
IXGBE_RXDADV_ERR_FCERR);
if (ddp_err)
goto ddp_out;
return -EINVAL;
switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
/* return 0 to bypass going to ULD for DDPed data */
......@@ -447,7 +446,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
}
ddp_out:
return rc;
}
......@@ -878,7 +877,6 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
*/
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
int rc = -EINVAL;
u16 prefix = 0xffff;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
......@@ -903,9 +901,9 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
((u64) mac->san_addr[3] << 16) |
((u64) mac->san_addr[4] << 8) |
((u64) mac->san_addr[5]);
rc = 0;
return 0;
}
return rc;
return -EINVAL;
}
/**
......
......@@ -570,7 +570,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Print TX Ring Summary */
if (!netdev || !netif_running(netdev))
goto exit;
return;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
pr_info(" %s %s %s %s\n",
......@@ -685,7 +685,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Print RX Rings */
if (!netif_msg_rx_status(adapter))
goto exit;
return;
dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
......@@ -787,9 +787,6 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
}
}
exit:
return;
}
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
......@@ -1011,7 +1008,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
bool ret = false;
clear_check_for_tx_hang(tx_ring);
......@@ -1027,18 +1023,16 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
if ((tx_done_old == tx_done) && tx_pending) {
if (tx_done_old == tx_done && tx_pending)
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
&tx_ring->state);
} else {
/* update completed stats and continue */
tx_ring->tx_stats.tx_done_old = tx_done;
/* reset the countdown */
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
&tx_ring->state);
/* update completed stats and continue */
tx_ring->tx_stats.tx_done_old = tx_done;
/* reset the countdown */
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
return ret;
return false;
}
/**
......@@ -4701,18 +4695,18 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (ret)
goto link_cfg_out;
return ret;
speed = hw->phy.autoneg_advertised;
if ((!speed) && (hw->mac.ops.get_link_capabilities))
ret = hw->mac.ops.get_link_capabilities(hw, &speed,
&autoneg);
if (ret)
goto link_cfg_out;
return ret;
if (hw->mac.ops.setup_link)
ret = hw->mac.ops.setup_link(hw, speed, link_up);
link_cfg_out:
return ret;
}
......
......@@ -43,16 +43,15 @@
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
/* limit read to size of mailbox */
if (size > mbx->size)
size = mbx->size;
if (mbx->ops.read)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
if (!mbx->ops.read)
return IXGBE_ERR_MBX;
return ret_val;
return mbx->ops.read(hw, msg, size, mbx_id);
}
/**
......@@ -87,12 +86,11 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
if (mbx->ops.check_for_msg)
ret_val = mbx->ops.check_for_msg(hw, mbx_id);
if (!mbx->ops.check_for_msg)
return IXGBE_ERR_MBX;
return ret_val;
return mbx->ops.check_for_msg(hw, mbx_id);
}
/**
......@@ -105,12 +103,11 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
if (mbx->ops.check_for_ack)
ret_val = mbx->ops.check_for_ack(hw, mbx_id);
if (!mbx->ops.check_for_ack)
return IXGBE_ERR_MBX;
return ret_val;
return mbx->ops.check_for_ack(hw, mbx_id);
}
/**
......@@ -123,12 +120,11 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
if (mbx->ops.check_for_rst)
ret_val = mbx->ops.check_for_rst(hw, mbx_id);
if (!mbx->ops.check_for_rst)
return IXGBE_ERR_MBX;
return ret_val;
return mbx->ops.check_for_rst(hw, mbx_id);
}
/**
......@@ -144,17 +140,16 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
int countdown = mbx->timeout;
if (!countdown || !mbx->ops.check_for_msg)
goto out;
return IXGBE_ERR_MBX;
while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
while (mbx->ops.check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
return IXGBE_ERR_MBX;
udelay(mbx->usec_delay);
}
out:
return countdown ? 0 : IXGBE_ERR_MBX;
return 0;
}
/**
......@@ -170,17 +165,16 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
int countdown = mbx->timeout;
if (!countdown || !mbx->ops.check_for_ack)
goto out;
return IXGBE_ERR_MBX;
while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
while (mbx->ops.check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
return IXGBE_ERR_MBX;
udelay(mbx->usec_delay);
}
out:
return countdown ? 0 : IXGBE_ERR_MBX;
return 0;
}
/**
......@@ -197,18 +191,17 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
s32 ret_val;
if (!mbx->ops.read)
goto out;
return IXGBE_ERR_MBX;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
if (ret_val)
return ret_val;
/* if ack received read message, otherwise we timed out */
if (!ret_val)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
out:
return ret_val;
/* if ack received read message */
return mbx->ops.read(hw, msg, size, mbx_id);
}
/**
......@@ -225,33 +218,31 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
s32 ret_val;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
goto out;
return IXGBE_ERR_MBX;
/* send msg */
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
if (ret_val)
return ret_val;
/* if msg sent wait until we receive an ack */
if (!ret_val)
ret_val = ixgbe_poll_for_ack(hw, mbx_id);
out:
return ret_val;
return ixgbe_poll_for_ack(hw, mbx_id);
}
static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
s32 ret_val = IXGBE_ERR_MBX;
if (mbvficr & mask) {
ret_val = 0;
IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
return 0;
}
return ret_val;
return IXGBE_ERR_MBX;
}
/**
......@@ -263,17 +254,16 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
**/
static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
s32 index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
index)) {
ret_val = 0;
hw->mbx.stats.reqs++;
return 0;
}
return ret_val;
return IXGBE_ERR_MBX;
}
/**
......@@ -285,17 +275,16 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
**/
static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
s32 index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
index)) {
ret_val = 0;
hw->mbx.stats.acks++;
return 0;
}
return ret_val;
return IXGBE_ERR_MBX;
}
/**
......@@ -310,7 +299,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 reg_offset = (vf_number < 32) ? 0 : 1;
u32 vf_shift = vf_number % 32;
u32 vflre = 0;
s32 ret_val = IXGBE_ERR_MBX;
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
......@@ -324,12 +312,12 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
}
if (vflre & (1 << vf_shift)) {
ret_val = 0;
IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
hw->mbx.stats.rsts++;
return 0;
}
return ret_val;
return IXGBE_ERR_MBX;
}
/**
......@@ -341,7 +329,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
**/
static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
u32 p2v_mailbox;
/* Take ownership of the buffer */
......@@ -350,9 +337,9 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
/* reserve mailbox for vf use */
p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
ret_val = 0;
return 0;
return ret_val;
return IXGBE_ERR_MBX;
}
/**
......@@ -373,7 +360,7 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_write;
return ret_val;
/* flush msg and acks as we are overwriting the message buffer */
ixgbe_check_for_msg_pf(hw, vf_number);
......@@ -389,9 +376,7 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* update stats */
hw->mbx.stats.msgs_tx++;
out_no_write:
return ret_val;
return 0;
}
/**
......@@ -414,7 +399,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_read;
return ret_val;
/* copy the message to the mailbox memory buffer */
for (i = 0; i < size; i++)
......@@ -426,8 +411,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* update stats */
hw->mbx.stats.msgs_rx++;
out_no_read:
return ret_val;
return 0;
}
#ifdef CONFIG_PCI_IOV
......
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2013 Intel Corporation.
Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
......@@ -245,10 +245,10 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
err = ixgbe_disable_sriov(adapter);
else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
goto out;
return num_vfs;
if (err)
goto err_out;
return err;
/* While the SR-IOV capability structure reports total VFs to be
* 64 we limit the actual number that can be allocated to 63 so
......@@ -256,16 +256,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* PF. The PCI bus driver already checks for other values out of
* range.
*/
if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
err = -EPERM;
goto err_out;
}
if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT)
return -EPERM;
adapter->num_vfs = num_vfs;
err = __ixgbe_enable_sriov(adapter);
if (err)
goto err_out;
return err;
for (i = 0; i < adapter->num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
......@@ -273,17 +271,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
err = pci_enable_sriov(dev, num_vfs);
if (err) {
e_dev_warn("Failed to enable PCI sriov: %d\n", err);
goto err_out;
return err;
}
ixgbe_sriov_reinit(adapter);
out:
return num_vfs;
err_out:
return err;
#endif
#else
return 0;
#endif
}
static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
......@@ -807,7 +802,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
if (!add && adapter->netdev->flags & IFF_PROMISC) {
reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
if (reg_ndx < 0)
goto out;
return err;
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
/* See if any other pools are set for this VLAN filter
* entry other than the PF.
......@@ -833,8 +828,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
}
out:
return err;
}
......@@ -951,7 +944,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* this is a message we already processed, do nothing */
if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
return retval;
return 0;
/* flush the ack before we write any messages back */
IXGBE_WRITE_FLUSH(hw);
......@@ -966,7 +959,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
if (!adapter->vfinfo[vf].clear_to_send) {
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
return retval;
return 0;
}
switch ((msgbuf[0] & 0xFFFF)) {
......
......@@ -99,8 +99,8 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
if (status != 0)
goto reset_hw_out;
if (status)
return status;
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
......@@ -168,7 +168,6 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
&hw->mac.wwpn_prefix);
reset_hw_out:
return status;
}
......@@ -182,15 +181,13 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
**/
static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
s32 ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val != 0)
goto out;
if (ret_val)
return ret_val;
ret_val = ixgbe_start_hw_gen2(hw);
out:
return ret_val;
return ixgbe_start_hw_gen2(hw);
}
/**
......@@ -483,12 +480,12 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
u32 flup;
s32 status = IXGBE_ERR_EEPROM;
s32 status;
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == IXGBE_ERR_EEPROM) {
hw_dbg(hw, "Flash update time out\n");
goto out;
return status;
}
flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
......@@ -514,7 +511,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
else
hw_dbg(hw, "Flash update time out\n");
}
out:
return status;
}
......@@ -529,17 +526,14 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
u32 i;
u32 reg;
s32 status = IXGBE_ERR_EEPROM;
for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_EEC);
if (reg & IXGBE_EEC_FLUDONE) {
status = 0;
break;
}
if (reg & IXGBE_EEC_FLUDONE)
return 0;
udelay(5);
}
return status;
return IXGBE_ERR_EEPROM;
}
/**
......