Commit 85a43a9e authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6

parents 23efcb73 42532da6
@@ -131,6 +131,13 @@ struct vf_macvlans {
 	u8 vf_macvlan[ETH_ALEN];
 };
 
+#define IXGBE_MAX_TXD_PWR	14
+#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
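Note: TXD_USE_COUNT() is now a plain ceiling division — how many 16 KiB descriptors a buffer of S bytes consumes, worst case — and DESC_NEEDED budgets one descriptor per fragment plus head, context, and a two-descriptor gap. A minimal userspace sketch of the arithmetic; the PAGE_SIZE and MAX_SKB_FRAGS values below are stand-in assumptions for illustration:

```c
#include <stdio.h>

/* userspace stand-ins for the kernel values; both are assumptions here */
#define PAGE_SIZE     4096
#define MAX_SKB_FRAGS 17

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define IXGBE_MAX_TXD_PWR      14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)   /* 16384 bytes */

/* Tx descriptors needed for a buffer of S bytes, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)

int main(void)
{
	/* a 4 KiB page always fits in one 16 KiB descriptor */
	printf("descs per page frag: %d\n", TXD_USE_COUNT(PAGE_SIZE));
	/* 17 frags + 1 head + 1 context + 2 gap = 21 */
	printf("DESC_NEEDED:         %d\n", DESC_NEEDED);
	return 0;
}
```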
@@ -306,9 +313,13 @@ struct ixgbe_q_vector {
 	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
 #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
 
-#define IXGBE_DESC_UNUSED(R) \
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
 
 #define IXGBE_RX_DESC_ADV(R, i) \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
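The inline keeps the old macro's arithmetic: the ring deliberately leaves one slot unused so a full ring can be told apart from an empty one. A standalone sketch with a toy ring, assuming nothing beyond the three fields the helper reads:

```c
#include <assert.h>
#include <stdint.h>

/* toy ring with just the fields ixgbe_desc_unused() touches */
struct ring {
	uint16_t count;         /* total descriptors */
	uint16_t next_to_use;   /* producer index */
	uint16_t next_to_clean; /* consumer index */
};

/* same arithmetic as the new inline: one slot is always kept empty */
static uint16_t desc_unused(const struct ring *r)
{
	uint16_t ntc = r->next_to_clean;
	uint16_t ntu = r->next_to_use;

	return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
}

int main(void)
{
	struct ring r = { .count = 512, .next_to_use = 0, .next_to_clean = 0 };

	assert(desc_unused(&r) == 511);   /* empty ring, one slot reserved */
	r.next_to_use = 100;
	assert(desc_unused(&r) == 411);   /* 100 descriptors in flight */
	r.next_to_clean = 100;
	assert(desc_unused(&r) == 511);   /* all cleaned again */
	return 0;
}
```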
@@ -576,10 +587,10 @@ extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
 			       struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 		     u32 tx_flags, u8 *hdr_len);
 extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
...
@@ -330,24 +330,20 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int ret;
+#ifdef IXGBE_FCOE
 	struct dcb_app app = {
 			      .selector = DCB_APP_IDTYPE_ETHTYPE,
 			      .protocol = ETH_P_FCOE,
 			     };
 	u8 up = dcb_getapp(netdev, &app);
-	int ret;
+#endif
 
 	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
 				 MAX_TRAFFIC_CLASS);
 	if (ret)
 		return DCB_NO_HW_CHG;
 
-	/* In IEEE mode app data must be parsed into DCBX format for
-	 * hardware routines.
-	 */
-	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
-		up = (1 << up);
-
 #ifdef IXGBE_FCOE
 	if (up && (up != (1 << adapter->fcoe.up)))
 		adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
@@ -361,7 +357,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 		while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 			usleep_range(1000, 2000);
 
-		ixgbe_fcoe_setapp(adapter, up);
+		adapter->fcoe.up = ffs(up) - 1;
 
 		if (netif_running(netdev))
 			netdev->netdev_ops->ndo_stop(netdev);
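dcb_getapp() hands back a user-priority bitmap, and ffs(up) - 1 converts that to the lowest set 802.1p priority (ffs() is 1-based and returns 0 for an empty mask). A small userspace illustration of the conversion:

```c
#include <assert.h>
#include <strings.h>	/* ffs() */

/* lowest 802.1p priority set in a user-priority bitmap, or -1 if none;
 * mirrors the driver's "adapter->fcoe.up = ffs(up) - 1" with the
 * zero-bitmap case made explicit */
static int lowest_prio(unsigned int up_bitmap)
{
	return ffs(up_bitmap) - 1;	/* ffs() returns 0 when no bit is set */
}

int main(void)
{
	assert(lowest_prio(0x08) == 3);	/* only priority 3 set */
	assert(lowest_prio(0x28) == 3);	/* priorities 3 and 5: lowest wins */
	assert(lowest_prio(0) == -1);	/* empty bitmap */
	return 0;
}
```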
@@ -674,24 +670,75 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
 	return err;
 }
 
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	if (netif_running(dev))
+		dev->netdev_ops->ndo_stop(dev);
+
+	ixgbe_clear_interrupt_scheme(adapter);
+	ixgbe_init_interrupt_scheme(adapter);
+
+	if (netif_running(dev))
+		dev->netdev_ops->ndo_open(dev);
+}
+#endif
+
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
 				   struct dcb_app *app)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err = -EINVAL;
 
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
-		return -EINVAL;
+		return err;
 
-	dcb_setapp(dev, app);
+	err = dcb_ieee_setapp(dev, app);
 
 #ifdef IXGBE_FCOE
-	if (app->selector == 1 && app->protocol == ETH_P_FCOE &&
-	    adapter->fcoe.tc == app->priority)
-		ixgbe_dcbnl_set_all(dev);
+	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+	    app->protocol == ETH_P_FCOE) {
+		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+		if (app_mask & (1 << adapter->fcoe.up))
+			return err;
+
+		adapter->fcoe.up = app->priority;
+		ixgbe_dcbnl_devreset(dev);
+	}
 #endif
 	return 0;
 }
 
+static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
+				   struct dcb_app *app)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err;
+
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	err = dcb_ieee_delapp(dev, app);
+
+#ifdef IXGBE_FCOE
+	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+	    app->protocol == ETH_P_FCOE) {
+		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+		if (app_mask & (1 << adapter->fcoe.up))
+			return err;
+
+		adapter->fcoe.up = app_mask ?
+				   ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
+		ixgbe_dcbnl_devreset(dev);
+	}
+#endif
+	return err;
+}
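Both handlers lean on dcb_ieee_getapp_mask() to decide whether the cached FCoE priority is still advertised; only when it is not does the device get reset with a new priority. A sketch of that selection rule — the IXGBE_FCOE_DEFTC value below is a stand-in assumption:

```c
#include <assert.h>
#include <strings.h>	/* ffs() */

#define FCOE_DEFTC 3	/* hypothetical stand-in for IXGBE_FCOE_DEFTC */

/* after an APP-table entry is deleted, pick the FCoE priority the way
 * the new delapp handler does: keep the current one if it is still
 * advertised, else fall back to the lowest remaining bit, else default */
static int next_fcoe_up(unsigned int app_mask, int cur_up)
{
	if (app_mask & (1u << cur_up))
		return cur_up;		/* current priority still valid */
	return app_mask ? ffs(app_mask) - 1 : FCOE_DEFTC;
}

int main(void)
{
	assert(next_fcoe_up(0x28, 3) == 3);	/* bit 3 still set: no change */
	assert(next_fcoe_up(0x20, 3) == 5);	/* bit 3 gone: take bit 5 */
	assert(next_fcoe_up(0x00, 3) == FCOE_DEFTC);	/* table empty */
	return 0;
}
```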
 static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -743,6 +790,7 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
 	.ieee_getpfc	= ixgbe_dcbnl_ieee_getpfc,
 	.ieee_setpfc	= ixgbe_dcbnl_ieee_setpfc,
 	.ieee_setapp	= ixgbe_dcbnl_ieee_setapp,
+	.ieee_delapp	= ixgbe_dcbnl_ieee_delapp,
 	.getstate	= ixgbe_dcbnl_get_state,
 	.setstate	= ixgbe_dcbnl_set_state,
 	.getpermhwaddr	= ixgbe_dcbnl_get_perm_hw_addr,
...
@@ -26,9 +26,6 @@
  *******************************************************************************/
 
 #include "ixgbe.h"
-#ifdef CONFIG_IXGBE_DCB
-#include "ixgbe_dcb_82599.h"
-#endif /* CONFIG_IXGBE_DCB */
 #include <linux/if_ether.h>
 #include <linux/gfp.h>
 #include <linux/if_vlan.h>
@@ -474,23 +471,17 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
  *
  * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
  */
-int ixgbe_fso(struct ixgbe_adapter *adapter,
-	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 	      u32 tx_flags, u8 *hdr_len)
 {
-	u8 sof, eof;
+	struct fc_frame_header *fh;
 	u32 vlan_macip_lens;
-	u32 fcoe_sof_eof;
-	u32 type_tucmd;
+	u32 fcoe_sof_eof = 0;
 	u32 mss_l4len_idx;
-	int mss = 0;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	struct fc_frame_header *fh;
+	u8 sof, eof;
 
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
-		e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
-		      skb_shinfo(skb)->gso_type);
+		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+			skb_shinfo(skb)->gso_type);
 		return -EINVAL;
 	}
@@ -501,23 +492,22 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		   sizeof(struct fcoe_hdr));
 
 	/* sets up SOF and ORIS */
-	fcoe_sof_eof = 0;
 	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
 	switch (sof) {
 	case FC_SOF_I2:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_I3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
+			       IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_N2:
 		break;
 	case FC_SOF_N3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
 		break;
 	default:
-		e_warn(drv, "unknown sof = 0x%x\n", sof);
+		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
 		return -EINVAL;
 	}
@@ -530,12 +520,11 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		break;
 	case FC_EOF_T:
 		/* lso needs ORIE */
-		if (skb_is_gso(skb)) {
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
-		} else {
+		if (skb_is_gso(skb))
+			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
+					IXGBE_ADVTXD_FCOEF_ORIE;
+		else
 			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
-		}
 		break;
 	case FC_EOF_NI:
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
@@ -544,7 +533,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
 		break;
 	default:
-		e_warn(drv, "unknown eof = 0x%x\n", eof);
+		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
 		return -EINVAL;
 	}
@@ -553,43 +542,28 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
 
-	/* hdr_len includes fc_hdr if FCoE lso is enabled */
+	/* include trailer in headlen as it is replicated per frame */
 	*hdr_len = sizeof(struct fcoe_crc_eof);
+
+	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
 	if (skb_is_gso(skb))
 		*hdr_len += (skb_transport_offset(skb) +
 			     sizeof(struct fc_frame_header));
 
-	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = (skb_transport_offset(skb) +
-			   sizeof(struct fc_frame_header));
-	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
-			    << IXGBE_ADVTXD_MACLEN_SHIFT);
-	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-
-	/* type_tycmd and mss: set TUCMD.FCoE to enable offload */
-	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
-		     IXGBE_ADVTXT_TUCMD_FCOE;
-	if (skb_is_gso(skb))
-		mss = skb_shinfo(skb)->gso_size;
-
 	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
-	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
-			(1 << IXGBE_ADVTXD_IDX_SHIFT);
+	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_transport_offset(skb) +
+			  sizeof(struct fc_frame_header);
+	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
+			   << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	/* write context desc */
-	i = tx_ring->next_to_use;
-	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
-	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
-	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-	tx_buffer_info = &tx_ring->tx_buffer_info[i];
-	tx_buffer_info->time_stamp = jiffies;
-	tx_buffer_info->next_to_watch = i;
-
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
-	tx_ring->next_to_use = i;
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
+			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
 
 	return skb_is_gso(skb);
 }
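The vlan_macip_lens word packs three fields: HEADLEN in the low 9 bits, MACLEN above it (IXGBE_ADVTXD_MACLEN_SHIFT is 9), and the VLAN tag in the top 16 bits. A sketch of the packing with purely illustrative offsets:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* field layout of the context descriptor's vlan_macip_lens word as the
 * FSO path fills it; shift values mirror the IXGBE_ADVTXD_* defines */
#define MACLEN_SHIFT 9
#define VLAN_SHIFT   16

static uint32_t pack_vlan_macip_lens(uint32_t headlen, uint32_t maclen,
				     uint32_t vlan_tag)
{
	return headlen | (maclen << MACLEN_SHIFT) | (vlan_tag << VLAN_SHIFT);
}

int main(void)
{
	/* hypothetical offsets, purely illustrative */
	uint32_t w = pack_vlan_macip_lens(52, 24, 0);

	assert((w & 0x1ff) == 52);                     /* HEADLEN */
	assert(((w >> MACLEN_SHIFT) & 0x7f) == 24);    /* MACLEN */
	printf("vlan_macip_lens = 0x%08x\n", w);
	return 0;
}
```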
@@ -648,10 +622,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-#ifdef CONFIG_IXGBE_DCB
-	u8 tc;
-	u32 up2tc;
-#endif
 
 	if (!fcoe->pool) {
 		spin_lock_init(&fcoe->lock);
@@ -717,18 +687,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			IXGBE_FCRXCTRL_FCOELLI |
 			IXGBE_FCRXCTRL_FCCRCBO |
 			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-#ifdef CONFIG_IXGBE_DCB
-	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-	for (i = 0; i < MAX_USER_PRIORITY; i++) {
-		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
-		tc &= (MAX_TRAFFIC_CLASS - 1);
-		if (fcoe->tc == tc) {
-			fcoe->up = i;
-			break;
-		}
-	}
-#endif
-
 	return;
 
 out_extra_ddp_buffer:
@@ -856,41 +814,6 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
 	return rc;
 }
 
-#ifdef CONFIG_IXGBE_DCB
-/**
- * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- * @up : 802.1p user priority bitmap
- *
- * Finds out the traffic class from the input user priority
- * bitmap for FCoE.
- *
- * Returns : 0 on success otherwise returns 1 on error
- */
-u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
-{
-	int i;
-	u32 up2tc;
-
-	/* valid user priority bitmap must not be 0 */
-	if (up) {
-		/* from user priority to the corresponding traffic class */
-		up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-		for (i = 0; i < MAX_USER_PRIORITY; i++) {
-			if (up & (1 << i)) {
-				up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
-				up2tc &= (MAX_TRAFFIC_CLASS - 1);
-				adapter->fcoe.tc = (u8)up2tc;
-				adapter->fcoe.up = i;
-				return 0;
-			}
-		}
-	}
-
-	return 1;
-}
-#endif /* CONFIG_IXGBE_DCB */
-
 /**
  * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
  * @netdev : ixgbe adapter
...
@@ -74,7 +74,6 @@ struct ixgbe_fcoe {
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
 #ifdef CONFIG_IXGBE_DCB
-	u8 tc;
 	u8 up;
 #endif
 };
...
@@ -35,6 +35,7 @@
 #include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <linux/slab.h>
@@ -771,15 +772,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 	return ret;
 }
 
-#define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
 /**
  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
  * @adapter: driver private struct
@@ -882,7 +874,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -1474,7 +1466,7 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbe_desc_unused(rx_ring);
 
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1880,8 +1872,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 
 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = data;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr;
@@ -2376,7 +2367,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
+			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
 	if (err) {
 		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
@@ -2488,8 +2479,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
  **/
 static irqreturn_t ixgbe_intr(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = data;
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u32 eicr;
@@ -2586,10 +2576,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 		err = ixgbe_request_msix_irqs(adapter);
 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 	} else {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 	}
 
 	if (err)
@@ -2600,15 +2590,13 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 
 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		int i, q_vectors;
 
 		q_vectors = adapter->num_msix_vectors;
 
 		i = q_vectors - 1;
-		free_irq(adapter->msix_entries[i].vector, netdev);
+		free_irq(adapter->msix_entries[i].vector, adapter);
 		i--;
 
 		for (; i >= 0; i--) {
@@ -2623,7 +2611,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 
 		ixgbe_reset_q_vectors(adapter);
 	} else {
-		free_irq(adapter->pdev->irq, netdev);
+		free_irq(adapter->pdev->irq, adapter);
 	}
 }
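The point of switching the dev_id cookie from the netdev to the adapter is that request_irq() and free_irq() must see the same pointer: free_irq() matches the registration by that cookie. A toy userspace model of the contract — every name here is invented for illustration:

```c
#include <assert.h>
#include <stddef.h>

/* toy model of the request_irq()/free_irq() dev_id contract: whatever
 * cookie was registered must be handed back to release the line */
struct irq_slot {
	void (*handler)(int irq, void *dev_id);
	void *dev_id;
};

static struct irq_slot slots[16];

static int toy_request_irq(int irq, void (*h)(int, void *), void *dev_id)
{
	if (slots[irq].handler)
		return -1;			/* line already claimed */
	slots[irq].handler = h;
	slots[irq].dev_id = dev_id;
	return 0;
}

static int toy_free_irq(int irq, void *dev_id)
{
	if (slots[irq].dev_id != dev_id)
		return -1;			/* wrong cookie: refuse */
	slots[irq].handler = NULL;
	slots[irq].dev_id = NULL;
	return 0;
}

static void toy_handler(int irq, void *dev_id)
{
	(void)irq;
	(void)dev_id;				/* would be the adapter */
}

int main(void)
{
	int adapter = 0;			/* stands in for struct ixgbe_adapter */

	assert(toy_request_irq(5, toy_handler, &adapter) == 0);
	assert(toy_free_irq(5, NULL) == -1);	/* mismatched cookie fails */
	assert(toy_free_irq(5, &adapter) == 0);	/* same pointer: ok */
	return 0;
}
```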
@@ -3130,7 +3118,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
 	ixgbe_rx_desc_queue_enable(adapter, ring);
-	ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
+	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -5181,7 +5169,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		adapter->ring_feature[RING_F_FCOE].indices = 0;
 #ifdef CONFIG_IXGBE_DCB
 		/* Default traffic class to use for FCoE */
-		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
 		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
@@ -6357,27 +6344,46 @@ static void ixgbe_service_task(struct work_struct *work)
 	ixgbe_service_event_complete(adapter);
 }
 
-static int ixgbe_tso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
+	u16 i = tx_ring->next_to_use;
+
+	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
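All three offload paths — FSO, TSO, and checksum — now funnel through this one helper instead of each open-coding the descriptor write. A compilable toy model of its advance-then-fill pattern, with made-up bit positions standing in for the IXGBE_* constants:

```c
#include <assert.h>
#include <stdint.h>

/* toy model of the context-descriptor write: advance next_to_use with
 * wraparound first, then fill the four descriptor words */
struct ctx_desc {
	uint32_t vlan_macip_lens, seqnum_seed, type_tucmd_mlhl, mss_l4len_idx;
};

struct tx_ring {
	struct ctx_desc desc[8];
	uint16_t count;
	uint16_t next_to_use;
};

#define TXD_CMD_DEXT     (1u << 29)	/* illustrative bit positions */
#define ADVTXD_DTYP_CTXT (2u << 20)

static void tx_ctxtdesc(struct tx_ring *ring, uint32_t vlan_macip_lens,
			uint32_t seed, uint32_t type_tucmd,
			uint32_t mss_l4len_idx)
{
	struct ctx_desc *cd = &ring->desc[ring->next_to_use];
	uint16_t i = ring->next_to_use + 1;

	ring->next_to_use = (i < ring->count) ? i : 0;	/* wrap at end */

	/* mark it as an advanced context descriptor, as the new helper does */
	type_tucmd |= TXD_CMD_DEXT | ADVTXD_DTYP_CTXT;

	cd->vlan_macip_lens = vlan_macip_lens;
	cd->seqnum_seed = seed;
	cd->type_tucmd_mlhl = type_tucmd;
	cd->mss_l4len_idx = mss_l4len_idx;
}

int main(void)
{
	struct tx_ring r = { .count = 8, .next_to_use = 7 };

	tx_ctxtdesc(&r, 0, 0, 0, 0);
	assert(r.next_to_use == 0);	/* wrapped around */
	return 0;
}
```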
+
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+		     u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
 	int err;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+	u32 vlan_macip_lens, type_tucmd;
 	u32 mss_l4len_idx, l4len;
 
-	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
-		l4len = tcp_hdrlen(skb);
-		*hdr_len += l4len;
-
-		if (protocol == htons(ETH_P_IP)) {
+	if (!skb_is_gso(skb))
+		return 0;
+
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
+	}
+
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
@@ -6385,6 +6391,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 						 iph->daddr, 0,
 						 IPPROTO_TCP,
 						 0);
+		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check =
@@ -6393,143 +6400,89 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 				     0, IPPROTO_TCP, 0);
 	}
 
-		i = tx_ring->next_to_use;
-
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		/* VLAN MACLEN IPLEN */
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= ((skb_network_offset(skb)) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		*hdr_len += skb_network_offset(skb);
-		vlan_macip_lens |=
-			(skb_transport_header(skb) - skb_network_header(skb));
-		*hdr_len +=
-			(skb_transport_header(skb) - skb_network_header(skb));
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
-				   IXGBE_ADVTXD_DTYP_CTXT);
-
-		if (protocol == htons(ETH_P_IP))
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
-		/* MSS L4LEN IDX */
-		mss_l4len_idx =
-			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
-		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
-		/* use index 1 for TSO */
-		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
-
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-	return false;
+	l4len = tcp_hdrlen(skb);
+	*hdr_len = skb_transport_offset(skb) + l4len;
+
+	/* mss_l4len_id: use 1 as index for TSO */
+	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_network_header_len(skb);
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+			  mss_l4len_idx);
+
+	return 1;
 }
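The mss_l4len_idx word carries the L4 header length, the MSS, and the context index (shifts 8, 16, and 4 in the ixgbe layout). A self-contained sketch of the packing with hypothetical TSO values:

```c
#include <assert.h>
#include <stdint.h>

/* shift values for the context descriptor's mss_l4len_idx word,
 * mirroring the IXGBE_ADVTXD_* shifts used above */
#define L4LEN_SHIFT 8
#define MSS_SHIFT   16
#define IDX_SHIFT   4

static uint32_t pack_mss_l4len_idx(uint32_t l4len, uint32_t mss, uint32_t idx)
{
	return (l4len << L4LEN_SHIFT) | (mss << MSS_SHIFT) | (idx << IDX_SHIFT);
}

int main(void)
{
	/* hypothetical TSO: 20-byte TCP header, 1448-byte MSS, index 1 */
	uint32_t w = pack_mss_l4len_idx(20, 1448, 1);

	assert(((w >> L4LEN_SHIFT) & 0xff) == 20);
	assert((w >> MSS_SHIFT) == 1448);
	assert(((w >> IDX_SHIFT) & 0xf) == 1);
	return 0;
}
```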
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      __be16 protocol)
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
-	u32 rtn = 0;
+	u32 vlan_macip_lens = 0;
+	u32 mss_l4len_idx = 0;
+	u32 type_tucmd = 0;
 
-	switch (protocol) {
-	case cpu_to_be16(ETH_P_IP):
-		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
-		switch (ip_hdr(skb)->protocol) {
-		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+			return false;
+	} else {
+		u8 l4_hdr = 0;
+		switch (protocol) {
+		case __constant_htons(ETH_P_IP):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+			l4_hdr = ip_hdr(skb)->protocol;
 			break;
-		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+		case __constant_htons(ETH_P_IPV6):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			l4_hdr = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but proto=%x!\n",
+					 skb->protocol);
+			}
 			break;
 		}
-		break;
-	case cpu_to_be16(ETH_P_IPV6):
-		/* XXX what about other V6 headers?? */
-		switch (ipv6_hdr(skb)->nexthdr) {
+
+		switch (l4_hdr) {
 		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			mss_l4len_idx = tcp_hdrlen(skb) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
 			break;
 		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			mss_l4len_idx = sizeof(struct sctphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_UDP:
+			mss_l4len_idx = sizeof(struct udphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but l4 proto=%x!\n",
+					 skb->protocol);
+			}
 			break;
 		}
-		break;
 	}
 
-	return rtn;
-}
-
-static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
-			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags,
-			  __be16 protocol)
-{
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
-		i = tx_ring->next_to_use;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= (skb_network_offset(skb) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			vlan_macip_lens |= (skb_transport_header(skb) -
-					    skb_network_header(skb));
-
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-				    IXGBE_ADVTXD_DTYP_CTXT);
-
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
-
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-		/* use index zero for tx checksum offload */
-		context_desc->mss_l4len_idx = 0;
-
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
-
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-
-	return false;
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+			  type_tucmd, mss_l4len_idx);
+
+	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
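With the sctp.h include added above, the rewritten checksum path can encode a fixed L4 header length for UDP and SCTP while taking TCP's from the packet (tcp_hdrlen is doff * 4, so options are included). A userspace sketch of that dispatch, using the usual fixed header sizes:

```c
#include <assert.h>

/* per-protocol L4 header lengths the rewritten ixgbe_tx_csum() encodes
 * into mss_l4len_idx */
enum { PROTO_TCP = 6, PROTO_UDP = 17, PROTO_SCTP = 132 };

static unsigned l4_hdr_len(int l4_proto, unsigned tcp_hdrlen)
{
	switch (l4_proto) {
	case PROTO_TCP:  return tcp_hdrlen;	/* variable, options included */
	case PROTO_UDP:  return 8;		/* sizeof(struct udphdr) */
	case PROTO_SCTP: return 12;		/* sizeof(struct sctphdr) */
	default:         return 0;		/* unknown: no L4 offload */
	}
}

int main(void)
{
	assert(l4_hdr_len(PROTO_TCP, 32) == 32);
	assert(l4_hdr_len(PROTO_UDP, 0) == 8);
	assert(l4_hdr_len(PROTO_SCTP, 0) == 12);
	return 0;
}
```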
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
@@ -6541,11 +6494,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
 	unsigned int bytecount = skb->len;
 	u16 gso_segs = 1;
+	u16 i;
 
 	i = tx_ring->next_to_use;
@@ -6811,7 +6765,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 			      input, common, ring->queue_index);
 }
 
-static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 	/* Herbert's original patch had:
@@ -6821,7 +6775,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 	/* We need to check again in a case another CPU has just
 	 * made room available. */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbe_desc_unused(tx_ring) < size))
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -6830,9 +6784,9 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 	return 0;
 }
 
-static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbe_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
@@ -6868,13 +6822,33 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	unsigned int first;
-	unsigned int tx_flags = 0;
-	u8 hdr_len = 0;
 	int tso;
-	int count = 0;
-	unsigned int f;
+	u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	unsigned short f;
+#endif
+	u16 first;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol;
+	u8 hdr_len = 0;
+
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
	 */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
 
 	protocol = vlan_get_protocol(skb);
@@ -6899,51 +6873,29 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
 	    (protocol == htons(ETH_P_FCOE)))
 		tx_flags |= IXGBE_TX_FLAGS_FCOE;
+#endif
 
-	/* four things can cause us to need a context descriptor */
-	if (skb_is_gso(skb) ||
-	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
-	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
-		count++;
-
-	count += TXD_USE_COUNT(skb_headlen(skb));
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
-	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
-		tx_ring->tx_stats.tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
-#endif
-
+	/* record the location of the first descriptor for this packet */
 	first = tx_ring->next_to_use;
 
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
 		/* setup tx offload for FCoE */
-		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-		if (tso)
+		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
 		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
-				protocol);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-		if (tso)
+		tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
-				       protocol) &&
-			 (skb->ip_summed == CHECKSUM_PARTIAL))
+		else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
 			tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
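The descriptor budget is now computed once at the top of ixgbe_xmit_frame_ring(): head plus fragments, plus the constant "+ 3" (two-descriptor gap and one context descriptor). A standalone sketch of the same count for a hypothetical frame layout:

```c
#include <assert.h>

#define MAX_DATA_PER_TXD (1 << 14)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), MAX_DATA_PER_TXD)

/* descriptors reserved before mapping a frame: head + frags, plus the
 * "+ 3" the new xmit path adds (2-descriptor gap + 1 context desc) */
static unsigned descs_needed(unsigned headlen, const unsigned *frag_len,
			     unsigned nr_frags)
{
	unsigned count = TXD_USE_COUNT(headlen);
	unsigned f;

	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(frag_len[f]);
	return count + 3;
}

int main(void)
{
	unsigned frags[2] = { 4096, 4096 };

	/* 64-byte head (1 desc) + two 4 KiB frags (1 desc each) + 3 */
	assert(descs_needed(64, frags, 2) == 6);
	return 0;
}
```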
@@ -6956,12 +6908,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	} else {
-		dev_kfree_skb_any(skb);
 		tx_ring->tx_buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;
+		goto out_drop;
 	}
 
 	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
...
@@ -605,6 +605,22 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+	/*
+	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
+	 * and 0x004 otherwise.
+	 */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
+		break;
+	case ixgbe_mac_X540:
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
+		break;
+	default:
+		break;
+	}
+
 	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
 }
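For reference, the rate factor written to RTTBCNRC elsewhere in this function is a fixed-point ratio of link speed to the VF cap, split across the RF_INT/RF_DEC fields defined in ixgbe_type.h below. A hedged userspace sketch — the 14-bit fractional layout is an assumption based on those defines:

```c
#include <assert.h>
#include <stdint.h>

/* sketch of building an RTTBCNRC-style rate-factor word from a VF Tx
 * rate cap; 14-bit fraction assumed per the RF_INT_SHIFT define */
#define RF_INT_SHIFT 14
#define RF_DEC_MASK  ((1u << RF_INT_SHIFT) - 1)

static uint32_t rate_factor(unsigned link_speed_mbps, unsigned tx_rate_mbps)
{
	uint32_t rf_int = link_speed_mbps / tx_rate_mbps;
	uint32_t rf_dec = (link_speed_mbps % tx_rate_mbps) *
			  (1u << RF_INT_SHIFT) / tx_rate_mbps;

	return (rf_int << RF_INT_SHIFT) | (rf_dec & RF_DEC_MASK);
}

int main(void)
{
	/* 10G link throttled to 2.5G: factor = 4.0 exactly */
	assert(rate_factor(10000, 2500) == (4u << RF_INT_SHIFT));
	return 0;
}
```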
...
@@ -534,7 +534,7 @@
 #define IXGBE_RTTBCNRC_RF_INT_SHIFT	14
 #define IXGBE_RTTBCNRC_RF_INT_MASK \
 	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+#define IXGBE_RTTBCNRM	0x04980
 
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc.  PTR Low */
...