Commit 85a43a9e authored Jun 27, 2011 by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6

parents 23efcb73 42532da6

Showing 7 changed files with 298 additions and 345 deletions
drivers/net/ixgbe/ixgbe.h         +16  -5
drivers/net/ixgbe/ixgbe_dcb_nl.c  +61  -13
drivers/net/ixgbe/ixgbe_fcoe.c    +31  -108
drivers/net/ixgbe/ixgbe_fcoe.h    +0   -1
drivers/net/ixgbe/ixgbe_main.c    +173 -217
drivers/net/ixgbe/ixgbe_sriov.c   +16  -0
drivers/net/ixgbe/ixgbe_type.h    +1   -1
drivers/net/ixgbe/ixgbe.h

@@ -131,6 +131,13 @@ struct vf_macvlans {
 	u8 vf_macvlan[ETH_ALEN];
 };
 
+#define IXGBE_MAX_TXD_PWR	14
+#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
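[Editor's note] A quick worked check of the worst-case arithmetic above (my numbers, not part of the commit): with IXGBE_MAX_TXD_PWR = 14 a descriptor carries up to 16384 bytes, so a 4096-byte page always fits in one descriptor and DESC_NEEDED reduces to MAX_SKB_FRAGS + 4. A standalone C model, assuming 4 KiB pages and MAX_SKB_FRAGS = 18:

	#include <stdio.h>

	#define IXGBE_MAX_TXD_PWR	14
	#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)	/* 16384 bytes */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)

	int main(void)
	{
		unsigned int page_size = 4096;		/* assumed PAGE_SIZE */
		unsigned int max_skb_frags = 18;	/* assumed MAX_SKB_FRAGS */

		/* a whole page fits in a single descriptor at this size */
		printf("TXD_USE_COUNT(PAGE_SIZE) = %u\n", TXD_USE_COUNT(page_size));
		/* worst case: one descriptor per frag, plus fixed slack */
		printf("DESC_NEEDED = %u\n",
		       max_skb_frags * TXD_USE_COUNT(page_size) + 4);	/* 22 */
		return 0;
	}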
@@ -306,9 +313,13 @@ struct ixgbe_q_vector {
 	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
 #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
 
-#define IXGBE_DESC_UNUSED(R) \
-	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
-	(R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
 
 #define IXGBE_RX_DESC_ADV(R, i) \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
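[Editor's note] A worked example of the unused-descriptor arithmetic (my numbers): for a 512-entry ring with next_to_clean = 10 and next_to_use = 500 the count wraps, giving 512 + 10 - 500 - 1 = 21 free entries; with the indices swapped it is 0 + 500 - 10 - 1 = 489. A userspace model of the same expression:

	#include <stdio.h>

	struct ring {
		unsigned short next_to_clean;
		unsigned short next_to_use;
		unsigned short count;
	};

	/* mirrors ixgbe_desc_unused(): one slot always stays empty so that
	 * next_to_use == next_to_clean means "empty" rather than "full" */
	static unsigned short desc_unused(const struct ring *r)
	{
		unsigned short ntc = r->next_to_clean;
		unsigned short ntu = r->next_to_use;

		return ((ntc > ntu) ? 0 : r->count) + ntc - ntu - 1;
	}

	int main(void)
	{
		struct ring a = { 10, 500, 512 };
		struct ring b = { 500, 10, 512 };

		printf("%u %u\n", desc_unused(&a), desc_unused(&b)); /* 21 489 */
		return 0;
	}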
@@ -576,10 +587,10 @@ extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
 				struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 		     u32 tx_flags, u8 *hdr_len);
 extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
drivers/net/ixgbe/ixgbe_dcb_nl.c

@@ -330,24 +330,20 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int ret;
+#ifdef IXGBE_FCOE
 	struct dcb_app app = {
 			      .selector = DCB_APP_IDTYPE_ETHTYPE,
 			      .protocol = ETH_P_FCOE,
 			     };
 	u8 up = dcb_getapp(netdev, &app);
-	int ret;
+#endif
 
 	ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
 				 MAX_TRAFFIC_CLASS);
 	if (ret)
 		return DCB_NO_HW_CHG;
 
-	/* In IEEE mode app data must be parsed into DCBX format for
-	 * hardware routines.
-	 */
-	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
-		up = (1 << up);
-
 #ifdef IXGBE_FCOE
 	if (up && (up != (1 << adapter->fcoe.up)))
 		adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
@@ -361,7 +357,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 		while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 			usleep_range(1000, 2000);
 
-		ixgbe_fcoe_setapp(adapter, up);
+		adapter->fcoe.up = ffs(up) - 1;
 
 		if (netif_running(netdev))
 			netdev->netdev_ops->ndo_stop(netdev);
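[Editor's note] The replacement line recovers a priority number from a user-priority bitmap with ffs(up) - 1 (the removed ixgbe_fcoe_setapp() did the same lookup via the RTTUP2TC register; see the ixgbe_fcoe.c hunks below). A one-line illustration of the round trip (my sketch; ffs() here is the POSIX one from strings.h):

	#include <stdio.h>
	#include <strings.h>

	int main(void)
	{
		unsigned char prio = 3;
		unsigned char up = 1 << prio;	/* priority 3 -> bitmap 0x08 */

		printf("bitmap 0x%02x -> priority %d\n", up, ffs(up) - 1);
		return 0;
	}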
@@ -674,24 +670,75 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
 	return err;
 }
 
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	if (netif_running(dev))
+		dev->netdev_ops->ndo_stop(dev);
+
+	ixgbe_clear_interrupt_scheme(adapter);
+	ixgbe_init_interrupt_scheme(adapter);
+
+	if (netif_running(dev))
+		dev->netdev_ops->ndo_open(dev);
+}
+#endif
+
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
 				   struct dcb_app *app)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err = -EINVAL;
 
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
-		return -EINVAL;
+		return err;
 
-	dcb_setapp(dev, app);
+	err = dcb_ieee_setapp(dev, app);
 
 #ifdef IXGBE_FCOE
-	if (app->selector == 1 && app->protocol == ETH_P_FCOE &&
-	    adapter->fcoe.tc == app->priority)
-		ixgbe_dcbnl_set_all(dev);
+	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+	    app->protocol == ETH_P_FCOE) {
+		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+		if (app_mask & (1 << adapter->fcoe.up))
+			return err;
+
+		adapter->fcoe.up = app->priority;
+		ixgbe_dcbnl_devreset(dev);
+	}
 #endif
 	return 0;
 }
 
+static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
+				   struct dcb_app *app)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err;
+
+	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	err = dcb_ieee_delapp(dev, app);
+
+#ifdef IXGBE_FCOE
+	if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+	    app->protocol == ETH_P_FCOE) {
+		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+		if (app_mask & (1 << adapter->fcoe.up))
+			return err;
+
+		adapter->fcoe.up = app_mask ?
+				   ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
+		ixgbe_dcbnl_devreset(dev);
+	}
+#endif
+	return err;
+}
+
 static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
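[Editor's note] In the delapp path above, when the deleted entry was the one FCoE was using, fcoe.up falls back to the lowest remaining priority in the app mask, or to IXGBE_FCOE_DEFTC when none is left. A tiny check of that selection rule (my sketch; IXGBE_FCOE_DEFTC assumed to be 3 for illustration):

	#include <stdio.h>
	#include <strings.h>

	#define DEFTC 3	/* stand-in for IXGBE_FCOE_DEFTC, assumed value */

	static int next_up(unsigned char app_mask)
	{
		return app_mask ? ffs(app_mask) - 1 : DEFTC;
	}

	int main(void)
	{
		printf("%d\n", next_up(0x28));	/* priorities {3,5} -> 3 */
		printf("%d\n", next_up(0x00));	/* empty table -> default 3 */
		return 0;
	}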
@@ -743,6 +790,7 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
 	.ieee_getpfc	= ixgbe_dcbnl_ieee_getpfc,
 	.ieee_setpfc	= ixgbe_dcbnl_ieee_setpfc,
 	.ieee_setapp	= ixgbe_dcbnl_ieee_setapp,
+	.ieee_delapp	= ixgbe_dcbnl_ieee_delapp,
 	.getstate	= ixgbe_dcbnl_get_state,
 	.setstate	= ixgbe_dcbnl_set_state,
 	.getpermhwaddr	= ixgbe_dcbnl_get_perm_hw_addr,
drivers/net/ixgbe/ixgbe_fcoe.c

@@ -26,9 +26,6 @@
  *******************************************************************************/
 
 #include "ixgbe.h"
-#ifdef CONFIG_IXGBE_DCB
-#include "ixgbe_dcb_82599.h"
-#endif /* CONFIG_IXGBE_DCB */
 #include <linux/if_ether.h>
 #include <linux/gfp.h>
 #include <linux/if_vlan.h>
@@ -474,23 +471,17 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
  *
  * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
  */
-int ixgbe_fso(struct ixgbe_adapter *adapter,
-	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 	      u32 tx_flags, u8 *hdr_len)
 {
-	u8 sof, eof;
+	struct fc_frame_header *fh;
 	u32 vlan_macip_lens;
-	u32 fcoe_sof_eof;
-	u32 type_tucmd;
+	u32 fcoe_sof_eof = 0;
 	u32 mss_l4len_idx;
-	int mss = 0;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	struct fc_frame_header *fh;
+	u8 sof, eof;
 
 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
-		e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
-		      skb_shinfo(skb)->gso_type);
+		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+			skb_shinfo(skb)->gso_type);
 		return -EINVAL;
 	}
@@ -501,23 +492,22 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 			   sizeof(struct fcoe_hdr));
 
 	/* sets up SOF and ORIS */
-	fcoe_sof_eof = 0;
 	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
 	switch (sof) {
 	case FC_SOF_I2:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_I3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
+			       IXGBE_ADVTXD_FCOEF_ORIS;
 		break;
 	case FC_SOF_N2:
 		break;
 	case FC_SOF_N3:
-		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
 		break;
 	default:
-		e_warn(drv, "unknown sof = 0x%x\n", sof);
+		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
 		return -EINVAL;
 	}
@@ -530,12 +520,11 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		break;
 	case FC_EOF_T:
 		/* lso needs ORIE */
-		if (skb_is_gso(skb)) {
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
-			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
-		} else {
+		if (skb_is_gso(skb))
+			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
+					IXGBE_ADVTXD_FCOEF_ORIE;
+		else
 			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
-		}
 		break;
 	case FC_EOF_NI:
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
@@ -544,7 +533,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
 		break;
 	default:
-		e_warn(drv, "unknown eof = 0x%x\n", eof);
+		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
 		return -EINVAL;
 	}
@@ -553,43 +542,28 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
 		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
 
-	/* hdr_len includes fc_hdr if FCoE lso is enabled */
+	/* include trailer in headlen as it is replicated per frame */
 	*hdr_len = sizeof(struct fcoe_crc_eof);
+
+	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
 	if (skb_is_gso(skb))
 		*hdr_len += (skb_transport_offset(skb) +
 			     sizeof(struct fc_frame_header));
-	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = (skb_transport_offset(skb) +
-			   sizeof(struct fc_frame_header));
-	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
-			    << IXGBE_ADVTXD_MACLEN_SHIFT);
-	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-
-	/* type_tycmd and mss: set TUCMD.FCoE to enable offload */
-	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
-		     IXGBE_ADVTXT_TUCMD_FCOE;
-	if (skb_is_gso(skb))
-		mss = skb_shinfo(skb)->gso_size;
+
 	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
-	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
-			(1 << IXGBE_ADVTXD_IDX_SHIFT);
+	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_transport_offset(skb) +
+			  sizeof(struct fc_frame_header);
+	vlan_macip_lens |= (skb_transport_offset(skb) - 4) <<
+			   IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	/* write context desc */
-	i = tx_ring->next_to_use;
-	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
-	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
-	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-	tx_buffer_info = &tx_ring->tx_buffer_info[i];
-	tx_buffer_info->time_stamp = jiffies;
-	tx_buffer_info->next_to_watch = i;
-
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
-	tx_ring->next_to_use = i;
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
+			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
 
 	return skb_is_gso(skb);
 }
@@ -648,10 +622,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-#ifdef CONFIG_IXGBE_DCB
-	u8 tc;
-	u32 up2tc;
-#endif
 
 	if (!fcoe->pool) {
 		spin_lock_init(&fcoe->lock);
@@ -717,18 +687,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			IXGBE_FCRXCTRL_FCOELLI |
 			IXGBE_FCRXCTRL_FCCRCBO |
 			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-#ifdef CONFIG_IXGBE_DCB
-	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-	for (i = 0; i < MAX_USER_PRIORITY; i++) {
-		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
-		tc &= (MAX_TRAFFIC_CLASS - 1);
-		if (fcoe->tc == tc) {
-			fcoe->up = i;
-			break;
-		}
-	}
-#endif
-
 	return;
 
 out_extra_ddp_buffer:
@@ -856,41 +814,6 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
 	return rc;
 }
 
-#ifdef CONFIG_IXGBE_DCB
-/**
- * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- * @up : 802.1p user priority bitmap
- *
- * Finds out the traffic class from the input user priority
- * bitmap for FCoE.
- *
- * Returns : 0 on success otherwise returns 1 on error
- */
-u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
-{
-	int i;
-	u32 up2tc;
-
-	/* valid user priority bitmap must not be 0 */
-	if (up) {
-		/* from user priority to the corresponding traffic class */
-		up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
-		for (i = 0; i < MAX_USER_PRIORITY; i++) {
-			if (up & (1 << i)) {
-				up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
-				up2tc &= (MAX_TRAFFIC_CLASS - 1);
-				adapter->fcoe.tc = (u8)up2tc;
-				adapter->fcoe.up = i;
-				return 0;
-			}
-		}
-	}
-
-	return 1;
-}
-#endif /* CONFIG_IXGBE_DCB */
-
 /**
  * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
  * @netdev : ixgbe adapter
drivers/net/ixgbe/ixgbe_fcoe.h

@@ -74,7 +74,6 @@ struct ixgbe_fcoe {
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
 #ifdef CONFIG_IXGBE_DCB
-	u8 tc;
 	u8 up;
 #endif
 };
drivers/net/ixgbe/ixgbe_main.c

@@ -35,6 +35,7 @@
 #include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <linux/slab.h>
@@ -771,15 +772,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 	return ret;
 }
 
-#define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
 /**
  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
  * @adapter: driver private struct
@@ -882,7 +874,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -1474,7 +1466,7 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbe_desc_unused(rx_ring);
 
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1880,8 +1872,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = data;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr;
@@ -2376,7 +2367,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
+			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
 	if (err) {
 		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
@@ -2488,8 +2479,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 **/
 static irqreturn_t ixgbe_intr(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = data;
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u32 eicr;
@@ -2586,10 +2576,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 		err = ixgbe_request_msix_irqs(adapter);
 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 	} else {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 	}
 
 	if (err)
@@ -2600,15 +2590,13 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		int i, q_vectors;
 
 		q_vectors = adapter->num_msix_vectors;
 
 		i = q_vectors - 1;
-		free_irq(adapter->msix_entries[i].vector, netdev);
+		free_irq(adapter->msix_entries[i].vector, adapter);
 
 		i--;
 		for (; i >= 0; i--) {
@@ -2623,7 +2611,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 		ixgbe_reset_q_vectors(adapter);
 	} else {
-		free_irq(adapter->pdev->irq, netdev);
+		free_irq(adapter->pdev->irq, adapter);
 	}
 }
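[Editor's note] On the interrupt changes above: the void *dev_id cookie given to request_irq() is handed back to the handler verbatim and must be the same pointer later passed to free_irq(), which is why request, handler and free all switch from netdev to adapter together. A minimal sketch of the pattern (my_adapter, my_isr and "my-driver" are illustrative names, not from this driver):

	#include <linux/interrupt.h>

	struct my_adapter {
		int stats;
	};

	static irqreturn_t my_isr(int irq, void *data)
	{
		struct my_adapter *adapter = data;	/* no netdev_priv() hop */

		adapter->stats++;	/* ... ack hardware, schedule NAPI ... */
		return IRQ_HANDLED;
	}

	static int my_request(struct my_adapter *adapter, int irq)
	{
		/* pass the adapter itself as dev_id ... */
		return request_irq(irq, my_isr, 0, "my-driver", adapter);
	}

	static void my_free(struct my_adapter *adapter, int irq)
	{
		free_irq(irq, adapter);	/* ... and free with the identical cookie */
	}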
@@ -3130,7 +3118,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
 	ixgbe_rx_desc_queue_enable(adapter, ring);
-	ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
+	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -5181,7 +5169,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	adapter->ring_feature[RING_F_FCOE].indices = 0;
 #ifdef CONFIG_IXGBE_DCB
 	/* Default traffic class to use for FCoE */
-	adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
 	adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
@@ -6357,27 +6344,46 @@ static void ixgbe_service_task(struct work_struct *work)
 	ixgbe_service_event_complete(adapter);
 }
 
-static int ixgbe_tso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
+	u16 i = tx_ring->next_to_use;
+
+	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
+
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+		     u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
 	int err;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+	u32 vlan_macip_lens, type_tucmd;
 	u32 mss_l4len_idx, l4len;
 
-	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
-		l4len = tcp_hdrlen(skb);
-		*hdr_len += l4len;
-
-		if (protocol == htons(ETH_P_IP)) {
+	if (!skb_is_gso(skb))
+		return 0;
+
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
+	}
+
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
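[Editor's note] The new ixgbe_tx_ctxtdesc() centralizes the four-field context-descriptor write that ixgbe_tso(), ixgbe_tx_csum() and ixgbe_fso() previously each open-coded, including the ring-index wrap and the DEXT/CTXT type bits. A standalone model of the helper (types, bit positions and the 8-entry ring are my inventions for illustration, not the hardware layout):

	#include <stdint.h>
	#include <stdio.h>

	struct ctx_desc {
		uint32_t vlan_macip_lens;
		uint32_t seqnum_seed;	/* FCoE SOF/EOF; 0 for TSO and csum */
		uint32_t type_tucmd_mlhl;
		uint32_t mss_l4len_idx;
	};

	struct ring {
		struct ctx_desc desc[8];
		uint16_t next_to_use;
		uint16_t count;
	};

	#define CMD_DEXT	(1u << 29)	/* placeholder bit layout */
	#define DTYP_CTXT	(2u << 20)

	static void tx_ctxtdesc(struct ring *r, uint32_t vlan_macip_lens,
				uint32_t seed, uint32_t type_tucmd,
				uint32_t mss_l4len_idx)
	{
		struct ctx_desc *d = &r->desc[r->next_to_use];
		uint16_t i = r->next_to_use + 1;

		r->next_to_use = (i < r->count) ? i : 0;	/* wrap the ring */
		type_tucmd |= CMD_DEXT | DTYP_CTXT;	/* mark as context desc */

		d->vlan_macip_lens = vlan_macip_lens;
		d->seqnum_seed = seed;
		d->type_tucmd_mlhl = type_tucmd;
		d->mss_l4len_idx = mss_l4len_idx;
	}

	int main(void)
	{
		struct ring r = { .next_to_use = 7, .count = 8 };

		tx_ctxtdesc(&r, 0x40e, 0, 0, 0x10001);
		printf("next_to_use wrapped to %u\n", r.next_to_use);	/* 0 */
		return 0;
	}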
@@ -6385,6 +6391,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 							 iph->daddr, 0,
 							 IPPROTO_TCP,
 							 0);
+		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check =
@@ -6393,143 +6400,89 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 				     0, IPPROTO_TCP, 0);
 	}
 
-		i = tx_ring->next_to_use;
-
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		/* VLAN MACLEN IPLEN */
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= ((skb_network_offset(skb)) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		*hdr_len += skb_network_offset(skb);
-		vlan_macip_lens |=
-		    (skb_transport_header(skb) - skb_network_header(skb));
-		*hdr_len +=
-		    (skb_transport_header(skb) - skb_network_header(skb));
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
-				   IXGBE_ADVTXD_DTYP_CTXT);
-
-		if (protocol == htons(ETH_P_IP))
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
-		/* MSS L4LEN IDX */
-		mss_l4len_idx =
-		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
-		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
-		/* use index 1 for TSO */
-		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
-
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-	return false;
+	l4len = tcp_hdrlen(skb);
+	*hdr_len = skb_transport_offset(skb) + l4len;
+
+	/* mss_l4len_id: use 1 as index for TSO */
+	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_network_header_len(skb);
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+			  mss_l4len_idx);
+
+	return 1;
 }
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      __be16 protocol)
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
-	u32 rtn = 0;
-
-	switch (protocol) {
-	case cpu_to_be16(ETH_P_IP):
-		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
-		switch (ip_hdr(skb)->protocol) {
-		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-			break;
-		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-			break;
-		}
-		break;
-	case cpu_to_be16(ETH_P_IPV6):
-		/* XXX what about other V6 headers?? */
-		switch (ipv6_hdr(skb)->nexthdr) {
-		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-			break;
-		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-			break;
-		}
-		break;
-	default:
-		if (unlikely(net_ratelimit()))
-			e_warn(probe, "partial checksum but proto=%x!\n",
-			       protocol);
-		break;
-	}
-
-	return rtn;
-}
-
-static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
-			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags,
-			  __be16 protocol)
-{
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
-		i = tx_ring->next_to_use;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= (skb_network_offset(skb) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			vlan_macip_lens |= (skb_transport_header(skb) -
-					    skb_network_header(skb));
-
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-				    IXGBE_ADVTXD_DTYP_CTXT);
-
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
-
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-		/* use index zero for tx checksum offload */
-		context_desc->mss_l4len_idx = 0;
-
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
-
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-
-	return false;
+	u32 vlan_macip_lens = 0;
+	u32 mss_l4len_idx = 0;
+	u32 type_tucmd = 0;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+			return false;
+	} else {
+		u8 l4_hdr = 0;
+		switch (protocol) {
+		case __constant_htons(ETH_P_IP):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+			l4_hdr = ip_hdr(skb)->protocol;
+			break;
+		case __constant_htons(ETH_P_IPV6):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			l4_hdr = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but proto=%x!\n",
+					 skb->protocol);
+			}
+			break;
+		}
+
+		switch (l4_hdr) {
+		case IPPROTO_TCP:
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			mss_l4len_idx = tcp_hdrlen(skb) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_SCTP:
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			mss_l4len_idx = sizeof(struct sctphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_UDP:
+			mss_l4len_idx = sizeof(struct udphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but l4 proto=%x!\n",
+					 skb->protocol);
+			}
+			break;
+		}
+	}
+
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+			  type_tucmd, mss_l4len_idx);
+
+	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
@@ -6541,11 +6494,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
 	unsigned int bytecount = skb->len;
 	u16 gso_segs = 1;
+	u16 i;
 
 	i = tx_ring->next_to_use;
@@ -6811,7 +6765,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 					      input, common, ring->queue_index);
 }
 
-static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 	/* Herbert's original patch had:
@@ -6821,7 +6775,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 	/* We need to check again in a case another CPU has just
 	 * made room available. */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbe_desc_unused(tx_ring) < size))
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -6830,9 +6784,9 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 	return 0;
 }
 
-static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbe_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
@@ -6868,13 +6822,33 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	unsigned int first;
-	unsigned int tx_flags = 0;
-	u8 hdr_len = 0;
 	int tso;
-	int count = 0;
-	unsigned int f;
+	u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	unsigned short f;
+#endif
+	u16 first;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol;
+	u8 hdr_len = 0;
+
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
 
 	protocol = vlan_get_protocol(skb);
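[Editor's note] A back-of-envelope check of the "count + 3" reservation above, with made-up frame numbers: a 1448-byte head plus two page fragments on a 4 KiB-page build (where PAGE_SIZE <= IXGBE_MAX_DATA_PER_TXD) needs one descriptor for the head and one per fragment, so the driver reserves 3 + 3 = 6 slots:

	#include <stdio.h>

	int main(void)
	{
		unsigned int head_descs = 1;	/* 1448 bytes < 16384 */
		unsigned int frag_descs = 2;	/* one per page-sized frag */
		unsigned int count = head_descs + frag_descs;

		/* + 2 gap so the tail never touches the head,
		 * + 1 for the context descriptor */
		printf("reserve %u descriptors\n", count + 3);	/* 6 */
		return 0;
	}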
@@ -6899,51 +6873,29 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
 	    (protocol == htons(ETH_P_FCOE)))
 		tx_flags |= IXGBE_TX_FLAGS_FCOE;
-#endif
-
-	/* four things can cause us to need a context descriptor */
-	if (skb_is_gso(skb) ||
-	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
-	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
-		count++;
-
-	count += TXD_USE_COUNT(skb_headlen(skb));
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
-	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
-		tx_ring->tx_stats.tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
 
+#endif
+	/* record the location of the first descriptor for this packet */
 	first = tx_ring->next_to_use;
+
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
 		/* setup tx offload for FCoE */
-		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-		if (tso)
+		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
 		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
-				protocol);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-		if (tso)
+		tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
-				       protocol) &&
-			 (skb->ip_summed == CHECKSUM_PARTIAL))
+		else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
 			tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
@@ -6956,12 +6908,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	} else {
-		dev_kfree_skb_any(skb);
 		tx_ring->tx_buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;
+		goto out_drop;
 	}
 
 	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
drivers/net/ixgbe/ixgbe_sriov.c

@@ -605,6 +605,22 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+	/*
+	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+	 * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
+	 * and 0x004 otherwise.
+	 */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
+		break;
+	case ixgbe_mac_X540:
+		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
+		break;
+	default:
+		break;
+	}
+
 	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
 }
drivers/net/ixgbe/ixgbe_type.h

@@ -534,7 +534,7 @@
 #define IXGBE_RTTBCNRC_RF_INT_SHIFT	14
 #define IXGBE_RTTBCNRC_RF_INT_MASK \
 	(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-
+#define IXGBE_RTTBCNRM	0x04980
 
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL	0x02410 /* FC User Desc. PTR Low */