Kirill Smelkov / linux / Commits

Commit d5b1d8cd, authored Oct 07, 2011 by David S. Miller

Merge git://github.com/Jkirsher/net-next

parents 5d6bcdfe ebe42d16

Showing 9 changed files with 600 additions and 521 deletions (+600 -521)
drivers/net/ethernet/intel/e1000/e1000.h        +6   -6
drivers/net/ethernet/intel/e1000/e1000_hw.c     +11  -11
drivers/net/ethernet/intel/e1000/e1000_main.c   +81  -88
drivers/net/ethernet/intel/e1000e/ich8lan.c     +1   -1
drivers/net/ethernet/intel/igb/e1000_82575.h    +2   -0
drivers/net/ethernet/intel/igb/igb.h            +32  -22
drivers/net/ethernet/intel/igb/igb_ethtool.c    +9   -7
drivers/net/ethernet/intel/igb/igb_main.c       +456 -384
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c   +2   -2
drivers/net/ethernet/intel/e1000/e1000.h

@@ -214,9 +214,6 @@ struct e1000_rx_ring {
 /* board specific private data structure */
 
 struct e1000_adapter {
-	struct timer_list tx_fifo_stall_timer;
-	struct timer_list watchdog_timer;
-	struct timer_list phy_info_timer;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	u16 mng_vlan_id;
 	u32 bd_number;
@@ -237,7 +234,6 @@ struct e1000_adapter {
 	u16 tx_itr;
 	u16 rx_itr;
 
-	struct work_struct reset_task;
 	u8 fc_autoneg;
 
 	/* TX */
@@ -310,8 +306,12 @@ struct e1000_adapter {
 	bool discarding;
 
-	struct work_struct fifo_stall_task;
-	struct work_struct phy_info_task;
+	struct work_struct reset_task;
+	struct delayed_work watchdog_task;
+	struct delayed_work fifo_stall_task;
+	struct delayed_work phy_info_task;
+	struct mutex mutex;
 
 };
 
 enum e1000_state_t {
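The e1000.h hunks above swap three timer_list members for delayed_work items plus a private mutex. A minimal sketch of that pattern, with hypothetical names (my_adapter, my_watchdog) rather than the driver's own:

#include <linux/workqueue.h>
#include <linux/mutex.h>

struct my_adapter {
	struct delayed_work watchdog_task;
	struct mutex mutex;		/* serializes the work handlers */
};

static void my_watchdog(struct work_struct *work)
{
	struct my_adapter *a = container_of(work, struct my_adapter,
					    watchdog_task.work);

	mutex_lock(&a->mutex);
	/* poll hardware here; sleeping is legal in a work item */
	mutex_unlock(&a->mutex);
}

static void my_adapter_start(struct my_adapter *a)
{
	mutex_init(&a->mutex);
	INIT_DELAYED_WORK(&a->watchdog_task, my_watchdog);
	schedule_delayed_work(&a->watchdog_task, 2 * HZ);
}

The payoff is execution context: a timer callback runs in softirq context and may not sleep, while a work item runs in a kernel thread, which is what allows the mdelay() calls elsewhere in this merge to become msleep().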
drivers/net/ethernet/intel/e1000/e1000_hw.c

@@ -5385,7 +5385,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
 		if (ret_val)
 			return ret_val;
 
-		mdelay(20);
+		msleep(20);
 
 		ret_val = e1000_write_phy_reg(hw, 0x0000,
 					      IGP01E1000_IEEE_FORCE_GIGA);
@@ -5413,7 +5413,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
 		if (ret_val)
 			return ret_val;
 
-		mdelay(20);
+		msleep(20);
 
 		/* Now enable the transmitter */
 		ret_val =
@@ -5440,7 +5440,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
 		if (ret_val)
 			return ret_val;
 
-		mdelay(20);
+		msleep(20);
 
 		ret_val = e1000_write_phy_reg(hw, 0x0000,
 					      IGP01E1000_IEEE_FORCE_GIGA);
@@ -5457,7 +5457,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
 		if (ret_val)
 			return ret_val;
 
-		mdelay(20);
+		msleep(20);
 
 		/* Now enable the transmitter */
 		ret_val =
@@ -5750,26 +5750,26 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
 		if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
 			break;
-		mdelay(100);
+		msleep(100);
 	}
 
 	/* Recommended delay time after link has been lost */
-	mdelay(1000);
+	msleep(1000);
 
 	/* Now we will re-enable th transmitter on the PHY */
 
 	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
 	if (ret_val)
 		return ret_val;
-	mdelay(50);
+	msleep(50);
 	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
 	if (ret_val)
 		return ret_val;
-	mdelay(50);
+	msleep(50);
 	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
 	if (ret_val)
 		return ret_val;
-	mdelay(50);
+	msleep(50);
 	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
 	if (ret_val)
 		return ret_val;
@@ -5794,7 +5794,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
 		if (mii_status_reg & MII_SR_LINK_STATUS)
 			break;
-		mdelay(100);
+		msleep(100);
 	}
 
 	return E1000_SUCCESS;
 }
@@ -5825,6 +5825,6 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
 static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
 {
 	e_dbg("e1000_get_phy_cfg_done");
-	mdelay(10);
+	msleep(10);
 	return E1000_SUCCESS;
 }
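Every change in this file is the same one-word substitution, and it is only legal because of the timer-to-work conversion in e1000_main.c below: mdelay() busy-waits and can run anywhere, msleep() sleeps and requires process context. A side-by-side sketch of the two:

#include <linux/delay.h>

static void settle_in_atomic_context(void)
{
	mdelay(20);	/* spins the CPU for 20 ms; safe even in softirq */
}

static void settle_in_process_context(void)
{
	msleep(20);	/* yields the CPU; the scheduler runs other work */
}

With delays of up to 1000 ms in the polarity-reversal workaround, the conversion returns as much as a full second of CPU time per invocation to the rest of the system.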
drivers/net/ethernet/intel/e1000/e1000_main.c

@@ -131,10 +131,8 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 				struct e1000_rx_ring *rx_ring);
 static void e1000_set_rx_mode(struct net_device *netdev);
-static void e1000_update_phy_info(unsigned long data);
 static void e1000_update_phy_info_task(struct work_struct *work);
-static void e1000_watchdog(unsigned long data);
-static void e1000_82547_tx_fifo_stall(unsigned long data);
+static void e1000_watchdog(struct work_struct *work);
 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				    struct net_device *netdev);
@@ -487,12 +485,21 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 		mii_reg |= MII_CR_POWER_DOWN;
 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
-		mdelay(1);
+		msleep(1);
 	}
 out:
 	return;
 }
 
+static void e1000_down_and_stop(struct e1000_adapter *adapter)
+{
+	set_bit(__E1000_DOWN, &adapter->flags);
+	cancel_work_sync(&adapter->reset_task);
+	cancel_delayed_work_sync(&adapter->watchdog_task);
+	cancel_delayed_work_sync(&adapter->phy_info_task);
+	cancel_delayed_work_sync(&adapter->fifo_stall_task);
+}
+
 void e1000_down(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
@@ -522,13 +529,9 @@ void e1000_down(struct e1000_adapter *adapter)
 	/*
 	 * Setting DOWN must be after irq_disable to prevent
 	 * a screaming interrupt.  Setting DOWN also prevents
-	 * timers and tasks from rescheduling.
+	 * tasks from rescheduling.
 	 */
-	set_bit(__E1000_DOWN, &adapter->flags);
-
-	del_timer_sync(&adapter->tx_fifo_stall_timer);
-	del_timer_sync(&adapter->watchdog_timer);
-	del_timer_sync(&adapter->phy_info_timer);
+	e1000_down_and_stop(adapter);
 
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
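The new e1000_down_and_stop() helper introduced above encodes a teardown ordering worth noting: the __E1000_DOWN bit is set before any cancellation, so a handler that is mid-run sees the flag and refuses to re-arm itself, and the *_sync cancellations then guarantee nothing is still executing when the function returns. A commented restatement of the same lines:

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	/* stop handlers from rescheduling themselves first ... */
	set_bit(__E1000_DOWN, &adapter->flags);

	/* ... then wait out anything already queued or running */
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}

Doing it in the opposite order would leave a window where a still-running handler re-queues itself just after being cancelled.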
@@ -543,10 +546,10 @@ static void e1000_reinit_safe(struct e1000_adapter *adapter)
 {
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 		msleep(1);
-	rtnl_lock();
+	mutex_lock(&adapter->mutex);
 	e1000_down(adapter);
 	e1000_up(adapter);
-	rtnl_unlock();
+	mutex_unlock(&adapter->mutex);
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 }
 
@@ -1120,21 +1123,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (!is_valid_ether_addr(netdev->perm_addr))
 		e_err(probe, "Invalid MAC Address\n");
 
-	init_timer(&adapter->tx_fifo_stall_timer);
-	adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
-	adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
-
-	init_timer(&adapter->watchdog_timer);
-	adapter->watchdog_timer.function = e1000_watchdog;
-	adapter->watchdog_timer.data = (unsigned long)adapter;
-
-	init_timer(&adapter->phy_info_timer);
-	adapter->phy_info_timer.function = e1000_update_phy_info;
-	adapter->phy_info_timer.data = (unsigned long)adapter;
-
+	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
+	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
+			  e1000_82547_tx_fifo_stall_task);
+	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
-	INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
-	INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
 
 	e1000_check_options(adapter);
@@ -1279,13 +1273,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	set_bit(__E1000_DOWN, &adapter->flags);
-	del_timer_sync(&adapter->tx_fifo_stall_timer);
-	del_timer_sync(&adapter->watchdog_timer);
-	del_timer_sync(&adapter->phy_info_timer);
-
-	cancel_work_sync(&adapter->reset_task);
+	e1000_down_and_stop(adapter);
 
 	e1000_release_manageability(adapter);
 
 	unregister_netdev(netdev);
@@ -1329,6 +1317,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 	e1000_irq_disable(adapter);
 
 	spin_lock_init(&adapter->stats_lock);
+	mutex_init(&adapter->mutex);
 
 	set_bit(__E1000_DOWN, &adapter->flags);
@@ -1369,7 +1358,7 @@ static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
+ * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
@@ -2331,35 +2320,23 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 	kfree(mcarray);
 }
 
-/* Need to wait a few seconds after link up to get diagnostic information from
- * the phy */
-
-static void e1000_update_phy_info(unsigned long data)
-{
-	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
-	schedule_work(&adapter->phy_info_task);
-}
-
+/**
+ * e1000_update_phy_info_task - get phy info
+ * @work: work struct contained inside adapter struct
+ *
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
 static void e1000_update_phy_info_task(struct work_struct *work)
 {
 	struct e1000_adapter *adapter = container_of(work,
 						     struct e1000_adapter,
-						     phy_info_task);
-	struct e1000_hw *hw = &adapter->hw;
-
-	rtnl_lock();
-	e1000_phy_get_info(hw, &adapter->phy_info);
-	rtnl_unlock();
-}
-
-/**
- * e1000_82547_tx_fifo_stall - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
- **/
-static void e1000_82547_tx_fifo_stall(unsigned long data)
-{
-	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
-	schedule_work(&adapter->fifo_stall_task);
+						     phy_info_task.work);
+	if (test_bit(__E1000_DOWN, &adapter->flags))
+		return;
+	mutex_lock(&adapter->mutex);
+	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+	mutex_unlock(&adapter->mutex);
 }
 
 /**
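Note the subtle change in the container_of() call above: phy_info_task becomes phy_info_task.work. A delayed_work is a work_struct plus a timer, and the handler receives a pointer to the embedded work_struct, so the member path must name it. Reduced illustration with hypothetical names:

#include <linux/workqueue.h>

struct my_adapter {
	struct delayed_work phy_info_task;
};

static void my_phy_info_task(struct work_struct *work)
{
	/* 'work' points at phy_info_task.work, so that is the member
	 * container_of() must be given to recover the adapter: */
	struct my_adapter *a = container_of(work, struct my_adapter,
					    phy_info_task.work);
	(void)a;
}

Naming the outer delayed_work member instead would trip container_of()'s pointer type check, which is why the conversion touches every handler.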
@@ -2370,12 +2347,14 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
 {
 	struct e1000_adapter *adapter = container_of(work,
 						     struct e1000_adapter,
-						     fifo_stall_task);
+						     fifo_stall_task.work);
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	u32 tctl;
 
-	rtnl_lock();
+	if (test_bit(__E1000_DOWN, &adapter->flags))
+		return;
+	mutex_lock(&adapter->mutex);
 	if (atomic_read(&adapter->tx_fifo_stall)) {
 		if ((er32(TDT) == er32(TDH)) &&
 		    (er32(TDFT) == er32(TDFH)) &&
@@ -2393,10 +2372,10 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
 			atomic_set(&adapter->tx_fifo_stall, 0);
 			netif_wake_queue(netdev);
 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
-			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+			schedule_delayed_work(&adapter->fifo_stall_task, 1);
 		}
 	}
-	rtnl_unlock();
+	mutex_unlock(&adapter->mutex);
 }
 
 bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2437,17 +2416,23 @@ bool e1000_has_link(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * e1000_watchdog - work function
+ * @work: work struct contained inside adapter struct
 **/
-static void e1000_watchdog(unsigned long data)
+static void e1000_watchdog(struct work_struct *work)
 {
-	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     watchdog_task.work);
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_tx_ring *txdr = adapter->tx_ring;
 	u32 link, tctl;
 
+	if (test_bit(__E1000_DOWN, &adapter->flags))
+		return;
+	mutex_lock(&adapter->mutex);
 	link = e1000_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link)
 		goto link_up;
@@ -2493,8 +2478,8 @@ static void e1000_watchdog(unsigned long data)
 			netif_carrier_on(netdev);
 
 			if (!test_bit(__E1000_DOWN, &adapter->flags))
-				mod_timer(&adapter->phy_info_timer,
-					  round_jiffies(jiffies + 2 * HZ));
+				schedule_delayed_work(&adapter->phy_info_task,
+						      2 * HZ);
 			adapter->smartspeed = 0;
 		}
 	} else {
@@ -2506,8 +2491,8 @@ static void e1000_watchdog(unsigned long data)
 			netif_carrier_off(netdev);
 
 			if (!test_bit(__E1000_DOWN, &adapter->flags))
-				mod_timer(&adapter->phy_info_timer,
-					  round_jiffies(jiffies + 2 * HZ));
+				schedule_delayed_work(&adapter->phy_info_task,
+						      2 * HZ);
 		}
 
 		e1000_smartspeed(adapter);
@@ -2536,8 +2521,8 @@ static void e1000_watchdog(unsigned long data)
 			 * (Do the reset outside of interrupt context). */
 			adapter->tx_timeout_count++;
 			schedule_work(&adapter->reset_task);
-			/* return immediately since reset is imminent */
-			return;
+			/* exit immediately since reset is imminent */
+			goto unlock;
 		}
 	}
@@ -2563,10 +2548,12 @@ static void e1000_watchdog(unsigned long data)
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = true;
 
-	/* Reset the timer */
+	/* Reschedule the task */
 	if (!test_bit(__E1000_DOWN, &adapter->flags))
-		mod_timer(&adapter->watchdog_timer,
-			  round_jiffies(jiffies + 2 * HZ));
+		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
+
+unlock:
+	mutex_unlock(&adapter->mutex);
 }
 
 enum latency_range {
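The watchdog now re-arms itself with schedule_delayed_work() instead of being driven by mod_timer(), and it checks the __E1000_DOWN flag both on entry and before re-arming. A sketch of that self-rearming shape, with hypothetical names (my_adapter, __MY_DOWN):

#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/bitops.h>

#define __MY_DOWN 0

struct my_adapter {
	unsigned long flags;
	struct mutex mutex;
	struct delayed_work watchdog_task;
};

static void my_watchdog(struct work_struct *work)
{
	struct my_adapter *a = container_of(work, struct my_adapter,
					    watchdog_task.work);

	if (test_bit(__MY_DOWN, &a->flags))
		return;			/* tearing down: do not re-arm */

	mutex_lock(&a->mutex);
	/* check link state, look for a hung transmit ring, etc. */
	mutex_unlock(&a->mutex);

	/* recheck: teardown may have raced with the body above */
	if (!test_bit(__MY_DOWN, &a->flags))
		schedule_delayed_work(&a->watchdog_task, 2 * HZ);
}

Together with cancel_delayed_work_sync() in the teardown path, the entry-and-rearm checks close the race where a cancelled handler could schedule itself back in.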
@@ -3206,15 +3193,13 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
 		return NETDEV_TX_BUSY;
 
-	if (unlikely(hw->mac_type == e1000_82547)) {
-		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
-			netif_stop_queue(netdev);
-			if (!test_bit(__E1000_DOWN, &adapter->flags))
-				mod_timer(&adapter->tx_fifo_stall_timer,
-					  jiffies + 1);
-			return NETDEV_TX_BUSY;
-		}
+	if (unlikely((hw->mac_type == e1000_82547) &&
+		     (e1000_82547_fifo_workaround(adapter, skb)))) {
+		netif_stop_queue(netdev);
+		if (!test_bit(__E1000_DOWN, &adapter->flags))
+			schedule_delayed_work(&adapter->fifo_stall_task, 1);
+		return NETDEV_TX_BUSY;
 	}
 
 	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= E1000_TX_FLAGS_VLAN;
@@ -3275,6 +3260,8 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter =
 		container_of(work, struct e1000_adapter, reset_task);
 
+	if (test_bit(__E1000_DOWN, &adapter->flags))
+		return;
 	e1000_reinit_safe(adapter);
 }
@@ -3283,7 +3270,7 @@ static void e1000_reset_task(struct work_struct *work)
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
+ * The statistics are actually updated from the watchdog.
 **/
 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
@@ -3551,7 +3538,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
 		hw->get_link_status = 1;
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->flags))
-			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+			schedule_delayed_work(&adapter->watchdog_task, 1);
 	}
 
 	/* disable interrupts, without the synchronize_irq bit */
@@ -4729,6 +4716,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	netif_device_detach(netdev);
 
+	mutex_lock(&adapter->mutex);
+
 	if (netif_running(netdev)) {
 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
 		e1000_down(adapter);
@@ -4736,8 +4725,10 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
-	if (retval)
+	if (retval) {
+		mutex_unlock(&adapter->mutex);
 		return retval;
+	}
 #endif
 
 	status = er32(STATUS);
@@ -4792,6 +4783,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	if (netif_running(netdev))
 		e1000_free_irq(adapter);
 
+	mutex_unlock(&adapter->mutex);
+
 	pci_disable_device(pdev);
 
 	return 0;
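The PM hunks above bracket the suspend path with the adapter's private mutex in place of coarser locking, and the CONFIG_PM early return gains an unlock so that no exit path leaves the mutex held. Shape of that discipline, with my_save_state() as an invented stand-in for the failing call:

#include <linux/mutex.h>

struct my_adapter {
	struct mutex mutex;
};

static int my_save_state(struct my_adapter *a);	/* hypothetical, may fail */

static int my_shutdown(struct my_adapter *a)
{
	int retval;

	mutex_lock(&a->mutex);
	/* detach the interface, bring the hardware down ... */

	retval = my_save_state(a);
	if (retval) {
		mutex_unlock(&a->mutex);	/* every exit must unlock */
		return retval;
	}

	/* ... program wake-up state ... */
	mutex_unlock(&a->mutex);
	return 0;
}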
drivers/net/ethernet/intel/e1000e/ich8lan.c

@@ -1578,7 +1578,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
 		if (ret_val)
 			goto out;
-		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
+		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
 		if (ret_val)
 			goto out;
 		e1e_rphy(hw, HV_PM_CTRL, &data);
drivers/net/ethernet/intel/igb/e1000_82575.h

@@ -130,7 +130,9 @@ union e1000_adv_tx_desc {
 #define E1000_ADVTXD_MAC_TSTAMP   0x00080000 /* IEEE1588 Timestamp packet */
 #define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
 #define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
 #define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
 #define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
 #define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
 #define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
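The two added defines fill in the missing members of the DCMD bit family used to compose the cmd_type word of an advanced transmit descriptor. A sketch of how such bits combine (the MY_TXD_* names are illustrative; the real combination macro, IGB_TXD_DCMD, is added to igb.h just below):

#define E1000_ADVTXD_DTYP_DATA	0x00300000 /* Advanced Data Descriptor */
#define E1000_ADVTXD_DCMD_EOP	0x01000000 /* End of Packet */
#define E1000_ADVTXD_DCMD_IFCS	0x02000000 /* Insert FCS (Ethernet CRC) */
#define E1000_ADVTXD_DCMD_RS	0x08000000 /* Report Status */
#define E1000_ADVTXD_DCMD_DEXT	0x20000000 /* Descriptor extension (1=Adv) */

/* every frame: data descriptor, extended format, FCS inserted */
#define MY_TXD_BASE	(E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | \
			 E1000_ADVTXD_DCMD_DEXT)
/* last descriptor of a frame also marks EOP and requests status writeback */
#define MY_TXD_LAST	(MY_TXD_BASE | E1000_ADVTXD_DCMD_EOP | \
			 E1000_ADVTXD_DCMD_RS)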
drivers/net/ethernet/intel/igb/igb.h

@@ -47,6 +47,7 @@ struct igb_adapter;
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD                  256
+#define IGB_DEFAULT_TX_WORK		 128
 #define IGB_MIN_TXD                       80
 #define IGB_MAX_TXD                     4096
@@ -129,29 +130,33 @@ struct vf_data_storage {
 #define IGB_MNG_VLAN_NONE -1
 
+#define IGB_TX_FLAGS_CSUM		0x00000001
+#define IGB_TX_FLAGS_VLAN		0x00000002
+#define IGB_TX_FLAGS_TSO		0x00000004
+#define IGB_TX_FLAGS_IPV4		0x00000008
+#define IGB_TX_FLAGS_TSTAMP		0x00000010
+#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT	16
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
-struct igb_buffer {
+struct igb_tx_buffer {
+	union e1000_adv_tx_desc *next_to_watch;
+	unsigned long time_stamp;
 	struct sk_buff *skb;
+	unsigned int bytecount;
+	u16 gso_segs;
 	dma_addr_t dma;
-	union {
-		/* TX */
-		struct {
-			unsigned long time_stamp;
-			u16 length;
-			u16 next_to_watch;
-			unsigned int bytecount;
-			u16 gso_segs;
-			u8 tx_flags;
-			u8 mapped_as_page;
-		};
-		/* RX */
-		struct {
-			struct page *page;
-			dma_addr_t page_dma;
-			u16 page_offset;
-		};
-	};
+	u32 length;
+	u32 tx_flags;
+};
+
+struct igb_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+	dma_addr_t page_dma;
+	u32 page_offset;
 };
 
 struct igb_tx_queue_stats {
@@ -177,6 +182,7 @@ struct igb_q_vector {
 	u32 eims_value;
 	u16 cpu;
+	u16 tx_work_limit;
 
 	u16 itr_val;
 	u8 set_itr;
@@ -189,7 +195,10 @@ struct igb_ring {
 	struct igb_q_vector *q_vector; /* backlink to q_vector */
 	struct net_device *netdev;     /* back pointer to net_device */
 	struct device *dev;            /* device pointer for dma mapping */
-	struct igb_buffer *buffer_info; /* array of buffer info structs */
+	union {                        /* array of buffer info structs */
+		struct igb_tx_buffer *tx_buffer_info;
+		struct igb_rx_buffer *rx_buffer_info;
+	};
 	void *desc;                    /* descriptor ring memory */
 	unsigned long flags;           /* ring specific flags */
 	void __iomem *tail;            /* pointer to ring tail register */
@@ -229,7 +238,7 @@ struct igb_ring {
 #define IGB_RING_FLAG_TX_CTX_IDX	0x00000001 /* HW requires context index */
 
-#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
+#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)	    \
 	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
@@ -266,6 +275,7 @@ struct igb_adapter {
 	u16 rx_itr;
 
 	/* TX */
+	u16 tx_work_limit;
 	u32 tx_timeout_count;
 	int num_tx_queues;
 	struct igb_ring *tx_ring[16];
@@ -374,7 +384,7 @@ extern void igb_setup_tctl(struct igb_adapter *);
 extern void igb_setup_rctl(struct igb_adapter *);
 extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
 extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
-					   struct igb_buffer *);
+					   struct igb_tx_buffer *);
 extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
 extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
 extern bool igb_has_link(struct igb_adapter *adapter);
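The heart of the igb.h change: struct igb_buffer carried a Tx/Rx union inside every buffer entry, and the split plus the anonymous union in struct igb_ring moves that choice up to the ring, where it is made exactly once. Condensed sketch of the idiom (my_* names are illustrative):

#include <linux/skbuff.h>
#include <linux/types.h>

struct my_tx_buffer { struct sk_buff *skb; dma_addr_t dma; u32 length; };
struct my_rx_buffer { struct sk_buff *skb; dma_addr_t dma; u32 page_offset; };

struct my_ring {
	union {		/* a ring is only ever Tx or Rx, so share the slot */
		struct my_tx_buffer *tx_buffer_info;
		struct my_rx_buffer *rx_buffer_info;
	};
	u16 count;
};

Each side now pays only for its own fields (the Tx entry drops the page bookkeeping, the Rx entry drops the TSO accounting), which is also why every sizeof(struct igb_buffer) in the .c files becomes the direction-specific type.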
drivers/net/ethernet/intel/igb/igb_ethtool.c

@@ -1579,7 +1579,8 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 			       unsigned int size)
 {
 	union e1000_adv_rx_desc *rx_desc;
-	struct igb_buffer *buffer_info;
+	struct igb_rx_buffer *rx_buffer_info;
+	struct igb_tx_buffer *tx_buffer_info;
 	int rx_ntc, tx_ntc, count = 0;
 	u32 staterr;
@@ -1591,22 +1592,22 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 	while (staterr & E1000_RXD_STAT_DD) {
 		/* check rx buffer */
-		buffer_info = &rx_ring->buffer_info[rx_ntc];
+		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
 		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
 		dma_unmap_single(rx_ring->dev,
-				 buffer_info->dma,
+				 rx_buffer_info->dma,
 				 IGB_RX_HDR_LEN,
 				 DMA_FROM_DEVICE);
-		buffer_info->dma = 0;
+		rx_buffer_info->dma = 0;
 
 		/* verify contents of skb */
-		if (!igb_check_lbtest_frame(buffer_info->skb, size))
+		if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
 			count++;
 
 		/* unmap buffer on tx side */
-		buffer_info = &tx_ring->buffer_info[tx_ntc];
-		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 
 		/* increment rx/tx next to clean counters */
 		rx_ntc++;
@@ -2011,6 +2012,7 @@ static int igb_set_coalesce(struct net_device *netdev,
 	for (i = 0; i < adapter->num_q_vectors; i++) {
 		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		q_vector->tx_work_limit = adapter->tx_work_limit;
 		if (q_vector->rx_ring)
 			q_vector->itr_val = adapter->rx_itr_setting;
 		else
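The q_vector->tx_work_limit propagation above feeds the new bounded Tx cleanup: instead of draining the whole ring in one NAPI pass, the clean loop stops after tx_work_limit descriptors (IGB_DEFAULT_TX_WORK, 128, from igb.h). Hedged sketch of the shape; the helper names are invented and the real igb_clean_tx_irq() does considerably more:

struct my_q_vector { u16 tx_work_limit; /* ... */ };

static bool my_tx_desc_done(struct my_q_vector *q)  { (void)q; return false; }
static void my_free_tx_buffer(struct my_q_vector *q) { (void)q; }

static bool my_clean_tx_irq(struct my_q_vector *q)
{
	u16 budget = q->tx_work_limit;	/* assumed nonzero, e.g. 128 */

	do {
		if (!my_tx_desc_done(q))	/* ring drained: all done */
			return true;
		my_free_tx_buffer(q);		/* reclaim one descriptor */
	} while (--budget);

	return false;	/* budget exhausted: let NAPI poll again */
}

Bounding the per-poll work keeps one busy transmit queue from monopolizing a CPU, at the cost of an extra NAPI round trip when traffic is heavy.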
drivers/net/ethernet/intel/igb/igb_main.c

@@ -45,6 +45,9 @@
 #include <linux/pci-aspm.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/if_ether.h>
 #include <linux/aer.h>
 #include <linux/prefetch.h>
@@ -136,8 +139,8 @@ static irqreturn_t igb_msix_ring(int irq, void *);
 static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
@@ -339,7 +342,6 @@ static void igb_dump(struct igb_adapter *adapter)
 	struct igb_ring *tx_ring;
 	union e1000_adv_tx_desc *tx_desc;
 	struct my_u0 { u64 a; u64 b; } *u0;
-	struct igb_buffer *buffer_info;
 	struct igb_ring *rx_ring;
 	union e1000_adv_rx_desc *rx_desc;
 	u32 staterr;
@@ -376,9 +378,10 @@ static void igb_dump(struct igb_adapter *adapter)
 	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
 		" leng ntw timestamp\n");
 	for (n = 0; n < adapter->num_tx_queues; n++) {
+		struct igb_tx_buffer *buffer_info;
 		tx_ring = adapter->tx_ring[n];
-		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
-		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+		printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
 		       n, tx_ring->next_to_use, tx_ring->next_to_clean,
 		       (u64)buffer_info->dma,
 		       buffer_info->length,
@@ -413,11 +416,12 @@ static void igb_dump(struct igb_adapter *adapter)
 			"leng  ntw timestamp        bi->skb\n");
 
 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+			struct igb_tx_buffer *buffer_info;
 			tx_desc = IGB_TX_DESC(tx_ring, i);
-			buffer_info = &tx_ring->buffer_info[i];
+			buffer_info = &tx_ring->tx_buffer_info[i];
 			u0 = (struct my_u0 *)tx_desc;
 			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
-				" %04X  %3X %016llX %p", i,
+				" %04X  %p %016llX %p", i,
 				le64_to_cpu(u0->a),
 				le64_to_cpu(u0->b),
 				(u64)buffer_info->dma,
@@ -493,7 +497,8 @@ static void igb_dump(struct igb_adapter *adapter)
 			"<-- Adv Rx Write-Back format\n");
 
 		for (i = 0; i < rx_ring->count; i++) {
-			buffer_info = &rx_ring->buffer_info[i];
+			struct igb_rx_buffer *buffer_info;
+			buffer_info = &rx_ring->rx_buffer_info[i];
 			rx_desc = IGB_RX_DESC(rx_ring, i);
 			u0 = (struct my_u0 *)rx_desc;
 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
@@ -1120,6 +1125,7 @@ static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
 	q_vector->tx_ring = adapter->tx_ring[ring_idx];
 	q_vector->tx_ring->q_vector = q_vector;
 	q_vector->itr_val = adapter->tx_itr_setting;
+	q_vector->tx_work_limit = adapter->tx_work_limit;
 	if (q_vector->itr_val && q_vector->itr_val <= 3)
 		q_vector->itr_val = IGB_START_ITR;
 }
@@ -2388,11 +2394,17 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
 
+	/* set default ring sizes */
 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+	/* set default ITR values */
 	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
 	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
+	/* set default work limits */
+	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
 				  VLAN_HLEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
@@ -2569,9 +2581,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 	struct device *dev = tx_ring->dev;
 	int size;
 
-	size = sizeof(struct igb_buffer) * tx_ring->count;
-	tx_ring->buffer_info = vzalloc(size);
-	if (!tx_ring->buffer_info)
+	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+	tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
 		goto err;
 
 	/* round up to nearest 4K */
@@ -2591,7 +2603,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 	return 0;
 
 err:
-	vfree(tx_ring->buffer_info);
+	vfree(tx_ring->tx_buffer_info);
 	dev_err(dev,
 		"Unable to allocate memory for the transmit descriptor ring\n");
 	return -ENOMEM;
@@ -2712,9 +2724,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 	struct device *dev = rx_ring->dev;
 	int size, desc_len;
 
-	size = sizeof(struct igb_buffer) * rx_ring->count;
-	rx_ring->buffer_info = vzalloc(size);
-	if (!rx_ring->buffer_info)
+	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+	rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
 		goto err;
 
 	desc_len = sizeof(union e1000_adv_rx_desc);
@@ -2737,8 +2749,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 	return 0;
 
 err:
-	vfree(rx_ring->buffer_info);
-	rx_ring->buffer_info = NULL;
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
 	dev_err(dev, "Unable to allocate memory for the receive descriptor"
 		" ring\n");
 	return -ENOMEM;
@@ -3100,8 +3112,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
 	igb_clean_tx_ring(tx_ring);
 
-	vfree(tx_ring->buffer_info);
-	tx_ring->buffer_info = NULL;
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
 
 	/* if not set, then don't free */
 	if (!tx_ring->desc)
@@ -3127,30 +3139,26 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 		igb_free_tx_resources(adapter->tx_ring[i]);
 }
 
-void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
-				    struct igb_buffer *buffer_info)
+void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
+				    struct igb_tx_buffer *tx_buffer)
 {
-	if (buffer_info->dma) {
-		if (buffer_info->mapped_as_page)
-			dma_unmap_page(tx_ring->dev,
-					buffer_info->dma,
-					buffer_info->length,
-					DMA_TO_DEVICE);
-		else
-			dma_unmap_single(tx_ring->dev,
-					buffer_info->dma,
-					buffer_info->length,
-					DMA_TO_DEVICE);
-		buffer_info->dma = 0;
-	}
-	if (buffer_info->skb) {
-		dev_kfree_skb_any(buffer_info->skb);
-		buffer_info->skb = NULL;
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (tx_buffer->dma)
+			dma_unmap_single(ring->dev,
+					 tx_buffer->dma,
+					 tx_buffer->length,
+					 DMA_TO_DEVICE);
+	} else if (tx_buffer->dma) {
+		dma_unmap_page(ring->dev,
+			       tx_buffer->dma,
+			       tx_buffer->length,
+			       DMA_TO_DEVICE);
 	}
-	buffer_info->time_stamp = 0;
-	buffer_info->length = 0;
-	buffer_info->next_to_watch = 0;
-	buffer_info->mapped_as_page = false;
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	tx_buffer->dma = 0;
+	/* buffer_info must be completely set up in the transmit path */
 }
 
 /**
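The rewrite of igb_unmap_and_free_tx_resource() above changes the dispatch key: previously a mapped_as_page flag chose between dma_unmap_single() and dma_unmap_page(); now the presence of the skb does, because in the consolidated Tx path the skb's linear head is the only single-mapped region and every fragment is page-mapped. Reduced sketch of that logic (my_tx_buffer mirrors the new igb_tx_buffer fields):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct my_tx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 length;
};

static void my_unmap_tx(struct device *dev, struct my_tx_buffer *b)
{
	if (b->skb) {			/* head of the frame */
		dev_kfree_skb_any(b->skb);
		if (b->dma)
			dma_unmap_single(dev, b->dma, b->length,
					 DMA_TO_DEVICE);
	} else if (b->dma) {		/* a page fragment */
		dma_unmap_page(dev, b->dma, b->length, DMA_TO_DEVICE);
	}
	b->skb = NULL;
	b->dma = 0;
}

This drops the per-buffer mapped_as_page flag and a branch from the cleanup loop.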
@@ -3159,21 +3167,21 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
 **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-	struct igb_buffer *buffer_info;
+	struct igb_tx_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
 
-	if (!tx_ring->buffer_info)
+	if (!tx_ring->tx_buffer_info)
 		return;
 	/* Free all the Tx ring sk_buffs */
 
 	for (i = 0; i < tx_ring->count; i++) {
-		buffer_info = &tx_ring->buffer_info[i];
+		buffer_info = &tx_ring->tx_buffer_info[i];
 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
 
-	size = sizeof(struct igb_buffer) * tx_ring->count;
-	memset(tx_ring->buffer_info, 0, size);
+	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
 
 	/* Zero out the descriptor ring */
 	memset(tx_ring->desc, 0, tx_ring->size);
@@ -3204,8 +3212,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
 	igb_clean_rx_ring(rx_ring);
 
-	vfree(rx_ring->buffer_info);
-	rx_ring->buffer_info = NULL;
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
 
 	/* if not set, then don't free */
 	if (!rx_ring->desc)
@@ -3240,12 +3248,12 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 	unsigned long size;
 	u16 i;
 
-	if (!rx_ring->buffer_info)
+	if (!rx_ring->rx_buffer_info)
 		return;
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
-		struct igb_buffer *buffer_info = &rx_ring->buffer_info[i];
+		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
 		if (buffer_info->dma) {
 			dma_unmap_single(rx_ring->dev,
 					 buffer_info->dma,
@@ -3272,8 +3280,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		}
 	}
 
-	size = sizeof(struct igb_buffer) * rx_ring->count;
-	memset(rx_ring->buffer_info, 0, size);
+	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
 
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
...
@@ -3943,24 +3951,39 @@ static void igb_set_itr(struct igb_adapter *adapter)
}
}
}
}
#define IGB_TX_FLAGS_CSUM 0x00000001
void
igb_tx_ctxtdesc
(
struct
igb_ring
*
tx_ring
,
u32
vlan_macip_lens
,
#define IGB_TX_FLAGS_VLAN 0x00000002
u32
type_tucmd
,
u32
mss_l4len_idx
)
#define IGB_TX_FLAGS_TSO 0x00000004
#define IGB_TX_FLAGS_IPV4 0x00000008
#define IGB_TX_FLAGS_TSTAMP 0x00000010
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
static
inline
int
igb_tso
(
struct
igb_ring
*
tx_ring
,
struct
sk_buff
*
skb
,
u32
tx_flags
,
u8
*
hdr_len
)
{
{
struct
e1000_adv_tx_context_desc
*
context_desc
;
struct
e1000_adv_tx_context_desc
*
context_desc
;
unsigned
int
i
;
u16
i
=
tx_ring
->
next_to_use
;
context_desc
=
IGB_TX_CTXTDESC
(
tx_ring
,
i
);
i
++
;
tx_ring
->
next_to_use
=
(
i
<
tx_ring
->
count
)
?
i
:
0
;
/* set bits to identify this as an advanced context descriptor */
type_tucmd
|=
E1000_TXD_CMD_DEXT
|
E1000_ADVTXD_DTYP_CTXT
;
/* For 82575, context index must be unique per ring. */
if
(
tx_ring
->
flags
&
IGB_RING_FLAG_TX_CTX_IDX
)
mss_l4len_idx
|=
tx_ring
->
reg_idx
<<
4
;
context_desc
->
vlan_macip_lens
=
cpu_to_le32
(
vlan_macip_lens
);
context_desc
->
seqnum_seed
=
0
;
context_desc
->
type_tucmd_mlhl
=
cpu_to_le32
(
type_tucmd
);
context_desc
->
mss_l4len_idx
=
cpu_to_le32
(
mss_l4len_idx
);
}
static
inline
int
igb_tso
(
struct
igb_ring
*
tx_ring
,
struct
sk_buff
*
skb
,
u32
tx_flags
,
__be16
protocol
,
u8
*
hdr_len
)
{
int
err
;
int
err
;
struct
igb_buffer
*
buffer_info
;
u32
vlan_macip_lens
,
type_tucmd
;
u32
info
=
0
,
tu_cmd
=
0
;
u32
mss_l4len_idx
,
l4len
;
u32
mss_l4len_idx
;
u8
l4len
;
if
(
!
skb_is_gso
(
skb
))
return
0
;
if
(
skb_header_cloned
(
skb
))
{
if
(
skb_header_cloned
(
skb
))
{
err
=
pskb_expand_head
(
skb
,
0
,
0
,
GFP_ATOMIC
);
err
=
pskb_expand_head
(
skb
,
0
,
0
,
GFP_ATOMIC
);
...
@@ -3968,10 +3991,10 @@ static inline int igb_tso(struct igb_ring *tx_ring,
...
@@ -3968,10 +3991,10 @@ static inline int igb_tso(struct igb_ring *tx_ring,
return
err
;
return
err
;
}
}
l4len
=
tcp_hdrlen
(
skb
);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
*
hdr_len
+=
l4len
;
type_tucmd
=
E1000_ADVTXD_TUCMD_L4T_TCP
;
if
(
skb
->
protocol
==
htons
(
ETH_P_IP
))
{
if
(
protocol
==
__constant_
htons
(
ETH_P_IP
))
{
struct
iphdr
*
iph
=
ip_hdr
(
skb
);
struct
iphdr
*
iph
=
ip_hdr
(
skb
);
iph
->
tot_len
=
0
;
iph
->
tot_len
=
0
;
iph
->
check
=
0
;
iph
->
check
=
0
;
...
@@ -3979,6 +4002,7 @@ static inline int igb_tso(struct igb_ring *tx_ring,
...
@@ -3979,6 +4002,7 @@ static inline int igb_tso(struct igb_ring *tx_ring,
iph
->
daddr
,
0
,
iph
->
daddr
,
0
,
IPPROTO_TCP
,
IPPROTO_TCP
,
0
);
0
);
type_tucmd
|=
E1000_ADVTXD_TUCMD_IPV4
;
}
else
if
(
skb_is_gso_v6
(
skb
))
{
}
else
if
(
skb_is_gso_v6
(
skb
))
{
ipv6_hdr
(
skb
)
->
payload_len
=
0
;
ipv6_hdr
(
skb
)
->
payload_len
=
0
;
tcp_hdr
(
skb
)
->
check
=
~
csum_ipv6_magic
(
&
ipv6_hdr
(
skb
)
->
saddr
,
tcp_hdr
(
skb
)
->
check
=
~
csum_ipv6_magic
(
&
ipv6_hdr
(
skb
)
->
saddr
,
...
@@ -3986,277 +4010,278 @@ static inline int igb_tso(struct igb_ring *tx_ring,
...
@@ -3986,277 +4010,278 @@ static inline int igb_tso(struct igb_ring *tx_ring,
0
,
IPPROTO_TCP
,
0
);
0
,
IPPROTO_TCP
,
0
);
}
}
i
=
tx_ring
->
next_to_use
;
l4len
=
tcp_hdrlen
(
skb
);
*
hdr_len
=
skb_transport_offset
(
skb
)
+
l4len
;
buffer_info
=
&
tx_ring
->
buffer_info
[
i
];
/* MSS L4LEN IDX */
context_desc
=
IGB_TX_CTXTDESC
(
tx_ring
,
i
);
mss_l4len_idx
=
l4len
<<
E1000_ADVTXD_L4LEN_SHIFT
;
/* VLAN MACLEN IPLEN */
mss_l4len_idx
|=
skb_shinfo
(
skb
)
->
gso_size
<<
E1000_ADVTXD_MSS_SHIFT
;
if
(
tx_flags
&
IGB_TX_FLAGS_VLAN
)
info
|=
(
tx_flags
&
IGB_TX_FLAGS_VLAN_MASK
);
info
|=
(
skb_network_offset
(
skb
)
<<
E1000_ADVTXD_MACLEN_SHIFT
);
*
hdr_len
+=
skb_network_offset
(
skb
);
info
|=
skb_network_header_len
(
skb
);
*
hdr_len
+=
skb_network_header_len
(
skb
);
context_desc
->
vlan_macip_lens
=
cpu_to_le32
(
info
);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
/* VLAN MACLEN IPLEN */
tu_cmd
|=
(
E1000_TXD_CMD_DEXT
|
E1000_ADVTXD_DTYP_CTXT
);
vlan_macip_lens
=
skb_network_header_len
(
skb
);
vlan_macip_lens
|=
skb_network_offset
(
skb
)
<<
E1000_ADVTXD_MACLEN_SHIFT
;
vlan_macip_lens
|=
tx_flags
&
IGB_TX_FLAGS_VLAN_MASK
;
if
(
skb
->
protocol
==
htons
(
ETH_P_IP
))
igb_tx_ctxtdesc
(
tx_ring
,
vlan_macip_lens
,
type_tucmd
,
mss_l4len_idx
);
tu_cmd
|=
E1000_ADVTXD_TUCMD_IPV4
;
tu_cmd
|=
E1000_ADVTXD_TUCMD_L4T_TCP
;
context_desc
->
type_tucmd_mlhl
=
cpu_to_le32
(
tu_cmd
);
return
1
;
}
/* MSS L4LEN IDX */
static
inline
bool
igb_tx_csum
(
struct
igb_ring
*
tx_ring
,
struct
sk_buff
*
skb
,
mss_l4len_idx
=
(
skb_shinfo
(
skb
)
->
gso_size
<<
E1000_ADVTXD_MSS_SHIFT
);
u32
tx_flags
,
__be16
protocol
)
mss_l4len_idx
|=
(
l4len
<<
E1000_ADVTXD_L4LEN_SHIFT
);
{
u32
vlan_macip_lens
=
0
;
u32
mss_l4len_idx
=
0
;
u32
type_tucmd
=
0
;
/* For 82575, context index must be unique per ring. */
if
(
skb
->
ip_summed
!=
CHECKSUM_PARTIAL
)
{
if
(
tx_ring
->
flags
&
IGB_RING_FLAG_TX_CTX_IDX
)
if
(
!
(
tx_flags
&
IGB_TX_FLAGS_VLAN
))
mss_l4len_idx
|=
tx_ring
->
reg_idx
<<
4
;
return
false
;
}
else
{
u8
l4_hdr
=
0
;
switch
(
protocol
)
{
case
__constant_htons
(
ETH_P_IP
):
vlan_macip_lens
|=
skb_network_header_len
(
skb
);
type_tucmd
|=
E1000_ADVTXD_TUCMD_IPV4
;
l4_hdr
=
ip_hdr
(
skb
)
->
protocol
;
break
;
case
__constant_htons
(
ETH_P_IPV6
):
vlan_macip_lens
|=
skb_network_header_len
(
skb
);
l4_hdr
=
ipv6_hdr
(
skb
)
->
nexthdr
;
break
;
default:
if
(
unlikely
(
net_ratelimit
()))
{
dev_warn
(
tx_ring
->
dev
,
"partial checksum but proto=%x!
\n
"
,
protocol
);
}
break
;
}
context_desc
->
mss_l4len_idx
=
cpu_to_le32
(
mss_l4len_idx
);
switch
(
l4_hdr
)
{
context_desc
->
seqnum_seed
=
0
;
case
IPPROTO_TCP
:
type_tucmd
|=
E1000_ADVTXD_TUCMD_L4T_TCP
;
mss_l4len_idx
=
tcp_hdrlen
(
skb
)
<<
E1000_ADVTXD_L4LEN_SHIFT
;
break
;
case
IPPROTO_SCTP
:
type_tucmd
|=
E1000_ADVTXD_TUCMD_L4T_SCTP
;
mss_l4len_idx
=
sizeof
(
struct
sctphdr
)
<<
E1000_ADVTXD_L4LEN_SHIFT
;
break
;
case
IPPROTO_UDP
:
mss_l4len_idx
=
sizeof
(
struct
udphdr
)
<<
E1000_ADVTXD_L4LEN_SHIFT
;
break
;
default:
if
(
unlikely
(
net_ratelimit
()))
{
dev_warn
(
tx_ring
->
dev
,
"partial checksum but l4 proto=%x!
\n
"
,
l4_hdr
);
}
break
;
}
}
buffer_info
->
time_stamp
=
jiffies
;
vlan_macip_lens
|=
skb_network_offset
(
skb
)
<<
E1000_ADVTXD_MACLEN_SHIFT
;
buffer_info
->
next_to_watch
=
i
;
vlan_macip_lens
|=
tx_flags
&
IGB_TX_FLAGS_VLAN_MASK
;
buffer_info
->
dma
=
0
;
i
++
;
if
(
i
==
tx_ring
->
count
)
i
=
0
;
tx_ring
->
next_to_use
=
i
;
igb_tx_ctxtdesc
(
tx_ring
,
vlan_macip_lens
,
type_tucmd
,
mss_l4len_idx
)
;
return
true
;
return
(
skb
->
ip_summed
==
CHECKSUM_PARTIAL
)
;
}
}
 
+static __le32 igb_tx_cmd_type(u32 tx_flags)
+{
+	/* set type for advanced descriptor with frame checksum insertion */
+	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
+				      E1000_ADVTXD_DCMD_IFCS |
+				      E1000_ADVTXD_DCMD_DEXT);
+
+	/* set HW vlan bit if vlan is present */
+	if (tx_flags & IGB_TX_FLAGS_VLAN)
+		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+
+	/* set timestamp bit if present */
+	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
+
+	/* set segmentation bits for TSO */
+	if (tx_flags & IGB_TX_FLAGS_TSO)
+		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+
+	return cmd_type;
+}
+
+static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
+				   struct igb_ring *tx_ring)
+{
+	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
+
+	/* 82575 requires a unique index per ring if any offload is enabled */
+	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
+	    (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX))
+		olinfo_status |= tx_ring->reg_idx << 4;
+
+	/* insert L4 checksum */
+	if (tx_flags & IGB_TX_FLAGS_CSUM) {
+		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+		/* insert IPv4 checksum */
+		if (tx_flags & IGB_TX_FLAGS_IPV4)
+			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+	}
+
+	return cpu_to_le32(olinfo_status);
+}
+
-#define IGB_MAX_TXD_PWR	16
-#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
+/*
+ * The largest size we can write to the descriptor is 65535.  In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR	15
+#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
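
Note (reading aid, not part of the commit): igb_tx_olinfo_status() above packs the whole per-packet offload word with shifts and ORs — payload length in the upper bits, the POPTS checksum bits at byte 1, and the 82575 context index at bits 7:4. The stand-alone sketch below mirrors that packing in user space; the shift values and bit positions are illustrative stand-ins for the E1000_ADVTXD_*/E1000_TXD_POPTS_* macros, whose real values live in the driver headers.

```c
/* User-space sketch of the olinfo_status packing; constants are
 * illustrative stand-ins, not the driver's header values. */
#include <stdint.h>
#include <stdio.h>

#define PAYLEN_SHIFT	14	/* stand-in for E1000_ADVTXD_PAYLEN_SHIFT */
#define POPTS_TXSM	0x01	/* L4 checksum offload bit   */
#define POPTS_IXSM	0x02	/* IPv4 checksum offload bit */

static uint32_t olinfo_status(uint32_t paylen, int l4_csum, int ipv4_csum,
			      uint32_t reg_idx, int need_ctx_idx)
{
	uint32_t olinfo = paylen << PAYLEN_SHIFT;

	/* 82575-style unique context index per ring, bits 7:4 */
	if (need_ctx_idx)
		olinfo |= reg_idx << 4;

	/* POPTS field lives at bits 15:8 of the descriptor word */
	if (l4_csum) {
		olinfo |= POPTS_TXSM << 8;
		if (ipv4_csum)
			olinfo |= POPTS_IXSM << 8;
	}
	return olinfo;
}

int main(void)
{
	printf("olinfo_status = 0x%08x\n",
	       (unsigned)olinfo_status(1514, 1, 1, 3, 1));
	return 0;
}
```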
 
-static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
-			     unsigned int first)
+static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
+		       struct igb_tx_buffer *first, u32 tx_flags,
+		       const u8 hdr_len)
 {
-	struct igb_buffer *buffer_info;
-	struct device *dev = tx_ring->dev;
-	unsigned int hlen = skb_headlen(skb);
-	unsigned int count = 0, i;
-	unsigned int f;
-	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
-
-	i = tx_ring->next_to_use;
-
-	buffer_info = &tx_ring->buffer_info[i];
-	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
-	buffer_info->length = hlen;
-	/* set time_stamp *before* dma to help avoid a possible race */
-	buffer_info->time_stamp = jiffies;
-	buffer_info->next_to_watch = i;
-	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
-					  DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, buffer_info->dma))
-		goto dma_error;
+	struct igb_tx_buffer *tx_buffer_info;
+	union e1000_adv_tx_desc *tx_desc;
+	dma_addr_t dma;
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	unsigned int paylen = skb->len - hdr_len;
+	__le32 cmd_type;
+	u16 i = tx_ring->next_to_use;
+	u16 gso_segs;
+
+	if (tx_flags & IGB_TX_FLAGS_TSO)
+		gso_segs = skb_shinfo(skb)->gso_segs;
+	else
+		gso_segs = 1;
+
+	/* multiply data chunks by size of headers */
+	first->bytecount = paylen + (gso_segs * hdr_len);
+	first->gso_segs = gso_segs;
+	first->skb = skb;
+
+	tx_desc = IGB_TX_DESC(tx_ring, i);
+	tx_desc->read.olinfo_status =
+		igb_tx_olinfo_status(tx_flags, paylen, tx_ring);
+
+	cmd_type = igb_tx_cmd_type(tx_flags);
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(tx_ring->dev, dma))
+		goto dma_error;
 
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
-		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
-		unsigned int len = frag->size;
+	/* record length, and DMA address */
+	first->length = size;
+	first->dma = dma;
+	first->tx_flags = tx_flags;
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
-		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-
-		buffer_info = &tx_ring->buffer_info[i];
-		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
-		buffer_info->length = len;
-		buffer_info->time_stamp = jiffies;
-		buffer_info->next_to_watch = i;
-		buffer_info->mapped_as_page = true;
-		buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len,
-						    DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, buffer_info->dma))
-			goto dma_error;
-	}
+	for (;;) {
+		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
+			tx_desc->read.cmd_type_len =
+				cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
+
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = IGB_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+
+			dma += IGB_MAX_DATA_PER_TXD;
+			size -= IGB_MAX_DATA_PER_TXD;
+
+			tx_desc->read.olinfo_status = 0;
+			tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		}
+
+		if (likely(!data_len))
+			break;
+
+		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = IGB_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		size = frag->size;
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
 
-	tx_ring->buffer_info[i].skb = skb;
-	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
-	/* multiply data chunks by size of headers */
-	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
-	tx_ring->buffer_info[i].gso_segs = gso_segs;
-	tx_ring->buffer_info[first].next_to_watch = i;
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		tx_buffer_info->length = size;
+		tx_buffer_info->dma = dma;
+
+		tx_desc->read.olinfo_status = 0;
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+		frag++;
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
+	tx_desc->read.cmd_type_len = cmd_type;
 
-	return ++count;
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	/*
+	 * Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	writel(i, tx_ring->tail);
+
+	/* we need this if more than one processor can write to our tail
+	 * at a time, it syncronizes IO on IA64/Altix systems */
+	mmiowb();
+
+	return;
 
 dma_error:
-	dev_err(dev, "TX DMA map failed\n");
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
 
-	/* clear timestamp and dma mappings for failed buffer_info mapping */
-	buffer_info->dma = 0;
-	buffer_info->time_stamp = 0;
-	buffer_info->length = 0;
-	buffer_info->next_to_watch = 0;
-	buffer_info->mapped_as_page = false;
-
-	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count--) {
-		if (i == 0)
-			i = tx_ring->count;
-		i--;
-		buffer_info = &tx_ring->buffer_info[i];
-		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
-	}
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+		if (tx_buffer_info == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
 
-	return 0;
-}
-
-static inline void igb_tx_queue(struct igb_ring *tx_ring,
-				u32 tx_flags, int count, u32 paylen,
-				u8 hdr_len)
-{
-	union e1000_adv_tx_desc *tx_desc;
-	struct igb_buffer *buffer_info;
-	u32 olinfo_status = 0, cmd_type_len;
-	unsigned int i = tx_ring->next_to_use;
-
-	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
-			E1000_ADVTXD_DCMD_DEXT);
-
-	if (tx_flags & IGB_TX_FLAGS_VLAN)
-		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
-
-	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
-		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
-
-	if (tx_flags & IGB_TX_FLAGS_TSO) {
-		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
-
-		/* insert tcp checksum */
-		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-
-		/* insert ip checksum */
-		if (tx_flags & IGB_TX_FLAGS_IPV4)
-			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
-
-	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
-		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-	}
-
-	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
-	    (tx_flags & (IGB_TX_FLAGS_CSUM |
-			 IGB_TX_FLAGS_TSO |
-			 IGB_TX_FLAGS_VLAN)))
-		olinfo_status |= tx_ring->reg_idx << 4;
-
-	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
-
-	do {
-		buffer_info = &tx_ring->buffer_info[i];
-		tx_desc = IGB_TX_DESC(tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
-		tx_desc->read.cmd_type_len =
-			cpu_to_le32(cmd_type_len | buffer_info->length);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-		count--;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	} while (count > 0);
-
-	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64). */
-	wmb();
-
-	tx_ring->next_to_use = i;
-	writel(i, tx_ring->tail);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it syncronizes IO on IA64/Altix systems */
-	mmiowb();
-}
+	tx_ring->next_to_use = i;
+}
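
Note (reading aid, not part of the commit): the new igb_tx_map() above carves any buffer larger than 32K into IGB_MAX_DATA_PER_TXD-sized chunks while advancing and wrapping the descriptor ring index. Below is a minimal user-space model of that inner loop; the ring size, starting index, DMA address, and buffer length are arbitrary test values, not driver state.

```c
/* Stand-alone model of the >32K descriptor-splitting loop. */
#include <stdint.h>
#include <stdio.h>

#define MAX_TXD_PWR		15
#define MAX_DATA_PER_TXD	(1u << MAX_TXD_PWR)	/* 32K */

int main(void)
{
	uint32_t ring_count = 8;	/* descriptors in the ring */
	uint32_t i = 6;			/* next_to_use, near the wrap */
	uint64_t dma = 0x100000;	/* pretend DMA address */
	uint32_t size = 100000;		/* ~100K linear buffer */

	while (size > MAX_DATA_PER_TXD) {
		printf("desc %u: addr=0x%llx len=%u\n",
		       i, (unsigned long long)dma, MAX_DATA_PER_TXD);
		if (++i == ring_count)	/* wrap exactly like the driver */
			i = 0;
		dma  += MAX_DATA_PER_TXD;
		size -= MAX_DATA_PER_TXD;
	}
	/* the final chunk is the one that gets the RS/EOP bits */
	printf("desc %u: addr=0x%llx len=%u (EOP)\n",
	       i, (unsigned long long)dma, size);
	return 0;
}
```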
 
 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
...
@@ -4295,9 +4320,10 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 				struct igb_ring *tx_ring)
 {
-	int tso = 0, count;
+	struct igb_tx_buffer *first;
+	int tso;
 	u32 tx_flags = 0;
-	u16 first;
+	__be16 protocol = vlan_get_protocol(skb);
 	u8 hdr_len = 0;
 
 	/* need: 1 descriptor per page,
...
@@ -4320,43 +4346,31 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
 
-	if (skb->protocol == htons(ETH_P_IP))
-		tx_flags |= IGB_TX_FLAGS_IPV4;
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 
-	first = tx_ring->next_to_use;
-	if (skb_is_gso(skb)) {
-		tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);
-
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
+	tso = igb_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+	if (tso < 0) {
+		goto out_drop;
+	} else if (tso) {
+		tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM;
+		if (protocol == htons(ETH_P_IP))
+			tx_flags |= IGB_TX_FLAGS_IPV4;
+	} else if (igb_tx_csum(tx_ring, skb, tx_flags, protocol) &&
+		   (skb->ip_summed == CHECKSUM_PARTIAL)) {
+		tx_flags |= IGB_TX_FLAGS_CSUM;
 	}
 
-	if (tso)
-		tx_flags |= IGB_TX_FLAGS_TSO;
-	else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
-		 (skb->ip_summed == CHECKSUM_PARTIAL))
-		tx_flags |= IGB_TX_FLAGS_CSUM;
-
-	/*
-	 * count reflects descriptors mapped, if 0 or less then mapping error
-	 * has occurred and we need to rewind the descriptor queue
-	 */
-	count = igb_tx_map(tx_ring, skb, first);
-	if (!count) {
-		dev_kfree_skb_any(skb);
-		tx_ring->buffer_info[first].time_stamp = 0;
-		tx_ring->next_to_use = first;
-		return NETDEV_TX_OK;
-	}
-
-	igb_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
+	igb_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
 
 	/* Make sure there is space in the ring for the next send. */
 	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
 
 	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
 }
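
Note (reading aid, not part of the commit): the rewritten igb_xmit_frame_ring() now dispatches three ways on the igb_tso() return value — negative drops the frame via out_drop, positive selects TSO plus checksum, and zero falls through to plain checksum offload. A compilable toy version of that dispatch follows; the stub functions merely stand in for the real helpers.

```c
/* Toy model of the TSO / checksum-offload dispatch. */
#include <stdio.h>

enum { FLAG_TSO = 1, FLAG_CSUM = 2, FLAG_IPV4 = 4 };

static int fake_tso(int gso)  { return gso ? 1 : 0; } /* 1=TSO, 0=no, <0=err */
static int fake_csum(void)    { return 1; }           /* context desc written */

static int tx_flags_for(int gso, int is_ipv4)
{
	int flags = 0;
	int tso = fake_tso(gso);

	if (tso < 0)
		return -1;			/* the out_drop path */
	else if (tso) {
		flags |= FLAG_TSO | FLAG_CSUM;
		if (is_ipv4)
			flags |= FLAG_IPV4;
	} else if (fake_csum()) {
		flags |= FLAG_CSUM;
	}
	return flags;
}

int main(void)
{
	printf("gso frame:   flags=%d\n", tx_flags_for(1, 1));
	printf("plain frame: flags=%d\n", tx_flags_for(0, 1));
	return 0;
}
```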
 
 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
...
@@ -5496,7 +5510,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
 		igb_update_dca(q_vector);
 #endif
 	if (q_vector->tx_ring)
-		clean_complete = !!igb_clean_tx_irq(q_vector);
+		clean_complete = igb_clean_tx_irq(q_vector);
 
 	if (q_vector->rx_ring)
 		clean_complete &= igb_clean_rx_irq(q_vector, budget);
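
Note (reading aid, not part of the commit): igb_poll() treats Tx and Rx cleanup symmetrically — each pass reports whether it finished within its budget, and the NAPI loop only completes once both are clean. A self-contained model of that hand-off is below; the two stubs are not the driver's functions, just counters standing in for descriptor cleanup.

```c
/* Self-contained model of budget-limited poll completion. */
#include <stdbool.h>
#include <stdio.h>

static bool clean_tx(int *tx_backlog, int budget)
{
	while (*tx_backlog && budget--)
		(*tx_backlog)--;
	return *tx_backlog == 0;	/* akin to "return !!budget" */
}

static bool clean_rx(int *rx_backlog, int budget)
{
	while (*rx_backlog && budget--)
		(*rx_backlog)--;
	return *rx_backlog == 0;
}

int main(void)
{
	int tx = 40, rx = 100, budget = 64;
	int polls = 0;
	bool clean_complete;

	do {	/* each iteration models one napi poll callback */
		clean_complete  = clean_tx(&tx, budget);
		clean_complete &= clean_rx(&rx, budget);
		polls++;
	} while (!clean_complete);	/* kernel would return `budget` */

	printf("done after %d polls\n", polls);
	return 0;
}
```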
...
@@ -5544,13 +5558,14 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
 /**
  * igb_tx_hwtstamp - utility function which checks for TX time stamp
  * @q_vector: pointer to q_vector containing needed info
- * @buffer: pointer to igb_buffer structure
+ * @buffer: pointer to igb_tx_buffer structure
  *
  * If we were asked to do hardware stamping and such a time stamp is
  * available, then it must have been for this skb here because we only
  * allow only one such packet into the queue.
  */
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
+			    struct igb_tx_buffer *buffer_info)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
...
@@ -5558,7 +5573,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
 	u64 regval;
 
 	/* if skb does not support hw timestamp or TX stamp not valid exit */
-	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
+	if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
 	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
 		return;
...
@@ -5578,69 +5593,108 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct igb_ring *tx_ring = q_vector->tx_ring;
-	struct net_device *netdev = tx_ring->netdev;
-	struct e1000_hw *hw = &adapter->hw;
-	struct igb_buffer *buffer_info;
+	struct igb_tx_buffer *tx_buffer;
 	union e1000_adv_tx_desc *tx_desc, *eop_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
-	unsigned int i, eop, count = 0;
-	bool cleaned = false;
+	unsigned int budget = q_vector->tx_work_limit;
+	unsigned int i = tx_ring->next_to_clean;
 
-	i = tx_ring->next_to_clean;
-	eop = tx_ring->buffer_info[i].next_to_watch;
-	eop_desc = IGB_TX_DESC(tx_ring, eop);
+	if (test_bit(__IGB_DOWN, &adapter->state))
+		return true;
 
-	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
-	       (count < tx_ring->count)) {
-		rmb();	/* read buffer_info after eop_desc status */
-		for (cleaned = false; !cleaned; count++) {
-			tx_desc = IGB_TX_DESC(tx_ring, i);
-			buffer_info = &tx_ring->buffer_info[i];
-			cleaned = (i == eop);
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = IGB_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
 
-			if (buffer_info->skb) {
-				total_bytes += buffer_info->bytecount;
-				/* gso_segs is currently only valid for tcp */
-				total_packets += buffer_info->gso_segs;
-				igb_tx_hwtstamp(q_vector, buffer_info);
-			}
+	for (; budget; budget--) {
+		eop_desc = tx_buffer->next_to_watch;
 
-			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
-			tx_desc->wb.status = 0;
+		/* prevent any other reads prior to eop_desc */
+		rmb();
 
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
-		}
-		eop = tx_ring->buffer_info[i].next_to_watch;
-		eop_desc = IGB_TX_DESC(tx_ring, eop);
-	}
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* if DD is not set pending work has not been completed */
+		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
+
+		/* retrieve hardware timestamp */
+		igb_tx_hwtstamp(q_vector, tx_buffer);
+
+		/* free the skb */
+		dev_kfree_skb_any(tx_buffer->skb);
+		tx_buffer->skb = NULL;
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 tx_buffer->dma,
+				 tx_buffer->length,
+				 DMA_TO_DEVICE);
+
+		/* clear last DMA location and unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer->dma = 0;
+
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IGB_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (tx_buffer->dma) {
+				dma_unmap_page(tx_ring->dev,
+					       tx_buffer->dma,
+					       tx_buffer->length,
+					       DMA_TO_DEVICE);
+			}
+		}
+
+		/* clear last DMA location */
+		tx_buffer->dma = 0;
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = IGB_TX_DESC(tx_ring, 0);
+		}
+	}
 
+	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
-
-	if (unlikely(count &&
-		     netif_carrier_ok(netdev) &&
-		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
-		/* Make sure that anybody stopping the queue after this
-		 * sees the new next_to_clean.
-		 */
-		smp_mb();
-		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
-		    !(test_bit(__IGB_DOWN, &adapter->state))) {
-			netif_wake_subqueue(netdev, tx_ring->queue_index);
-
-			u64_stats_update_begin(&tx_ring->tx_syncp);
-			tx_ring->tx_stats.restart_queue++;
-			u64_stats_update_end(&tx_ring->tx_syncp);
-		}
-	}
+	u64_stats_update_begin(&tx_ring->tx_syncp);
+	tx_ring->tx_stats.bytes += total_bytes;
+	tx_ring->tx_stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->tx_syncp);
+	tx_ring->total_bytes += total_bytes;
+	tx_ring->total_packets += total_packets;
 
 	if (tx_ring->detect_tx_hung) {
+		struct e1000_hw *hw = &adapter->hw;
+
+		eop_desc = tx_buffer->next_to_watch;
+
 		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		tx_ring->detect_tx_hung = false;
-		if (tx_ring->buffer_info[i].time_stamp &&
-		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
+		if (eop_desc &&
+		    time_after(jiffies, tx_buffer->time_stamp +
 			       (adapter->tx_timeout_factor * HZ)) &&
 		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
...
@@ -5654,7 +5708,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 			  "  next_to_clean        <%x>\n"
 			  "buffer_info[next_to_clean]\n"
 			  "  time_stamp           <%lx>\n"
-			  "  next_to_watch        <%x>\n"
+			  "  next_to_watch        <%p>\n"
 			  "  jiffies              <%lx>\n"
 			  "  desc.status          <%x>\n",
 			  tx_ring->queue_index,
...
@@ -5662,20 +5716,38 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 			  readl(tx_ring->tail),
 			  tx_ring->next_to_use,
 			  tx_ring->next_to_clean,
-			  tx_ring->buffer_info[eop].time_stamp,
-			  eop,
+			  tx_buffer->time_stamp,
+			  eop_desc,
 			  jiffies,
 			  eop_desc->wb.status);
-		netif_stop_subqueue(netdev, tx_ring->queue_index);
+			netif_stop_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+
+			/* we are about to reset, no point in enabling stuff */
+			return true;
 		}
 	}
 
-	tx_ring->total_bytes += total_bytes;
-	tx_ring->total_packets += total_packets;
-	u64_stats_update_begin(&tx_ring->tx_syncp);
-	tx_ring->tx_stats.bytes += total_bytes;
-	tx_ring->tx_stats.packets += total_packets;
-	u64_stats_update_end(&tx_ring->tx_syncp);
-	return count < tx_ring->count;
+	if (unlikely(total_packets &&
+		     netif_carrier_ok(tx_ring->netdev) &&
+		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		    !(test_bit(__IGB_DOWN, &adapter->state))) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+
+			u64_stats_update_begin(&tx_ring->tx_syncp);
+			tx_ring->tx_stats.restart_queue++;
+			u64_stats_update_end(&tx_ring->tx_syncp);
+		}
+	}
+
+	return !!budget;
 }
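
Note (reading aid, not part of the commit): the core of the new igb_clean_tx_irq() is that each packet's first buffer now holds a pointer to its EOP descriptor instead of an index; cleanup stops as soon as that pointer is NULL or the descriptor's DD bit is still clear, and the pointer is NULLed to prevent false hang detection. A compilable miniature of that scheme, with simplified stand-in structures:

```c
/* Miniature model of pointer-based Tx completion tracking. */
#include <stdio.h>
#include <stddef.h>

#define RING	8
#define STAT_DD	0x1

struct tx_desc   { unsigned status; };
struct tx_buffer { struct tx_desc *next_to_watch; };

static struct tx_desc   desc[RING];
static struct tx_buffer buf[RING];

int main(void)
{
	unsigned next_to_clean = 0, budget = 4;

	/* queue two single-descriptor packets at slots 0 and 1 */
	buf[0].next_to_watch = &desc[0];
	buf[1].next_to_watch = &desc[1];
	desc[0].status = STAT_DD;	/* hardware finished packet 0 only */

	for (; budget; budget--) {
		struct tx_desc *eop = buf[next_to_clean].next_to_watch;

		if (!eop)			/* no work pending */
			break;
		if (!(eop->status & STAT_DD))	/* hardware not done yet */
			break;

		buf[next_to_clean].next_to_watch = NULL; /* no false hangs */
		printf("cleaned packet at %u\n", next_to_clean);
		next_to_clean = (next_to_clean + 1) % RING;
	}
	printf("next_to_clean=%u budget left=%u\n", next_to_clean, budget);
	return 0;
}
```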
 
 static inline void igb_rx_checksum(struct igb_ring *ring,
...
@@ -5772,7 +5844,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
 	while (staterr & E1000_RXD_STAT_DD) {
-		struct igb_buffer *buffer_info = &rx_ring->buffer_info[i];
+		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
 		struct sk_buff *skb = buffer_info->skb;
 		union e1000_adv_rx_desc *next_rxd;
...
@@ -5825,8 +5897,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 		}
 
 		if (!(staterr & E1000_RXD_STAT_EOP)) {
-			struct igb_buffer *next_buffer;
-			next_buffer = &rx_ring->buffer_info[i];
+			struct igb_rx_buffer *next_buffer;
+			next_buffer = &rx_ring->rx_buffer_info[i];
 			buffer_info->skb = next_buffer->skb;
 			buffer_info->dma = next_buffer->dma;
 			next_buffer->skb = skb;
...
@@ -5887,7 +5959,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 }
 
 static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
-				 struct igb_buffer *bi)
+				 struct igb_rx_buffer *bi)
 {
 	struct sk_buff *skb = bi->skb;
 	dma_addr_t dma = bi->dma;
...
@@ -5921,7 +5993,7 @@ static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
-				  struct igb_buffer *bi)
+				  struct igb_rx_buffer *bi)
 {
 	struct page *page = bi->page;
 	dma_addr_t page_dma = bi->page_dma;
...
@@ -5960,11 +6032,11 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 {
 	union e1000_adv_rx_desc *rx_desc;
-	struct igb_buffer *bi;
+	struct igb_rx_buffer *bi;
 	u16 i = rx_ring->next_to_use;
 
 	rx_desc = IGB_RX_DESC(rx_ring, i);
-	bi = &rx_ring->buffer_info[i];
+	bi = &rx_ring->rx_buffer_info[i];
 	i -= rx_ring->count;
 
 	while (cleaned_count--) {
...
@@ -5985,7 +6057,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		i++;
 		if (unlikely(!i)) {
 			rx_desc = IGB_RX_DESC(rx_ring, 0);
-			bi = rx_ring->buffer_info;
+			bi = rx_ring->rx_buffer_info;
 			i -= rx_ring->count;
 		}
...
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
View file @ d5b1d8cd
...
@@ -56,8 +56,8 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
 #define MAJ 3
-#define MIN 4
-#define BUILD 8
+#define MIN 6
+#define BUILD 7
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 	__stringify(BUILD) "-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
...