nexedi / linux

Commit 065f5f97, authored Jul 12, 2012 by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Parents: 6d4fa852 0b7f5d0b

Showing 5 changed files with 127 additions and 209 deletions:
drivers/net/ethernet/intel/ixgbe/ixgbe.h          +11   -9
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c   +1   -7
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c      +4   -4
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c      +50 -100
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c     +61  -89

drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -278,8 +278,10 @@ enum ixgbe_ring_f_enum {
 #define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
 #endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
-	int indices;
-	int mask;
+	u16 limit;	/* upper limit on feature indices */
+	u16 indices;	/* current value of indices */
+	u16 mask;	/* Mask used for feature to ring mapping */
+	u16 offset;	/* offset to start of feature */
 } ____cacheline_internodealigned_in_smp;

 /*
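[Annotation, not part of the diff] The reworked struct separates four roles that the old pair of ints conflated. A minimal standalone sketch, with hypothetical values and plain userspace C, of how the new fields are meant to be read together:

#include <stdio.h>

/* Sketch only: limit is the upper bound a feature may request,
 * indices the count actually in use after clamping, mask a
 * power-of-two fold kept where that still applies (RSS), and
 * offset the index of the feature's first ring in the adapter
 * ring array. */
struct ring_feature {
	unsigned short limit;
	unsigned short indices;
	unsigned short mask;
	unsigned short offset;
};

int main(void)
{
	/* hypothetical FCoE feature owning adapter rings 8..11 */
	struct ring_feature fcoe = { .limit = 8, .indices = 4, .offset = 8 };
	int i;

	for (i = 0; i < fcoe.indices; i++)
		printf("feature ring %d -> adapter ring %d\n",
		       i, fcoe.offset + i);
	return 0;
}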
@@ -315,7 +317,7 @@ struct ixgbe_ring_container {
 					      ? 8 : 1)
 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS

-/* MAX_MSIX_Q_VECTORS of these are allocated,
+/* MAX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
 struct ixgbe_q_vector {
@@ -401,11 +403,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 #define NON_Q_VECTORS (OTHER_VECTOR)

 #define MAX_MSIX_VECTORS_82599 64
-#define MAX_MSIX_Q_VECTORS_82599 64
+#define MAX_Q_VECTORS_82599 64
 #define MAX_MSIX_VECTORS_82598 18
-#define MAX_MSIX_Q_VECTORS_82598 16
+#define MAX_Q_VECTORS_82598 16

-#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
+#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

 #define MIN_MSIX_Q_VECTORS 1
@@ -496,7 +498,7 @@ struct ixgbe_adapter {
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;

-	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

 	/* DCB parameters */
 	struct ieee_pfc *ixgbe_ieee_pfc;
@@ -507,8 +509,8 @@ struct ixgbe_adapter {
 	u8 dcbx_cap;
 	enum ixgbe_fc_mode last_lfc_mode;

-	int num_msix_vectors;
-	int max_msix_q_vectors;	/* true count of q_vectors for device */
+	int num_q_vectors;	/* current number of q_vectors for device */
+	int max_q_vectors;	/* true count of q_vectors for device */
 	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
 	struct msix_entry *msix_entries;

drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

@@ -2090,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_q_vector *q_vector;
 	int i;
-	int num_vectors;
 	u16 tx_itr_param, rx_itr_param;
 	bool need_reset = false;
@@ -2126,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	/* check the old value and enable RSC if necessary */
 	need_reset = ixgbe_update_rsc(adapter);

-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	else
-		num_vectors = 1;
-
-	for (i = 0; i < num_vectors; i++) {
+	for (i = 0; i < adapter->num_q_vectors; i++) {
 		q_vector = adapter->q_vector[i];
 		if (q_vector->tx.count && !q_vector->rx.count)
 			/* tx only */

drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c

@@ -674,7 +674,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	if (adapter->ring_feature[RING_F_FCOE].indices) {
 		/* Use multiple rx queues for FCoE by redirection table */
 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-			fcoe_i = f->mask + i % f->indices;
+			fcoe_i = f->offset + i % f->indices;
 			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
 			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
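[Annotation, not part of the diff] The loop above fills the FCoE redirection table round-robin over the feature's rings, now starting at f->offset instead of overloading f->mask. A standalone sketch of that computation, with hypothetical sizes (IXGBE_FCRETA_SIZE is 8 in the driver, but any values work here):

#include <stdio.h>

/* Sketch: round-robin FCRETA fill; all numbers are made up. */
int main(void)
{
	const int fcreta_size = 8;	/* stands in for IXGBE_FCRETA_SIZE */
	const int offset = 8;		/* f->offset: first FCoE ring */
	const int indices = 4;		/* f->indices: FCoE rings in use */
	int i;

	for (i = 0; i < fcreta_size; i++)
		printf("FCRETA[%d] -> adapter rx ring %d\n",
		       i, offset + i % indices);
	return 0;
}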
@@ -683,7 +683,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
 	} else {
 		/* Use single rx queue for FCoE */
-		fcoe_i = f->mask;
+		fcoe_i = f->offset;
 		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
@@ -691,7 +691,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 	}
 	/* send FIP frames to the first FCoE queue */
-	fcoe_i = f->mask;
+	fcoe_i = f->offset;
 	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
 			IXGBE_ETQS_QUEUE_EN |
@@ -770,7 +770,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
 	ixgbe_clear_interrupt_scheme(adapter);

 	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
-	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+	adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
 	netdev->features |= NETIF_F_FCOE_CRC;
 	netdev->features |= NETIF_F_FSO;
 	netdev->features |= NETIF_F_FCOE_MTU;

drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c

@@ -138,30 +138,6 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 }
 #endif

-/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
-{
-	int i;
-	bool ret = false;
-
-	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i]->reg_idx = i;
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i]->reg_idx = i;
-		ret = true;
-	}
-
-	return ret;
-}
-
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
@@ -180,17 +156,14 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 		return false;

 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-			ixgbe_cache_ring_fdir(adapter);
-		else
-			ixgbe_cache_ring_rss(adapter);
+		ixgbe_cache_ring_rss(adapter);

-		fcoe_rx_i = f->mask;
-		fcoe_tx_i = f->mask;
+		fcoe_rx_i = f->offset;
+		fcoe_tx_i = f->offset;
 	}
 	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-		adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-		adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+		adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i;
+		adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i;
 	}
 	return true;
 }
@@ -244,9 +217,6 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 		return;
 #endif /* IXGBE_FCOE */

-	if (ixgbe_cache_ring_fdir(adapter))
-		return;
-
 	if (ixgbe_cache_ring_rss(adapter))
 		return;
 }
@@ -272,53 +242,39 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
  *
  **/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 {
-	bool ret = false;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
-
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		f->mask = 0xF;
-		adapter->num_rx_queues = f->indices;
-		adapter->num_tx_queues = f->indices;
-		ret = true;
+	struct ixgbe_ring_feature *f;
+	u16 rss_i;
+
+	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		return false;
 	}

-	return ret;
-}
+	/* set mask for 16 queue limit of RSS */
+	f = &adapter->ring_feature[RING_F_RSS];
+	rss_i = f->limit;

-/**
- * ixgbe_set_fdir_queues - Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session. This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
-	bool ret = false;
-	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
-
-	f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
-	f_fdir->mask = 0;
+	f->indices = rss_i;
+	f->mask = 0xF;

 	/*
-	 * Use RSS in addition to Flow Director to ensure the best
+	 * Use Flow Director in addition to RSS to ensure the best
 	 * distribution of flows across cores, even when an FDIR flow
 	 * isn't matched.
 	 */
-	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-		adapter->num_tx_queues = f_fdir->indices;
-		adapter->num_rx_queues = f_fdir->indices;
-		ret = true;
-	} else {
-		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+		f = &adapter->ring_feature[RING_F_FDIR];
+
+		f->indices = min_t(u16, num_online_cpus(), f->limit);
+		rss_i = max_t(u16, rss_i, f->indices);
 	}

-	return ret;
+	adapter->num_rx_queues = rss_i;
+	adapter->num_tx_queues = rss_i;
+
+	return true;
 }

 #ifdef IXGBE_FCOE
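[Annotation, not part of the diff] The merged ixgbe_set_rss_queues() now computes a single queue count: RSS supplies a baseline from its limit, and Flow Director hashing, when enabled, can only raise it. A minimal sketch of that decision with made-up inputs, mirroring the min_t/max_t clamping above:

#include <stdio.h>

/* Sketch of the queue-count decision; all inputs are hypothetical. */
static unsigned short min_u16(unsigned short a, unsigned short b)
{
	return a < b ? a : b;
}

static unsigned short max_u16(unsigned short a, unsigned short b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned short rss_limit = 16;	/* ring_feature[RING_F_RSS].limit */
	unsigned short fdir_limit = 64;	/* ring_feature[RING_F_FDIR].limit */
	unsigned short online_cpus = 24;	/* hypothetical */
	int fdir_hash_capable = 1;
	unsigned short rss_i = rss_limit;

	/* FDIR can only raise the count, clamped to CPUs vs. its limit */
	if (fdir_hash_capable)
		rss_i = max_u16(rss_i, min_u16(online_cpus, fdir_limit));

	printf("num_rx_queues = num_tx_queues = %u\n", (unsigned)rss_i);
	return 0;
}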
@@ -327,10 +283,7 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
  * @adapter: board private structure to initialize
  *
  * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
+ * Offset is used as the index of the first rx queue used by FCoE.
 **/
 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 {
@@ -339,21 +292,18 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 		return false;

-	f->indices = min_t(int, num_online_cpus(), f->indices);
+	f->indices = min_t(int, num_online_cpus(), f->limit);

 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;

 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		e_info(probe, "FCoE enabled with RSS\n");
-		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-			ixgbe_set_fdir_queues(adapter);
-		else
-			ixgbe_set_rss_queues(adapter);
+		ixgbe_set_rss_queues(adapter);
 	}

 	/* adding FCoE rx rings to the end */
-	f->mask = adapter->num_rx_queues;
+	f->offset = adapter->num_rx_queues;
 	adapter->num_rx_queues += f->indices;
 	adapter->num_tx_queues += f->indices;
@@ -388,7 +338,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 #ifdef IXGBE_FCOE
 		/* FCoE enabled queues require special configuration indexed
-		 * by feature specific indices and mask. Here we map FCoE
+		 * by feature specific indices and offset. Here we map FCoE
 		 * indices onto the DCB queue pairs allowing FCoE to own
 		 * configuration later.
 		 */
@@ -401,7 +351,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 			ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
 			tc = prio_tc[adapter->fcoe.up];
 			f->indices = dev->tc_to_txq[tc].count;
-			f->mask = dev->tc_to_txq[tc].offset;
+			f->offset = dev->tc_to_txq[tc].offset;
 		}
 #endif
@@ -441,9 +391,6 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 		goto done;
 #endif /* IXGBE_FCOE */

-	if (ixgbe_set_fdir_queues(adapter))
-		goto done;
-
 	if (ixgbe_set_rss_queues(adapter))
 		goto done;
@@ -507,8 +454,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
 		 * vectors we were allocated.
 		 */
-		adapter->num_msix_vectors = min(vectors,
-				   adapter->max_msix_q_vectors + NON_Q_VECTORS);
+		vectors -= NON_Q_VECTORS;
+		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
 	}
 }
@@ -632,8 +579,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
 			struct ixgbe_ring_feature *f;
 			f = &adapter->ring_feature[RING_F_FCOE];
-			if ((rxr_idx >= f->mask) &&
-			    (rxr_idx < f->mask + f->indices))
+			if ((rxr_idx >= f->offset) &&
+			    (rxr_idx < f->offset + f->indices))
 				set_bit(__IXGBE_RX_FCOE, &ring->state);
 		}
@@ -695,7 +642,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 **/
 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 {
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int q_vectors = adapter->num_q_vectors;
 	int rxr_remaining = adapter->num_rx_queues;
 	int txr_remaining = adapter->num_tx_queues;
 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
@@ -739,10 +686,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 	return 0;

 err_out:
-	while (v_idx) {
-		v_idx--;
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
 		ixgbe_free_q_vector(adapter, v_idx);
-	}

 	return -ENOMEM;
 }
@@ -757,14 +706,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 **/
 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
 {
-	int v_idx, q_vectors;
+	int v_idx = adapter->num_q_vectors;

-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	else
-		q_vectors = 1;
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;

-	for (v_idx = 0; v_idx < q_vectors; v_idx++)
+	while (v_idx--)
 		ixgbe_free_q_vector(adapter, v_idx);
 }
@@ -844,6 +792,8 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	if (err)
 		return err;

+	adapter->num_q_vectors = 1;
+
 	err = pci_enable_msi(adapter->pdev);
 	if (!err) {
 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;

drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -993,7 +993,6 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 {
-	int num_q_vectors;
 	int i;

 	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -1002,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 	/* always use CB2 mode, difference is masked in the CB driver */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	else
-		num_q_vectors = 1;
-
-	for (i = 0; i < num_q_vectors; i++) {
+	for (i = 0; i < adapter->num_q_vectors; i++) {
 		adapter->q_vector[i]->cpu = -1;
 		ixgbe_update_dca(adapter->q_vector[i]);
 	}
@@ -1831,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_q_vector *q_vector;
-	int q_vectors, v_idx;
+	int v_idx;
 	u32 mask;

-	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
 	/* Populate MSIX to EITR Select */
 	if (adapter->num_vfs > 32) {
 		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -1846,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	 * Populate the IVAR table and set the ITR values to the
 	 * corresponding register.
 	 */
-	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
 		struct ixgbe_ring *ring;
 		q_vector = adapter->q_vector[v_idx];
@@ -2410,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 	int vector, err;
 	int ri = 0, ti = 0;

-	for (vector = 0; vector < q_vectors; vector++) {
+	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
 		struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -2569,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 {
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int i, q_vectors;
+	int vector;

-		q_vectors = adapter->num_msix_vectors;
-		i = q_vectors - 1;
-		free_irq(adapter->msix_entries[i].vector, adapter);
-		i--;
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+		free_irq(adapter->pdev->irq, adapter);
+		return;
+	}

-		for (; i >= 0; i--) {
-			/* free only the irqs that were actually requested */
-			if (!adapter->q_vector[i]->rx.ring &&
-			    !adapter->q_vector[i]->tx.ring)
-				continue;
+	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+		struct msix_entry *entry = &adapter->msix_entries[vector];

-			/* clear the affinity_mask in the IRQ descriptor */
-			irq_set_affinity_hint(adapter->msix_entries[i].vector,
-					      NULL);
+		/* free only the irqs that were actually requested */
+		if (!q_vector->rx.ring && !q_vector->tx.ring)
+			continue;

-			free_irq(adapter->msix_entries[i].vector,
-				 adapter->q_vector[i]);
-		}
-	} else {
-		free_irq(adapter->pdev->irq, adapter);
+		/* clear the affinity_mask in the IRQ descriptor */
+		irq_set_affinity_hint(entry->vector, NULL);
+
+		free_irq(entry->vector, q_vector);
 	}
+
+	free_irq(adapter->msix_entries[vector++].vector, adapter);
 }

 /**
@@ -2616,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 	}
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int i;
-		for (i = 0; i < adapter->num_msix_vectors; i++)
-			synchronize_irq(adapter->msix_entries[i].vector);
+		int vector;
+
+		for (vector = 0; vector < adapter->num_q_vectors; vector++)
+			synchronize_irq(adapter->msix_entries[vector].vector);
+
+		synchronize_irq(adapter->msix_entries[vector++].vector);
 	} else {
 		synchronize_irq(adapter->pdev->irq);
 	}
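[Annotation, not part of the diff] Both rewritten loops here and in ixgbe_free_irq() lean on the same layout: MSI-X entries 0 through num_q_vectors - 1 belong to queue vectors, and the entry after them serves the non-queue causes, which the code above frees or synchronizes once the loop ends. A toy sketch of that iteration order, with a made-up vector count:

#include <stdio.h>

/* Sketch: queue vectors occupy MSI-X entries 0..num_q_vectors-1;
 * the entry after them serves the non-queue ("other") causes. */
int main(void)
{
	const int num_q_vectors = 4;	/* hypothetical */
	int vector;

	for (vector = 0; vector < num_q_vectors; vector++)
		printf("sync/free queue vector, entry %d\n", vector);

	/* after the loop, vector already indexes the trailing entry */
	printf("sync/free other vector, entry %d\n", vector);
	return 0;
}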
@@ -2855,40 +2847,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 				   struct ixgbe_ring *rx_ring)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	u32 srrctl;
 	u8 reg_idx = rx_ring->reg_idx;

-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82598EB: {
-		struct ixgbe_ring_feature *feature = adapter->ring_feature;
-		const int mask = feature[RING_F_RSS].mask;
-		reg_idx = reg_idx & mask;
-	}
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	default:
-		break;
-	}
-
-	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		u16 mask = adapter->ring_feature[RING_F_RSS].mask;

-	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
-	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
-	if (adapter->num_vfs)
-		srrctl |= IXGBE_SRRCTL_DROP_EN;
+		/*
+		 * if VMDq is not active we must program one srrctl register
+		 * per RSS queue since we have enabled RDRXCTL.MVMEN
+		 */
+		reg_idx &= mask;
+	}

-	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-		  IXGBE_SRRCTL_BSIZEHDR_MASK;
+	/* configure header buffer length, needed for RSC */
+	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;

+	/* configure the packet buffer length */
 #if PAGE_SIZE > IXGBE_MAX_RXBUFFER
 	srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
 	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #endif

+	/* configure descriptor type */
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }

 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -3561,33 +3547,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
 	int q_idx;
-	struct ixgbe_q_vector *q_vector;
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

-	/* legacy and MSI only use one vector */
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-		q_vectors = 1;
-
-	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		q_vector = adapter->q_vector[q_idx];
-		napi_enable(&q_vector->napi);
-	}
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_enable(&adapter->q_vector[q_idx]->napi);
 }

 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
 	int q_idx;
-	struct ixgbe_q_vector *q_vector;
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

-	/* legacy and MSI only use one vector */
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-		q_vectors = 1;
-
-	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		q_vector = adapter->q_vector[q_idx];
-		napi_disable(&q_vector->napi);
-	}
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_disable(&adapter->q_vector[q_idx]->napi);
 }

 #ifdef CONFIG_IXGBE_DCB
@@ -4410,18 +4380,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	/* Set capability flags */
 	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
-	adapter->ring_feature[RING_F_RSS].indices = rss;
+	adapter->ring_feature[RING_F_RSS].limit = rss;
 	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
 			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
-		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+		adapter->max_q_vectors = MAX_Q_VECTORS_82598;
 		break;
 	case ixgbe_mac_X540:
 		adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 	case ixgbe_mac_82599EB:
-		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+		adapter->max_q_vectors = MAX_Q_VECTORS_82599;
 		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
 		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
@@ -4429,13 +4399,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		/* Flow Director hash filters enabled */
 		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 		adapter->atr_sample_rate = 20;
-		adapter->ring_feature[RING_F_FDIR].indices =
+		adapter->ring_feature[RING_F_FDIR].limit =
 							 IXGBE_MAX_FDIR_INDICES;
 		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
 #ifdef IXGBE_FCOE
 		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
 		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-		adapter->ring_feature[RING_F_FCOE].indices = 0;
 #ifdef CONFIG_IXGBE_DCB
 		/* Default traffic class to use for FCoE */
 		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
@@ -5313,7 +5282,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
 	} else {
 		/* get one bit for every active tx/rx interrupt vector */
-		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+		for (i = 0; i < adapter->num_q_vectors; i++) {
 			struct ixgbe_q_vector *qv = adapter->q_vector[i];
 			if (qv->rx.ring || qv->tx.ring)
 				eics |= ((u64)1 << i);
@@ -6230,8 +6199,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 	if (((protocol == htons(ETH_P_FCOE)) ||
 	    (protocol == htons(ETH_P_FIP))) &&
 	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
-		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-		txq += adapter->ring_feature[RING_F_FCOE].mask;
+		struct ixgbe_ring_feature *f;
+
+		f = &adapter->ring_feature[RING_F_FCOE];
+
+		while (txq >= f->indices)
+			txq -= f->indices;
+		txq += adapter->ring_feature[RING_F_FCOE].offset;
+
 		return txq;
 	}
 #endif
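[Annotation, not part of the diff] The replaced masking only folded txq correctly when the FCoE ring count was a power of two; the new while loop handles any count. A sketch with made-up numbers:

#include <stdio.h>

/* Sketch: fold a picked tx queue onto a non-power-of-two FCoE ring
 * count, then rebase at the feature offset. All numbers made up. */
int main(void)
{
	unsigned short txq = 13;	/* hash-picked queue, hypothetical */
	unsigned short indices = 6;	/* FCoE rings; not a power of two */
	unsigned short offset = 8;	/* first FCoE tx ring */

	/* txq & (indices - 1) would be wrong here: 13 & 5 == 5, but
	 * 13 mod 6 == 1; repeated subtraction works for any count */
	while (txq >= indices)
		txq -= indices;

	printf("selected tx queue = %u\n", (unsigned)(txq + offset));
	return 0;
}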
@@ -6525,11 +6500,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
 	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-		for (i = 0; i < num_q_vectors; i++) {
-			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-			ixgbe_msix_clean_rings(0, q_vector);
-		}
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
 	} else {
 		ixgbe_intr(adapter->pdev->irq, netdev);
 	}