Commit ee7a60c9 authored by David S. Miller's avatar David S. Miller

Merge branch 'liquidio-Tx-queue-cleanup'

Intiyaz Basha says:

====================
liquidio: Tx queue cleanup

Moved some common functions to octeon_network.h
Removed some unwanted functions and checks.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 94cb5492 c9614a16
...@@ -377,20 +377,12 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num) ...@@ -377,20 +377,12 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
return; return;
lio = GET_LIO(netdev); lio = GET_LIO(netdev);
if (netif_is_multiqueue(netdev)) { if (__netif_subqueue_stopped(netdev, iq->q_index) &&
if (__netif_subqueue_stopped(netdev, iq->q_index) && lio->linfo.link.s.link_up &&
lio->linfo.link.s.link_up && (!octnet_iq_is_full(oct, iq_num))) {
(!octnet_iq_is_full(oct, iq_num))) { netif_wake_subqueue(netdev, iq->q_index);
netif_wake_subqueue(netdev, iq->q_index); INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
tx_restart, 1);
}
} else if (netif_queue_stopped(netdev) &&
lio->linfo.link.s.link_up &&
(!octnet_iq_is_full(oct, lio->txq))) {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
tx_restart, 1); tx_restart, 1);
netif_wake_queue(netdev);
} }
} }
......
...@@ -513,115 +513,6 @@ static void liquidio_deinit_pci(void) ...@@ -513,115 +513,6 @@ static void liquidio_deinit_pci(void)
pci_unregister_driver(&liquidio_pci_driver); pci_unregister_driver(&liquidio_pci_driver);
} }
/**
* \brief Stop Tx queues
* @param netdev network device
*/
static inline void txqs_stop(struct net_device *netdev)
{
if (netif_is_multiqueue(netdev)) {
int i;
for (i = 0; i < netdev->num_tx_queues; i++)
netif_stop_subqueue(netdev, i);
} else {
netif_stop_queue(netdev);
}
}
/**
* \brief Start Tx queues
* @param netdev network device
*/
static inline void txqs_start(struct net_device *netdev)
{
if (netif_is_multiqueue(netdev)) {
int i;
for (i = 0; i < netdev->num_tx_queues; i++)
netif_start_subqueue(netdev, i);
} else {
netif_start_queue(netdev);
}
}
/**
* \brief Wake Tx queues
* @param netdev network device
*/
static inline void txqs_wake(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
if (netif_is_multiqueue(netdev)) {
int i;
for (i = 0; i < netdev->num_tx_queues; i++) {
int qno = lio->linfo.txpciq[i %
lio->oct_dev->num_iqs].s.q_no;
if (__netif_subqueue_stopped(netdev, i)) {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
tx_restart, 1);
netif_wake_subqueue(netdev, i);
}
}
} else {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
tx_restart, 1);
netif_wake_queue(netdev);
}
}
/**
* \brief Stop Tx queue
* @param netdev network device
*/
static void stop_txq(struct net_device *netdev)
{
txqs_stop(netdev);
}
/**
* \brief Start Tx queue
* @param netdev network device
*/
static void start_txq(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
if (lio->linfo.link.s.link_up) {
txqs_start(netdev);
return;
}
}
/**
* \brief Wake a queue
* @param netdev network device
* @param q which queue to wake
*/
static inline void wake_q(struct net_device *netdev, int q)
{
if (netif_is_multiqueue(netdev))
netif_wake_subqueue(netdev, q);
else
netif_wake_queue(netdev);
}
/**
* \brief Stop a queue
* @param netdev network device
* @param q which queue to stop
*/
static inline void stop_q(struct net_device *netdev, int q)
{
if (netif_is_multiqueue(netdev))
netif_stop_subqueue(netdev, q);
else
netif_stop_queue(netdev);
}
/** /**
* \brief Check Tx queue status, and take appropriate action * \brief Check Tx queue status, and take appropriate action
* @param lio per-network private data * @param lio per-network private data
...@@ -629,33 +520,24 @@ static inline void stop_q(struct net_device *netdev, int q) ...@@ -629,33 +520,24 @@ static inline void stop_q(struct net_device *netdev, int q)
*/ */
static inline int check_txq_status(struct lio *lio) static inline int check_txq_status(struct lio *lio)
{ {
int numqs = lio->netdev->num_tx_queues;
int ret_val = 0; int ret_val = 0;
int q, iq;
if (netif_is_multiqueue(lio->netdev)) { /* check each sub-queue state */
int numqs = lio->netdev->num_tx_queues; for (q = 0; q < numqs; q++) {
int q, iq = 0; iq = lio->linfo.txpciq[q %
lio->oct_dev->num_iqs].s.q_no;
/* check each sub-queue state */ if (octnet_iq_is_full(lio->oct_dev, iq))
for (q = 0; q < numqs; q++) { continue;
iq = lio->linfo.txpciq[q % if (__netif_subqueue_stopped(lio->netdev, q)) {
lio->oct_dev->num_iqs].s.q_no; netif_wake_subqueue(lio->netdev, q);
if (octnet_iq_is_full(lio->oct_dev, iq)) INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
continue; tx_restart, 1);
if (__netif_subqueue_stopped(lio->netdev, q)) { ret_val++;
wake_q(lio->netdev, q);
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
tx_restart, 1);
ret_val++;
}
} }
} else {
if (octnet_iq_is_full(lio->oct_dev, lio->txq))
return 0;
wake_q(lio->netdev, lio->txq);
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
tx_restart, 1);
ret_val = 1;
} }
return ret_val; return ret_val;
} }
...@@ -900,11 +782,11 @@ static inline void update_link_status(struct net_device *netdev, ...@@ -900,11 +782,11 @@ static inline void update_link_status(struct net_device *netdev,
if (lio->linfo.link.s.link_up) { if (lio->linfo.link.s.link_up) {
dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__); dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
netif_carrier_on(netdev); netif_carrier_on(netdev);
txqs_wake(netdev); wake_txqs(netdev);
} else { } else {
dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__); dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
netif_carrier_off(netdev); netif_carrier_off(netdev);
stop_txq(netdev); stop_txqs(netdev);
} }
if (lio->linfo.link.s.mtu != current_max_mtu) { if (lio->linfo.link.s.mtu != current_max_mtu) {
netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n", netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
...@@ -1752,16 +1634,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct) ...@@ -1752,16 +1634,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
return 0; return 0;
} }
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
int q = 0;
if (netif_is_multiqueue(lio->netdev))
q = skb->queue_mapping % lio->linfo.num_txpciq;
return q;
}
/** /**
* \brief Check Tx queue state for a given network buffer * \brief Check Tx queue state for a given network buffer
* @param lio per-network private data * @param lio per-network private data
...@@ -1769,22 +1641,17 @@ static inline int skb_iq(struct lio *lio, struct sk_buff *skb) ...@@ -1769,22 +1641,17 @@ static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
*/ */
static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
{ {
int q = 0, iq = 0; int q, iq;
if (netif_is_multiqueue(lio->netdev)) { q = skb->queue_mapping;
q = skb->queue_mapping; iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
} else {
iq = lio->txq;
q = iq;
}
if (octnet_iq_is_full(lio->oct_dev, iq)) if (octnet_iq_is_full(lio->oct_dev, iq))
return 0; return 0;
if (__netif_subqueue_stopped(lio->netdev, q)) { if (__netif_subqueue_stopped(lio->netdev, q)) {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
wake_q(lio->netdev, q); netif_wake_subqueue(lio->netdev, q);
} }
return 1; return 1;
} }
...@@ -2224,7 +2091,7 @@ static int liquidio_open(struct net_device *netdev) ...@@ -2224,7 +2091,7 @@ static int liquidio_open(struct net_device *netdev)
return -1; return -1;
} }
start_txq(netdev); start_txqs(netdev);
/* tell Octeon to start forwarding packets to host */ /* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1); send_rx_ctrl_cmd(lio, 1);
...@@ -2666,14 +2533,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -2666,14 +2533,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
lio = GET_LIO(netdev); lio = GET_LIO(netdev);
oct = lio->oct_dev; oct = lio->oct_dev;
if (netif_is_multiqueue(netdev)) { q_idx = skb_iq(lio, skb);
q_idx = skb->queue_mapping; tag = q_idx;
q_idx = (q_idx % (lio->linfo.num_txpciq)); iq_no = lio->linfo.txpciq[q_idx].s.q_no;
tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
} else {
iq_no = lio->txq;
}
stats = &oct->instr_queue[iq_no]->stats; stats = &oct->instr_queue[iq_no]->stats;
...@@ -2704,23 +2566,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -2704,23 +2566,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
ndata.q_no = iq_no; ndata.q_no = iq_no;
if (netif_is_multiqueue(netdev)) { if (octnet_iq_is_full(oct, ndata.q_no)) {
if (octnet_iq_is_full(oct, ndata.q_no)) { /* defer sending if queue is full */
/* defer sending if queue is full */ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", ndata.q_no);
ndata.q_no); stats->tx_iq_busy++;
stats->tx_iq_busy++; return NETDEV_TX_BUSY;
return NETDEV_TX_BUSY;
}
} else {
if (octnet_iq_is_full(oct, lio->txq)) {
/* defer sending if queue is full */
stats->tx_iq_busy++;
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
lio->txq);
return NETDEV_TX_BUSY;
}
} }
/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
* lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
*/ */
...@@ -2876,7 +2729,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -2876,7 +2729,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
if (status == IQ_SEND_STOP) if (status == IQ_SEND_STOP)
stop_q(netdev, q_idx); netif_stop_subqueue(netdev, q_idx);
netif_trans_update(netdev); netif_trans_update(netdev);
...@@ -2915,7 +2768,7 @@ static void liquidio_tx_timeout(struct net_device *netdev) ...@@ -2915,7 +2768,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
"Transmit timeout tx_dropped:%ld, waking up queues now!!\n", "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
netdev->stats.tx_dropped); netdev->stats.tx_dropped);
netif_trans_update(netdev); netif_trans_update(netdev);
txqs_wake(netdev); wake_txqs(netdev);
} }
static int liquidio_vlan_rx_add_vid(struct net_device *netdev, static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
......
...@@ -284,105 +284,6 @@ static struct pci_driver liquidio_vf_pci_driver = { ...@@ -284,105 +284,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
.err_handler = &liquidio_vf_err_handler, /* For AER */ .err_handler = &liquidio_vf_err_handler, /* For AER */
}; };
/**
* \brief Stop Tx queues
* @param netdev network device
*/
static void txqs_stop(struct net_device *netdev)
{
if (netif_is_multiqueue(netdev)) {
int i;
for (i = 0; i < netdev->num_tx_queues; i++)
netif_stop_subqueue(netdev, i);
} else {
netif_stop_queue(netdev);
}
}
/**
* \brief Start Tx queues
* @param netdev network device
*/
static void txqs_start(struct net_device *netdev)
{
if (netif_is_multiqueue(netdev)) {
int i;
for (i = 0; i < netdev->num_tx_queues; i++)
netif_start_subqueue(netdev, i);
} else {
netif_start_queue(netdev);
}
}
/**
* \brief Wake Tx queues
* @param netdev network device
*/
static void txqs_wake(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
if (netif_is_multiqueue(netdev)) {
int i;
for (i = 0; i < netdev->num_tx_queues; i++) {
int qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs]
.s.q_no;
if (__netif_subqueue_stopped(netdev, i)) {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
tx_restart, 1);
netif_wake_subqueue(netdev, i);
}
}
} else {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
tx_restart, 1);
netif_wake_queue(netdev);
}
}
/**
* \brief Start Tx queue
* @param netdev network device
*/
static void start_txq(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
if (lio->linfo.link.s.link_up) {
txqs_start(netdev);
return;
}
}
/**
* \brief Wake a queue
* @param netdev network device
* @param q which queue to wake
*/
static void wake_q(struct net_device *netdev, int q)
{
if (netif_is_multiqueue(netdev))
netif_wake_subqueue(netdev, q);
else
netif_wake_queue(netdev);
}
/**
* \brief Stop a queue
* @param netdev network device
* @param q which queue to stop
*/
static void stop_q(struct net_device *netdev, int q)
{
if (netif_is_multiqueue(netdev))
netif_stop_subqueue(netdev, q);
else
netif_stop_queue(netdev);
}
/** /**
* Remove the node at the head of the list. The list would be empty at * Remove the node at the head of the list. The list would be empty at
* the end of this call if there are no more nodes in the list. * the end of this call if there are no more nodes in the list.
...@@ -614,10 +515,10 @@ static void update_link_status(struct net_device *netdev, ...@@ -614,10 +515,10 @@ static void update_link_status(struct net_device *netdev,
if (lio->linfo.link.s.link_up) { if (lio->linfo.link.s.link_up) {
netif_carrier_on(netdev); netif_carrier_on(netdev);
txqs_wake(netdev); wake_txqs(netdev);
} else { } else {
netif_carrier_off(netdev); netif_carrier_off(netdev);
txqs_stop(netdev); stop_txqs(netdev);
} }
if (lio->linfo.link.s.mtu != current_max_mtu) { if (lio->linfo.link.s.mtu != current_max_mtu) {
...@@ -1052,16 +953,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct) ...@@ -1052,16 +953,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
return 0; return 0;
} }
static int skb_iq(struct lio *lio, struct sk_buff *skb)
{
int q = 0;
if (netif_is_multiqueue(lio->netdev))
q = skb->queue_mapping % lio->linfo.num_txpciq;
return q;
}
/** /**
* \brief Check Tx queue state for a given network buffer * \brief Check Tx queue state for a given network buffer
* @param lio per-network private data * @param lio per-network private data
...@@ -1069,22 +960,17 @@ static int skb_iq(struct lio *lio, struct sk_buff *skb) ...@@ -1069,22 +960,17 @@ static int skb_iq(struct lio *lio, struct sk_buff *skb)
*/ */
static int check_txq_state(struct lio *lio, struct sk_buff *skb) static int check_txq_state(struct lio *lio, struct sk_buff *skb)
{ {
int q = 0, iq = 0; int q, iq;
if (netif_is_multiqueue(lio->netdev)) { q = skb->queue_mapping;
q = skb->queue_mapping; iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
} else {
iq = lio->txq;
q = iq;
}
if (octnet_iq_is_full(lio->oct_dev, iq)) if (octnet_iq_is_full(lio->oct_dev, iq))
return 0; return 0;
if (__netif_subqueue_stopped(lio->netdev, q)) { if (__netif_subqueue_stopped(lio->netdev, q)) {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
wake_q(lio->netdev, q); netif_wake_subqueue(lio->netdev, q);
} }
return 1; return 1;
...@@ -1258,7 +1144,7 @@ static int liquidio_open(struct net_device *netdev) ...@@ -1258,7 +1144,7 @@ static int liquidio_open(struct net_device *netdev)
lio->intf_open = 1; lio->intf_open = 1;
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
start_txq(netdev); start_txqs(netdev);
/* tell Octeon to start forwarding packets to host */ /* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1); send_rx_ctrl_cmd(lio, 1);
...@@ -1300,7 +1186,7 @@ static int liquidio_stop(struct net_device *netdev) ...@@ -1300,7 +1186,7 @@ static int liquidio_stop(struct net_device *netdev)
ifstate_reset(lio, LIO_IFSTATE_RUNNING); ifstate_reset(lio, LIO_IFSTATE_RUNNING);
txqs_stop(netdev); stop_txqs(netdev);
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
...@@ -1718,14 +1604,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1718,14 +1604,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
lio = GET_LIO(netdev); lio = GET_LIO(netdev);
oct = lio->oct_dev; oct = lio->oct_dev;
if (netif_is_multiqueue(netdev)) { q_idx = skb_iq(lio, skb);
q_idx = skb->queue_mapping; tag = q_idx;
q_idx = (q_idx % (lio->linfo.num_txpciq)); iq_no = lio->linfo.txpciq[q_idx].s.q_no;
tag = q_idx;
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
} else {
iq_no = lio->txq;
}
stats = &oct->instr_queue[iq_no]->stats; stats = &oct->instr_queue[iq_no]->stats;
...@@ -1754,22 +1635,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1754,22 +1635,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
ndata.q_no = iq_no; ndata.q_no = iq_no;
if (netif_is_multiqueue(netdev)) { if (octnet_iq_is_full(oct, ndata.q_no)) {
if (octnet_iq_is_full(oct, ndata.q_no)) { /* defer sending if queue is full */
/* defer sending if queue is full */ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", ndata.q_no);
ndata.q_no); stats->tx_iq_busy++;
stats->tx_iq_busy++; return NETDEV_TX_BUSY;
return NETDEV_TX_BUSY;
}
} else {
if (octnet_iq_is_full(oct, lio->txq)) {
/* defer sending if queue is full */
stats->tx_iq_busy++;
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
ndata.q_no);
return NETDEV_TX_BUSY;
}
} }
ndata.datasize = skb->len; ndata.datasize = skb->len;
...@@ -1911,7 +1782,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1911,7 +1782,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (status == IQ_SEND_STOP) { if (status == IQ_SEND_STOP) {
dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
iq_no); iq_no);
stop_q(netdev, q_idx); netif_stop_subqueue(netdev, q_idx);
} }
netif_trans_update(netdev); netif_trans_update(netdev);
...@@ -1951,7 +1822,7 @@ static void liquidio_tx_timeout(struct net_device *netdev) ...@@ -1951,7 +1822,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
"Transmit timeout tx_dropped:%ld, waking up queues now!!\n", "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
netdev->stats.tx_dropped); netdev->stats.tx_dropped);
netif_trans_update(netdev); netif_trans_update(netdev);
txqs_wake(netdev); wake_txqs(netdev);
} }
static int static int
......
...@@ -506,4 +506,56 @@ static inline int wait_for_pending_requests(struct octeon_device *oct) ...@@ -506,4 +506,56 @@ static inline int wait_for_pending_requests(struct octeon_device *oct)
return 0; return 0;
} }
/**
* \brief Stop Tx queues
* @param netdev network device
*/
static inline void stop_txqs(struct net_device *netdev)
{
int i;
for (i = 0; i < netdev->num_tx_queues; i++)
netif_stop_subqueue(netdev, i);
}
/**
* \brief Wake Tx queues
* @param netdev network device
*/
static inline void wake_txqs(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
int i, qno;
for (i = 0; i < netdev->num_tx_queues; i++) {
qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
if (__netif_subqueue_stopped(netdev, i)) {
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
tx_restart, 1);
netif_wake_subqueue(netdev, i);
}
}
}
/**
* \brief Start Tx queues
* @param netdev network device
*/
static inline void start_txqs(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
int i;
if (lio->linfo.link.s.link_up) {
for (i = 0; i < netdev->num_tx_queues; i++)
netif_start_subqueue(netdev, i);
}
}
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
return skb->queue_mapping % lio->linfo.num_txpciq;
}
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment