Commit ffc385b9 authored by Thomas Falcon, committed by David S. Miller

ibmvnic: Keep track of supplementary TX descriptors

Supplementary TX descriptors were not being accounted for, which
was resulting in an overflow of the hardware device's transmit
queue. Keep track of those descriptors now when determining
how many entries remain on the TX queue.
Signed-off-by: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f5c0c6f4
...@@ -1467,6 +1467,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1467,6 +1467,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if ((*hdrs >> 7) & 1) { if ((*hdrs >> 7) & 1) {
build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
tx_crq.v1.n_crq_elem = num_entries; tx_crq.v1.n_crq_elem = num_entries;
tx_buff->num_entries = num_entries;
tx_buff->indir_arr[0] = tx_crq; tx_buff->indir_arr[0] = tx_crq;
tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
sizeof(tx_buff->indir_arr), sizeof(tx_buff->indir_arr),
...@@ -1515,7 +1516,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -1515,7 +1516,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out; goto out;
} }
if (atomic_inc_return(&tx_scrq->used) if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) { >= adapter->req_tx_entries_per_subcrq) {
netdev_info(netdev, "Stopping queue %d\n", queue_num); netdev_info(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num); netif_stop_subqueue(netdev, queue_num);
...@@ -2468,6 +2469,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, ...@@ -2468,6 +2469,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
restart_loop: restart_loop:
while (pending_scrq(adapter, scrq)) { while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index; unsigned int pool = scrq->pool_index;
int num_entries = 0;
next = ibmvnic_next_scrq(adapter, scrq); next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) { for (i = 0; i < next->tx_comp.num_comps; i++) {
...@@ -2498,6 +2500,8 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, ...@@ -2498,6 +2500,8 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
txbuff->skb = NULL; txbuff->skb = NULL;
} }
num_entries += txbuff->num_entries;
adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
producer_index] = index; producer_index] = index;
adapter->tx_pool[pool].producer_index = adapter->tx_pool[pool].producer_index =
...@@ -2507,7 +2511,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, ...@@ -2507,7 +2511,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
/* remove tx_comp scrq*/ /* remove tx_comp scrq*/
next->tx_comp.first = 0; next->tx_comp.first = 0;
if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <= if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) && (adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev, __netif_subqueue_stopped(adapter->netdev,
scrq->pool_index)) { scrq->pool_index)) {
......
...@@ -909,6 +909,7 @@ struct ibmvnic_tx_buff { ...@@ -909,6 +909,7 @@ struct ibmvnic_tx_buff {
union sub_crq indir_arr[6]; union sub_crq indir_arr[6];
u8 hdr_data[140]; u8 hdr_data[140];
dma_addr_t indir_dma; dma_addr_t indir_dma;
int num_entries;
}; };
struct ibmvnic_tx_pool { struct ibmvnic_tx_pool {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment