Commit 1458d82b authored by Alan Cox, committed by Greg Kroah-Hartman

Staging: et131x: Bring tx into coding style

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent fb034f84
@@ -133,7 +133,8 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	    (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size,
 						    &tx_ring->tx_desc_ring_pa);
 	if (!adapter->tx_ring.tx_desc_ring) {
-		dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
+		dev_err(&adapter->pdev->dev,
+			"Cannot alloc memory for Tx Ring\n");
 		return -ENOMEM;
 	}
@@ -193,6 +194,9 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 /**
  * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
  * @etdev: pointer to our private adapter structure
+ *
+ * Configure the transmit engine with the ring buffers we have created
+ * and prepare it for use.
  */
 void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 {
@@ -265,11 +269,11 @@ void et131x_init_send(struct et131x_adapter *adapter)
 	/* Go through and set up each TCB */
 	for (ct = 0; ct++ < NUM_TCB; tcb++)
 		/* Set the link pointer in HW TCB to the next TCB in the
-		 * chain. If this is the last TCB in the chain, also set the
-		 * tail pointer.
+		 * chain
 		 */
 		tcb->next = tcb + 1;
 
+	/* Set the tail pointer */
 	tcb--;
 	tx_ring->tcb_qtail = tcb;
 	tcb->next = NULL;
@@ -370,7 +374,7 @@ static int et131x_send_packet(struct sk_buff *skb,
 	tcb->skb = skb;
 
-	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
+	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
 		shbufva = (u16 *) skb->data;
 
 		if ((shbufva[0] == 0xffff) &&
@@ -389,12 +393,11 @@ static int et131x_send_packet(struct sk_buff *skb,
 	if (status != 0) {
 		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
-		if (etdev->tx_ring.tcb_qtail) {
+		if (etdev->tx_ring.tcb_qtail)
 			etdev->tx_ring.tcb_qtail->next = tcb;
-		} else {
+		else
 			/* Apparently ready Q is empty. */
 			etdev->tx_ring.tcb_qhead = tcb;
-		}
 
 		etdev->tx_ring.tcb_qtail = tcb;
 		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
@@ -535,8 +538,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 			return -EIO;
 
 		if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
-			if (++etdev->tx_ring.since_irq ==
-			    PARM_TX_NUM_BUFS_DEF) {
+			if (++etdev->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
 				/* Last element & Interrupt flag */
 				desc[frag - 1].flags = 0x5;
 				etdev->tx_ring.since_irq = 0;
@@ -569,7 +571,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 		add_10bit(&etdev->tx_ring.send_idx, thiscopy);
 
-		if (INDEX10(etdev->tx_ring.send_idx)== 0 ||
+		if (INDEX10(etdev->tx_ring.send_idx) == 0 ||
 		    INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
 			etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
 			etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
@@ -587,7 +589,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 		if (etdev->tx_ring.send_idx)
 			tcb->index = NUM_DESC_PER_RING_TX - 1;
 		else
-			tcb->index= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
+			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
 	} else
 		tcb->index = etdev->tx_ring.send_idx - 1;
@@ -653,7 +655,7 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 	 * they point to
 	 */
 	do {
-		desc =(struct tx_desc *) (etdev->tx_ring.tx_desc_ring +
+		desc = (struct tx_desc *)(etdev->tx_ring.tx_desc_ring +
 					  INDEX10(tcb->index_start));
 
 		pci_unmap_single(etdev->pdev,
@@ -708,7 +710,7 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 	tcb = etdev->tx_ring.send_head;
 
-	while ((tcb != NULL) && (freed < NUM_TCB)) {
+	while (tcb != NULL && freed < NUM_TCB) {
 		struct tcb *next = tcb->next;
 
 		etdev->tx_ring.send_head = next;
@@ -748,7 +750,7 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 {
 	unsigned long flags;
 	u32 serviced;
-	struct tcb * tcb;
+	struct tcb *tcb;
 	u32 index;
 
 	serviced = readl(&etdev->regs->txdma.NewServiceComplete);
@@ -793,7 +795,7 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 	}
 
 	/* Wake up the queue when we hit a low-water mark */
-	if (etdev->tx_ring.used <= (NUM_TCB / 3))
+	if (etdev->tx_ring.used <= NUM_TCB / 3)
 		netif_wake_queue(etdev->netdev);
 
 	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
@@ -126,7 +126,7 @@ struct tx_ring {
 	 * three of these (including used) are controlled via the
 	 * TCBSendQLock. This lock should be secured prior to incementing /
 	 * decrementing used, or any queue manipulation on send_head /
-	 * Tail
+	 * tail
 	 */
 	struct tcb *send_head;
 	struct tcb *send_tail;
@@ -136,7 +136,7 @@ struct tx_ring {
 	struct tx_desc *tx_desc_ring;
 	dma_addr_t tx_desc_ring_pa;
 
-	/* ReadyToSend indicates where we last wrote to in the descriptor ring. */
+	/* send_idx indicates where we last wrote to in the descriptor ring. */
 	u32 send_idx;
 
 	/* The location of the write-back status block */