Commit 7c2e9ba3 authored by Julian Wiedmann, committed by David S. Miller

s390/qeth: don't take queue lock in send_packet_fast()

Locking the output queue prior to TX is needed on OSA devices,
to synchronize against a packing flush from the TX completion code
(via qeth_check_outbound_queue()).
But send_packet_fast() is only used for IQDs, which don't do packing.
So remove the locking, and apply some easy cleanups.
Signed-off-by: Julian Wiedmann <jwi@linux.vnet.ibm.com>
Reviewed-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 96279230
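
For context, the gate being removed below is a spin-until-unlocked pattern built from an atomic compare-and-swap around the buffer-index update, while the IQD fast path can advance the index without it. The following is a minimal sketch of the two patterns using plain C11 atomics, not the kernel code; toy_queue, toy_send_locked and toy_send_fast are made-up names standing in for the output queue, the locked OSA-style path and the lock-free IQD-style path.

#include <stdatomic.h>

enum { TOY_UNLOCKED, TOY_LOCKED };

struct toy_queue {
	atomic_int state;       /* TOY_UNLOCKED or TOY_LOCKED */
	unsigned int next_buf;  /* next buffer index to fill */
};

/* OSA-style path: serialize against a concurrent packing flush. */
static void toy_send_locked(struct toy_queue *q)
{
	int expected = TOY_UNLOCKED;

	/* spin until the compare-and-swap succeeds and we own the queue */
	while (!atomic_compare_exchange_weak(&q->state, &expected, TOY_LOCKED))
		expected = TOY_UNLOCKED;

	q->next_buf = (q->next_buf + 1) % 128;  /* fill + flush would go here */
	atomic_store(&q->state, TOY_UNLOCKED);
}

/* IQD-style fast path: nothing else touches the index, so no gate is taken. */
static void toy_send_fast(struct toy_queue *q)
{
	q->next_buf = (q->next_buf + 1) % 128;  /* fill + flush would go here */
}

int main(void)
{
	struct toy_queue q = { .next_buf = 0 };

	atomic_init(&q.state, TOY_UNLOCKED);
	toy_send_locked(&q);
	toy_send_fast(&q);
	return 0;
}

The gate only matters when a second party, such as the packing flush in the TX completion path, can advance next_buf_to_fill concurrently; per the commit message, IQDs do no packing, so the fast path has no such competitor and can skip it.
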
@@ -962,8 +962,7 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
			 int extra_elems, int data_offset);
 int qeth_get_elements_for_frags(struct sk_buff *);
-int qeth_do_send_packet_fast(struct qeth_card *card,
-			     struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
			     struct qeth_hdr *hdr, unsigned int offset,
			     unsigned int hd_len);
 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
...
@@ -4040,35 +4040,23 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 	return flush_cnt;
 }

-int qeth_do_send_packet_fast(struct qeth_card *card,
-			     struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
			     struct qeth_hdr *hdr, unsigned int offset,
			     unsigned int hd_len)
 {
-	struct qeth_qdio_out_buffer *buffer;
-	int index;
+	int index = queue->next_buf_to_fill;
+	struct qeth_qdio_out_buffer *buffer = queue->bufs[index];

-	/* spin until we get the queue ... */
-	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
-			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
-	/* ... now we've got the queue */
-	index = queue->next_buf_to_fill;
-	buffer = queue->bufs[queue->next_buf_to_fill];
 	/*
 	 * check if buffer is empty to make sure that we do not 'overtake'
 	 * ourselves and try to fill a buffer that is already primed
 	 */
 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
-		goto out;
-	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-				  QDIO_MAX_BUFFERS_PER_Q;
-	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+		return -EBUSY;
+	queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
 	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
 	qeth_flush_buffers(queue, index, 1);
 	return 0;
-out:
-	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
-	return -EBUSY;
 }
 EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
...
@@ -698,7 +698,7 @@ static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
 		rc = -E2BIG;
 		goto out;
 	}
-	rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset,
+	rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
				      sizeof(*hdr) + data_offset);
 out:
 	if (rc)
...
@@ -2771,8 +2771,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
					 hd_len, elements);
 	} else
-		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-					      data_offset, hd_len);
+		rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset,
					      hd_len);
 	if (!rc) {
 		card->stats.tx_packets++;
...