Commit 1a73e427 authored by Vinay Kumar Yadav, committed by David S. Miller

ch_ktls: Fix kernel panic

Taking a page refcount is not ideal and sometimes causes a kernel
panic. It is better to hold the tx_ctx lock for the complete skb
transmit, so the record's pages cannot be cleaned up if an ACK is
received in the middle.
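
In short, the fix replaces per-fragment page refcounting with holding the
per-connection tx_ctx lock across the whole skb transmit loop. A minimal
user-space sketch of that pattern, using a pthread mutex in place of the
kernel spinlock and hypothetical names (this is not the driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; the real driver uses struct tls_record_info and
 * tx_ctx->base.lock (a spinlock), not these types. */
struct record {
	int num_frags;
	bool freed;		/* set by the ACK/cleanup path once the record is done */
};

struct tx_ctx {
	pthread_mutex_t lock;	/* plays the role of tx_ctx->base.lock */
	struct record rec;
};

/* Old approach (simplified): take the lock briefly per chunk, bump a
 * refcount on every fragment page, drop the lock, transmit, then drop the
 * refs.  The patch instead holds the lock for the complete skb transmit,
 * so the cleanup path cannot free the record's pages while the send loop
 * is still using them. */
static int xmit_whole_skb(struct tx_ctx *ctx, int data_len)
{
	int ret = 0;

	pthread_mutex_lock(&ctx->lock);		/* lock once, before the loop */
	do {
		if (ctx->rec.freed) {		/* record already torn down */
			ret = -1;
			break;
		}
		/* ... build and post work requests for this chunk ... */
		data_len -= 1;			/* placeholder for consumed bytes */
	} while (data_len > 0);
	pthread_mutex_unlock(&ctx->lock);	/* unlock only after the loop */

	return ret;
}

int main(void)
{
	struct tx_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.rec = { .num_frags = 3, .freed = false },
	};

	printf("xmit returned %d\n", xmit_whole_skb(&ctx, 4));
	return 0;
}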

Fixes: 5a4b9fe7 ("cxgb4/chcr: complete record tx handling")
Signed-off-by: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 61d77358
@@ -2010,12 +2010,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * we will send the complete record again.
 	 */
+	spin_lock_irqsave(&tx_ctx->base.lock, flags);
 	do {
-		int i;
 		cxgb4_reclaim_completed_tx(adap, &q->q, true);
-		/* lock taken */
-		spin_lock_irqsave(&tx_ctx->base.lock, flags);
 		/* fetch the tls record */
 		record = tls_get_record(&tx_ctx->base, tcp_seq,
 					&tx_info->record_no);
@@ -2074,11 +2073,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 						    tls_end_offset, skb_offset,
 						    0);
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 			if (ret) {
 				/* free the refcount taken earlier */
 				if (tls_end_offset < data_len)
 					dev_kfree_skb_any(skb);
+				spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 				goto out;
 			}
@@ -2088,16 +2087,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			continue;
 		}
-		/* increase page reference count of the record, so that there
-		 * won't be any chance of page free in middle if in case stack
-		 * receives ACK and try to delete the record.
-		 */
-		for (i = 0; i < record->num_frags; i++)
-			__skb_frag_ref(&record->frags[i]);
-		/* lock cleared */
-		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 		/* if a tls record is finishing in this SKB */
 		if (tls_end_offset <= data_len) {
 			ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2122,13 +2111,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			data_len = 0;
 		}
-		/* clear the frag ref count which increased locally before */
-		for (i = 0; i < record->num_frags; i++) {
-			/* clear the frag ref count */
-			__skb_frag_unref(&record->frags[i]);
-		}
 		/* if any failure, come out from the loop. */
 		if (ret) {
+			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 			if (th->fin)
 				dev_kfree_skb_any(skb);
@@ -2143,6 +2128,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	} while (data_len > 0);
+	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
 	atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
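
Taken together, the hunks leave chcr_ktls_xmit() with one lock/unlock pair
bracketing the transmit loop, plus an unlock on each early-exit path shown
above. A condensed, non-compilable outline of the resulting flow (everything
else in the function elided):

	spin_lock_irqsave(&tx_ctx->base.lock, flags);
	do {
		cxgb4_reclaim_completed_tx(adap, &q->q, true);
		record = tls_get_record(&tx_ctx->base, tcp_seq, &tx_info->record_no);
		/* ... start/middle/end-of-record handling; the error paths in the
		 * hunks above drop the lock before bailing out ... */
	} while (data_len > 0);
	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);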