Commit 0e0704bb authored by David S. Miller

Merge branch 'ch_tlss-fixes'

Vinay Kumar Yadav says:

====================
chelsio/ch_ktls: chelsio inline tls driver bug fixes

This series of patches fixes the following bugs in the Chelsio inline TLS driver.
Patch1: fixes a kernel panic (locking rework sketched below).
Patch2: fixes a connection close issue.
Patch3: fixes a tcb close call issue.
Patch4: removes an unnecessary snd_una update.
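
The bulk of Patch1 is a locking rework in chcr_ktls_xmit(): rather than
bumping per-frag page refcounts and dropping tx_ctx->base.lock around
every loop iteration, the lock is now held for the entire retransmit
loop, so an incoming ACK cannot free a record while the driver is still
reading its frags. A minimal sketch of the resulting shape (elided with
"...", not the verbatim driver code):

	spin_lock_irqsave(&tx_ctx->base.lock, flags);
	do {
		/* The record cannot be deleted by the stack while the
		 * lock is held, so the old __skb_frag_ref()/
		 * __skb_frag_unref() dance on record->frags[] is no
		 * longer needed.
		 */
		record = tls_get_record(&tx_ctx->base, tcp_seq,
					&tx_info->record_no);
		...
	} while (data_len > 0);
	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);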
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 61d77358 e8a41555
@@ -349,18 +349,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
 	return cxgb4_ofld_send(tx_info->netdev, skb);
 }
 
-/*
- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
- * @tx_info - driver specific tls info.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
-{
-	return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
-				  TCB_T_STATE_V(TCB_T_STATE_M),
-				  CHCR_TCB_STATE_CLOSED, 1);
-}
-
 /*
  * chcr_ktls_dev_del: call back for tls_dev_del.
  * Remove the tid and l2t entry and close the connection.
@@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 
 	/* clear tid */
 	if (tx_info->tid != -1) {
-		/* clear tcb state and then release tid */
-		chcr_ktls_mark_tcb_close(tx_info);
 		cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
 				 tx_info->tid, tx_info->ip_family);
 	}
@@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	return 0;
 
 free_tid:
-	chcr_ktls_mark_tcb_close(tx_info);
 #if IS_ENABLED(CONFIG_IPV6)
 	/* clear clip entry */
 	if (tx_info->ip_family == AF_INET6)
@@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
 	if (tx_info->pending_close) {
 		spin_unlock(&tx_info->lock);
 		if (!status) {
-			/* it's a late success, tcb status is established,
-			 * mark it close.
-			 */
-			chcr_ktls_mark_tcb_close(tx_info);
 			cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
 					 tid, tx_info->ip_family);
 		}
@@ -1663,54 +1644,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
 	refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
 }
 
-/*
- * chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid
- * sending the same segment again. It will discard the segment which is before
- * the current tx max.
- * @tx_info - driver specific tls info.
- * @q - TX queue.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
-				    struct sge_eth_txq *q)
-{
-	struct fw_ulptx_wr *wr;
-	unsigned int ndesc;
-	int credits;
-	void *pos;
-	u32 len;
-
-	len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
-	ndesc = DIV_ROUND_UP(len, 64);
-
-	credits = chcr_txq_avail(&q->q) - ndesc;
-	if (unlikely(credits < 0)) {
-		chcr_eth_txq_stop(q);
-		return NETDEV_TX_BUSY;
-	}
-
-	pos = &q->q.desc[q->q.pidx];
-
-	wr = pos;
-	/* ULPTX wr */
-	wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
-	wr->cookie = 0;
-	/* fill len in wr field */
-	wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
-
-	pos += sizeof(*wr);
-
-	pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
-					 TCB_SND_UNA_RAW_W,
-					 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
-					 TCB_SND_UNA_RAW_V(0), 0);
-
-	chcr_txq_advance(&q->q, ndesc);
-	cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
-
-	return 0;
-}
-
 /*
  * chcr_end_part_handler: This handler will handle the record which
  * is complete or if record's end part is received. T6 adapter has a issue that
@@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 				 struct sge_eth_txq *q, u32 skb_offset,
 				 u32 tls_end_offset, bool last_wr)
 {
+	bool free_skb_if_tx_fails = false;
 	struct sk_buff *nskb = NULL;
+
 	/* check if it is a complete record */
 	if (tls_end_offset == record->len) {
 		nskb = skb;
@@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 
 		if (last_wr)
 			dev_kfree_skb_any(skb);
+		else
+			free_skb_if_tx_fails = true;
 
 		last_wr = true;
@@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 					record->num_frags,
 					(last_wr && tcp_push_no_fin),
 					mss)) {
+		if (free_skb_if_tx_fails)
+			dev_kfree_skb_any(skb);
 		goto out;
 	}
 	tx_info->prev_seq = record->end_seq;
@@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
 			/* reset tcp_seq as per the prior_data_required len */
 			tcp_seq -= prior_data_len;
 		}
-		/* reset snd una, so the middle record won't send the already
-		 * sent part.
-		 */
-		if (chcr_ktls_update_snd_una(tx_info, q))
-			goto out;
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
 	} else {
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
@@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * we will send the complete record again.
 	 */
 
+	spin_lock_irqsave(&tx_ctx->base.lock, flags);
+
 	do {
-		int i;
 
 		cxgb4_reclaim_completed_tx(adap, &q->q, true);
-		/* lock taken */
-		spin_lock_irqsave(&tx_ctx->base.lock, flags);
 		/* fetch the tls record */
 		record = tls_get_record(&tx_ctx->base, tcp_seq,
 					&tx_info->record_no);
@@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 						   tls_end_offset, skb_offset,
 						   0);
 
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 			if (ret) {
 				/* free the refcount taken earlier */
 				if (tls_end_offset < data_len)
 					dev_kfree_skb_any(skb);
+				spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 				goto out;
 			}
@@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			continue;
 		}
 
-		/* increase page reference count of the record, so that there
-		 * won't be any chance of page free in middle if in case stack
-		 * receives ACK and try to delete the record.
-		 */
-		for (i = 0; i < record->num_frags; i++)
-			__skb_frag_ref(&record->frags[i]);
-		/* lock cleared */
-		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-
 		/* if a tls record is finishing in this SKB */
 		if (tls_end_offset <= data_len) {
 			ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			data_len = 0;
 		}
 
-		/* clear the frag ref count which increased locally before */
-		for (i = 0; i < record->num_frags; i++) {
-			/* clear the frag ref count */
-			__skb_frag_unref(&record->frags[i]);
-		}
 		/* if any failure, come out from the loop. */
 		if (ret) {
+			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 			if (th->fin)
 				dev_kfree_skb_any(skb);
@@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	} while (data_len > 0);
 
+	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
 	atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
......