Commit 0700aa3a authored by Sabrina Dubroca, committed by David S. Miller

chcr_ktls: use tls_offload_context_tx and driver_state like other drivers

chcr_ktls uses the space reserved in driver_state by
tls_set_device_offload, but makes up its own wrapper around
tls_offload_context_tx instead of accessing driver_state via the
__tls_driver_ctx helper.

In this driver, driver_state is only used to store a pointer to a
larger context struct allocated by the driver.
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1cf7fbce
...@@ -361,9 +361,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev, ...@@ -361,9 +361,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
struct tls_context *tls_ctx, struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction) enum tls_offload_ctx_dir direction)
{ {
struct chcr_ktls_ofld_ctx_tx *tx_ctx = struct chcr_ktls_info *tx_info = chcr_get_ktls_tx_info(tls_ctx);
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
struct ch_ktls_port_stats_debug *port_stats; struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_uld_ctx *u_ctx; struct chcr_ktls_uld_ctx *u_ctx;
...@@ -396,7 +394,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev, ...@@ -396,7 +394,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_close); atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info); kvfree(tx_info);
tx_ctx->chcr_info = NULL; chcr_set_ktls_tx_info(tls_ctx, NULL);
/* release module refcount */ /* release module refcount */
module_put(THIS_MODULE); module_put(THIS_MODULE);
} }
...@@ -417,7 +415,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, ...@@ -417,7 +415,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
{ {
struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_context *tls_ctx = tls_get_ctx(sk);
struct ch_ktls_port_stats_debug *port_stats; struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct chcr_ktls_uld_ctx *u_ctx; struct chcr_ktls_uld_ctx *u_ctx;
struct chcr_ktls_info *tx_info; struct chcr_ktls_info *tx_info;
struct dst_entry *dst; struct dst_entry *dst;
...@@ -427,8 +424,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, ...@@ -427,8 +424,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
u8 daaddr[16]; u8 daaddr[16];
int ret = -1; int ret = -1;
tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
pi = netdev_priv(netdev); pi = netdev_priv(netdev);
adap = pi->adapter; adap = pi->adapter;
port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id]; port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
...@@ -440,7 +435,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, ...@@ -440,7 +435,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
goto out; goto out;
} }
if (tx_ctx->chcr_info) if (chcr_get_ktls_tx_info(tls_ctx))
goto out; goto out;
if (u_ctx && u_ctx->detach) if (u_ctx && u_ctx->detach)
...@@ -566,7 +561,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, ...@@ -566,7 +561,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
goto free_tid; goto free_tid;
atomic64_inc(&port_stats->ktls_tx_ctx); atomic64_inc(&port_stats->ktls_tx_ctx);
tx_ctx->chcr_info = tx_info; chcr_set_ktls_tx_info(tls_ctx, tx_info);
return 0; return 0;
...@@ -647,7 +642,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, ...@@ -647,7 +642,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
{ {
const struct cpl_act_open_rpl *p = (void *)input; const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL; struct chcr_ktls_info *tx_info = NULL;
struct chcr_ktls_ofld_ctx_tx *tx_ctx; struct tls_offload_context_tx *tx_ctx;
struct chcr_ktls_uld_ctx *u_ctx; struct chcr_ktls_uld_ctx *u_ctx;
unsigned int atid, tid, status; unsigned int atid, tid, status;
struct tls_context *tls_ctx; struct tls_context *tls_ctx;
...@@ -686,7 +681,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, ...@@ -686,7 +681,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family); cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
/* Adding tid */ /* Adding tid */
tls_ctx = tls_get_ctx(tx_info->sk); tls_ctx = tls_get_ctx(tx_info->sk);
tx_ctx = chcr_get_ktls_tx_context(tls_ctx); tx_ctx = tls_offload_ctx_tx(tls_ctx);
u_ctx = adap->uld[CXGB4_ULD_KTLS].handle; u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (u_ctx) { if (u_ctx) {
ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx, ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
...@@ -1924,7 +1919,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1924,7 +1919,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset; u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
struct ch_ktls_port_stats_debug *port_stats; struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx; struct tls_offload_context_tx *tx_ctx;
struct ch_ktls_stats_debug *stats; struct ch_ktls_stats_debug *stats;
struct tcphdr *th = tcp_hdr(skb); struct tcphdr *th = tcp_hdr(skb);
int data_len, qidx, ret = 0, mss; int data_len, qidx, ret = 0, mss;
...@@ -1944,6 +1939,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1944,6 +1939,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len; mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk); tls_ctx = tls_get_ctx(skb->sk);
tx_ctx = tls_offload_ctx_tx(tls_ctx);
tls_netdev = rcu_dereference_bh(tls_ctx->netdev); tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
/* Don't quit on NULL: if tls_device_down is running in parallel, /* Don't quit on NULL: if tls_device_down is running in parallel,
* netdev might become NULL, even if tls_is_skb_tx_device_offloaded was * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
...@@ -1952,8 +1948,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1952,8 +1948,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(tls_netdev && tls_netdev != dev)) if (unlikely(tls_netdev && tls_netdev != dev))
goto out; goto out;
tx_ctx = chcr_get_ktls_tx_context(tls_ctx); tx_info = chcr_get_ktls_tx_info(tls_ctx);
tx_info = tx_ctx->chcr_info;
if (unlikely(!tx_info)) if (unlikely(!tx_info))
goto out; goto out;
...@@ -1979,19 +1974,19 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1979,19 +1974,19 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
* we will send the complete record again. * we will send the complete record again.
*/ */
spin_lock_irqsave(&tx_ctx->base.lock, flags); spin_lock_irqsave(&tx_ctx->lock, flags);
do { do {
cxgb4_reclaim_completed_tx(adap, &q->q, true); cxgb4_reclaim_completed_tx(adap, &q->q, true);
/* fetch the tls record */ /* fetch the tls record */
record = tls_get_record(&tx_ctx->base, tcp_seq, record = tls_get_record(tx_ctx, tcp_seq,
&tx_info->record_no); &tx_info->record_no);
/* By the time packet reached to us, ACK is received, and record /* By the time packet reached to us, ACK is received, and record
* won't be found in that case, handle it gracefully. * won't be found in that case, handle it gracefully.
*/ */
if (unlikely(!record)) { if (unlikely(!record)) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags); spin_unlock_irqrestore(&tx_ctx->lock, flags);
atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data); atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
goto out; goto out;
} }
...@@ -2015,7 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2015,7 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
tls_end_offset != tls_end_offset !=
record->len); record->len);
if (ret) { if (ret) {
spin_unlock_irqrestore(&tx_ctx->base.lock, spin_unlock_irqrestore(&tx_ctx->lock,
flags); flags);
goto out; goto out;
} }
...@@ -2046,7 +2041,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2046,7 +2041,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* free the refcount taken earlier */ /* free the refcount taken earlier */
if (tls_end_offset < data_len) if (tls_end_offset < data_len)
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&tx_ctx->base.lock, flags); spin_unlock_irqrestore(&tx_ctx->lock, flags);
goto out; goto out;
} }
...@@ -2082,7 +2077,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2082,7 +2077,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* if any failure, come out from the loop. */ /* if any failure, come out from the loop. */
if (ret) { if (ret) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags); spin_unlock_irqrestore(&tx_ctx->lock, flags);
if (th->fin) if (th->fin)
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
...@@ -2097,7 +2092,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2097,7 +2092,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
} while (data_len > 0); } while (data_len > 0);
spin_unlock_irqrestore(&tx_ctx->base.lock, flags); spin_unlock_irqrestore(&tx_ctx->lock, flags);
atomic64_inc(&port_stats->ktls_tx_encrypted_packets); atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes); atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
...@@ -2185,17 +2180,17 @@ static void clear_conn_resources(struct chcr_ktls_info *tx_info) ...@@ -2185,17 +2180,17 @@ static void clear_conn_resources(struct chcr_ktls_info *tx_info)
static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx) static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
{ {
struct ch_ktls_port_stats_debug *port_stats; struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx; struct tls_offload_context_tx *tx_ctx;
struct chcr_ktls_info *tx_info; struct chcr_ktls_info *tx_info;
unsigned long index; unsigned long index;
xa_for_each(&u_ctx->tid_list, index, tx_ctx) { xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
tx_info = tx_ctx->chcr_info; tx_info = __chcr_get_ktls_tx_info(tx_ctx);
clear_conn_resources(tx_info); clear_conn_resources(tx_info);
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_close); atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info); kvfree(tx_info);
tx_ctx->chcr_info = NULL; memset(tx_ctx->driver_state, 0, TLS_DRIVER_STATE_SIZE_TX);
/* release module refcount */ /* release module refcount */
module_put(THIS_MODULE); module_put(THIS_MODULE);
} }
......
...@@ -67,8 +67,7 @@ struct chcr_ktls_info { ...@@ -67,8 +67,7 @@ struct chcr_ktls_info {
bool pending_close; bool pending_close;
}; };
struct chcr_ktls_ofld_ctx_tx { struct chcr_ktls_ctx_tx {
struct tls_offload_context_tx base;
struct chcr_ktls_info *chcr_info; struct chcr_ktls_info *chcr_info;
}; };
...@@ -79,14 +78,33 @@ struct chcr_ktls_uld_ctx { ...@@ -79,14 +78,33 @@ struct chcr_ktls_uld_ctx {
bool detach; bool detach;
}; };
static inline struct chcr_ktls_ofld_ctx_tx * static inline struct chcr_ktls_info *
chcr_get_ktls_tx_context(struct tls_context *tls_ctx) __chcr_get_ktls_tx_info(struct tls_offload_context_tx *octx)
{ {
BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) > struct chcr_ktls_ctx_tx *priv_ctx;
TLS_OFFLOAD_CONTEXT_SIZE_TX);
return container_of(tls_offload_ctx_tx(tls_ctx), BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX);
struct chcr_ktls_ofld_ctx_tx, priv_ctx = (struct chcr_ktls_ctx_tx *)octx->driver_state;
base); return priv_ctx->chcr_info;
}
static inline struct chcr_ktls_info *
chcr_get_ktls_tx_info(struct tls_context *tls_ctx)
{
struct chcr_ktls_ctx_tx *priv_ctx;
BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX);
priv_ctx = (struct chcr_ktls_ctx_tx *)__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
return priv_ctx->chcr_info;
}
static inline void
chcr_set_ktls_tx_info(struct tls_context *tls_ctx, struct chcr_ktls_info *chcr_info)
{
struct chcr_ktls_ctx_tx *priv_ctx;
priv_ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
priv_ctx->chcr_info = chcr_info;
} }
static inline int chcr_get_first_rx_qid(struct adapter *adap) static inline int chcr_get_first_rx_qid(struct adapter *adap)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment