Commit aea06eb2 authored by David S. Miller

Merge branch 'TLS-offload-rx-netdev-and-mlx5'

Boris Pismenny says:

====================
TLS offload rx, netdev & mlx5

The following series provides TLS RX inline crypto offload.

v4->v5:
    - Remove the Kconfig to mutually exclude both IPsec and TLS

v3->v4:
    - Remove the iov revert for zero copy send flow

v2->v3:
    - Fix typo
    - Adjust cover letter
    - Fix bug in zero copy flows
    - Use network byte order for the record number in resync
    - Adjust the sequence provided in resync

v1->v2:
    - Fix bisectability problems due to variable name changes
    - Fix potential uninitialized return value

This series completes the generic infrastructure for offloading TLS crypto to
a network device. It enables the kernel TLS socket to skip decryption and
authentication operations for SKBs marked as decrypted on the receive
side of the data path, leaving those computationally expensive operations
to the NIC.

This infrastructure doesn't require a TCP offload engine. Instead, the
NIC decrypts a packet's payload if the packet contains the expected TCP
sequence number. The TLS record authentication tag remains unmodified
regardless of decryption. If the packet is decrypted successfully and it
contains an authentication tag, then the authentication check has passed.
Otherwise, if the authentication fails, then the packet is provided
unmodified and the KTLS layer is responsible for handling it.
Out-Of-Order TCP packets are provided unmodified. As a result,
in the slow path some of the SKBs are decrypted while others remain as
ciphertext.
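
Condensed from the mlx5e_tls_handle_rx_skb() hunk later in this series
(statistics and error handling dropped), the driver-side marking looks
like this:

static void mlx5e_tls_handle_rx_skb(struct net_device *netdev,
				    struct sk_buff *skb, u32 *cqe_bcnt)
{
	struct mlx5e_tls_metadata *mdata;

	if (!is_metadata_hdr_valid(skb))
		return;

	mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
	switch (mdata->content.recv.syndrome) {
	case SYNDROM_DECRYPTED:
		skb->decrypted = 1;	/* KTLS may skip crypto for this data */
		break;
	case SYNDROM_RESYNC_REQUEST:
		tls_update_resync_sn(netdev, skb, mdata);	/* ask KTLS to resync */
		break;
	case SYNDROM_AUTH_FAILED:
		/* leave the bit clear; SW KTLS will surface the failure */
		break;
	default:
		return;			/* not ours; leave the skb untouched */
	}

	remove_metadata_hdr(skb);
	*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
}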

The GRO and TCP layers must not coalesce decrypted and non-decrypted SKBs.
In the worst case a received TLS record consists of both plaintext
and ciphertext packets. These partially decrypted records must be
re-encrypted, only to be decrypted again in software.
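
The guards that enforce this are small; the tcp_try_coalesce(),
tcp_collapse() and tcp_gro_receive() hunks at the end of this series add
checks of the following shape:

#ifdef CONFIG_TLS_DEVICE
	/* tcp_try_coalesce(): never merge skbs whose decrypted bits differ */
	if (from->decrypted != to->decrypted)
		return false;
#endif

#ifdef CONFIG_TLS_DEVICE
	/* tcp_gro_receive(): force a flush when the bit flips mid-flow */
	flush |= p->decrypted ^ skb->decrypted;
#endif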

The notable differences between SW KTLS and NIC offloaded TLS
implementations are as follows:
1. Partial decryption - Software must handle the case of a TLS record
that was only partially decrypted by HW. This can happen due to packet
reordering; a sketch of how such records can be classified follows this list.
2. Resynchronization - tls_read_size calls the device driver to
resynchronize the HW whenever the HW loses track of the TLS record
framing in the TCP stream.
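
A minimal sketch of how (1) can be detected once the stream parser has
reassembled a record; the enum and function name here are illustrative,
not the series' actual API:

enum rx_record_state {
	RECORD_DECRYPTED,	/* skip SW crypto entirely */
	RECORD_ENCRYPTED,	/* run the normal SW KTLS path */
	RECORD_MIXED,		/* re-encrypt, then decrypt in SW */
};

static enum rx_record_state classify_rx_record(struct sk_buff *skb)
{
	bool all_decrypted = skb->decrypted;
	bool all_encrypted = !skb->decrypted;
	struct sk_buff *iter;

	/* The record may span several skbs; the decrypted bit was set
	 * per packet by the driver, so walk all of them.
	 */
	skb_walk_frags(skb, iter) {
		all_decrypted &= iter->decrypted;
		all_encrypted &= !iter->decrypted;
	}

	if (all_decrypted)
		return RECORD_DECRYPTED;
	if (all_encrypted)
		return RECORD_ENCRYPTED;
	return RECORD_MIXED;
}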

The infrastructure should be extensible to support various NIC offload
implementations. However, it is currently written with the
implementation below in mind:
The NIC identifies packets that should be offloaded according to
the 5-tuple and the TCP sequence number. If these match and the
packet is decrypted and authenticated successfully, then a syndrome
is provided to software. Otherwise, the packet is unmodified.
Decrypted and non-decrypted packets aren't coalesced by the network stack,
and the KTLS layer decrypts and authenticates partially decrypted records.
The NIC provides an indication whenever a resync is required. The resync
operation is triggered by the KTLS layer while parsing TLS record headers.
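
Condensed from the mlx5e TLS hunks below (statistics and logging dropped),
the driver side of the resync path registers a new tls_dev_resync_rx
callback, which the KTLS layer invokes once it has located the record
header the HW pointed at:

static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
				u32 seq, u64 rcd_sn)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_tls_offload_context_rx *rx_ctx =
		mlx5e_get_tls_rx_context(tls_get_ctx(sk));

	/* Push the verified TCP sequence / record number to the HW */
	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
}

static const struct tlsdev_ops mlx5e_tls_ops = {
	.tls_dev_add       = mlx5e_tls_add,
	.tls_dev_del       = mlx5e_tls_del,
	.tls_dev_resync_rx = mlx5e_tls_resync_rx,
};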

Finally, we measure the performance obtained by running single stream
iperf with two Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz machines connected
back-to-back with Innova TLS (40Gbps) NICs. We compare TCP (upper bound)
and KTLS-Offload running both in Tx and Rx. The results show that the
performance of offload is comparable to TCP.

                   | Bandwidth (Gbps) | CPU Tx (%) | CPU Rx (%)
TCP                | 28.8             | 5          | 12
KTLS-Offload-Tx-Rx | 28.6             | 7          | 14

Paper: https://netdevconf.org/2.2/papers/pismenny-tlscrypto-talk.pdf
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cc98419a b3ccf978
#ifndef __MLX5E_ACCEL_H__
#define __MLX5E_ACCEL_H__

#ifdef CONFIG_MLX5_ACCEL

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en.h"

static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
{
	__be16 *ethtype;

	if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
		return false;
	ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
	if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
		return false;
	return true;
}

static inline void remove_metadata_hdr(struct sk_buff *skb)
{
	struct ethhdr *old_eth;
	struct ethhdr *new_eth;

	/* Remove the metadata from the buffer */
	old_eth = (struct ethhdr *)skb->data;
	new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
	memmove(new_eth, old_eth, 2 * ETH_ALEN);
	/* Ethertype is already in its new place */
	skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
}

#endif /* CONFIG_MLX5_ACCEL */

#endif /* __MLX5E_ACCEL_H__ */
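
Both the IPsec and the new TLS RX handlers below follow the same pattern
around these two helpers:

	if (!is_metadata_hdr_valid(skb))
		return;			/* no metadata: leave the skb untouched */

	/* ... interpret the per-packet metadata ... */

	remove_metadata_hdr(skb);
	*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
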
...@@ -37,17 +37,26 @@ ...@@ -37,17 +37,26 @@
#include "mlx5_core.h" #include "mlx5_core.h"
#include "fpga/tls.h" #include "fpga/tls.h"
int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info, struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid) u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx)
{ {
return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info, return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
start_offload_tcp_sn, p_swid); start_offload_tcp_sn, p_swid,
direction_sx);
} }
void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx)
{ {
mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL); mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
}
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
u64 rcd_sn)
{
return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
} }
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
......
...@@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits { ...@@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits {
u8 reserved_at_2[0x1e]; u8 reserved_at_2[0x1e];
}; };
int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info, struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid); u32 start_offload_tcp_sn, u32 *p_swid,
void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid); bool direction_sx);
void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx);
int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
u64 rcd_sn);
bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
...@@ -71,11 +75,15 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); ...@@ -71,11 +75,15 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
#else #else
static inline int static int
mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info, struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid) { return 0; } u32 start_offload_tcp_sn, u32 *p_swid,
static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { } bool direction_sx) { return -ENOTSUPP; }
static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
bool direction_sx) { }
static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
u32 seq, u64 rcd_sn) { return 0; }
static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
......
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h" #include "en_accel/ipsec.h"
#include "accel/accel.h"
#include "en.h" #include "en.h"
enum { enum {
...@@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, ...@@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
} }
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb) struct sk_buff *skb, u32 *cqe_bcnt)
{ {
struct mlx5e_ipsec_metadata *mdata; struct mlx5e_ipsec_metadata *mdata;
struct ethhdr *old_eth;
struct ethhdr *new_eth;
struct xfrm_state *xs; struct xfrm_state *xs;
__be16 *ethtype;
/* Detect inline metadata */ if (!is_metadata_hdr_valid(skb))
if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
return skb;
ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
return skb; return skb;
/* Use the metadata */ /* Use the metadata */
...@@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, ...@@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
return NULL; return NULL;
} }
/* Remove the metadata from the buffer */ remove_metadata_hdr(skb);
old_eth = (struct ethhdr *)skb->data; *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
memmove(new_eth, old_eth, 2 * ETH_ALEN);
/* Ethertype is already in its new place */
skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
return skb; return skb;
} }
......
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
#include "en.h" #include "en.h"
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb); struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_ipsec_inverse_table_init(void); void mlx5e_ipsec_inverse_table_init(void);
......
...@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, ...@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
u32 caps = mlx5_accel_tls_device_caps(mdev); u32 caps = mlx5_accel_tls_device_caps(mdev);
int ret = -ENOMEM; int ret = -ENOMEM;
void *flow; void *flow;
u32 swid;
if (direction != TLS_OFFLOAD_CTX_DIR_TX)
return -EINVAL;
flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
if (!flow) if (!flow)
...@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, ...@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
if (ret) if (ret)
goto free_flow; goto free_flow;
if (direction == TLS_OFFLOAD_CTX_DIR_TX) { ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
struct mlx5e_tls_offload_context *tx_ctx = start_offload_tcp_sn, &swid,
mlx5e_get_tls_tx_context(tls_ctx); direction == TLS_OFFLOAD_CTX_DIR_TX);
u32 swid;
ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
start_offload_tcp_sn, &swid);
if (ret < 0) if (ret < 0)
goto free_flow; goto free_flow;
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
struct mlx5e_tls_offload_context_tx *tx_ctx =
mlx5e_get_tls_tx_context(tls_ctx);
tx_ctx->swid = htonl(swid); tx_ctx->swid = htonl(swid);
tx_ctx->expected_seq = start_offload_tcp_sn; tx_ctx->expected_seq = start_offload_tcp_sn;
} else {
struct mlx5e_tls_offload_context_rx *rx_ctx =
mlx5e_get_tls_rx_context(tls_ctx);
rx_ctx->handle = htonl(swid);
} }
return 0; return 0;
...@@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev, ...@@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev,
enum tls_offload_ctx_dir direction) enum tls_offload_ctx_dir direction)
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
unsigned int handle;
if (direction == TLS_OFFLOAD_CTX_DIR_TX) { handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid); mlx5e_get_tls_tx_context(tls_ctx)->swid :
mlx5e_get_tls_rx_context(tls_ctx)->handle);
mlx5_accel_tls_del_tx_flow(priv->mdev, swid); mlx5_accel_tls_del_flow(priv->mdev, handle,
} else { direction == TLS_OFFLOAD_CTX_DIR_TX);
netdev_err(netdev, "unsupported direction %d\n", direction); }
}
static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
u32 seq, u64 rcd_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_rx *rx_ctx;
rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
be64_to_cpu(rcd_sn));
mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
} }
static const struct tlsdev_ops mlx5e_tls_ops = { static const struct tlsdev_ops mlx5e_tls_ops = {
.tls_dev_add = mlx5e_tls_add, .tls_dev_add = mlx5e_tls_add,
.tls_dev_del = mlx5e_tls_del, .tls_dev_del = mlx5e_tls_del,
.tls_dev_resync_rx = mlx5e_tls_resync_rx,
}; };
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{ {
u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
struct net_device *netdev = priv->netdev; struct net_device *netdev = priv->netdev;
if (!mlx5_accel_is_tls_device(priv->mdev)) if (!mlx5_accel_is_tls_device(priv->mdev))
return; return;
if (caps & MLX5_ACCEL_TLS_TX) {
netdev->features |= NETIF_F_HW_TLS_TX; netdev->features |= NETIF_F_HW_TLS_TX;
netdev->hw_features |= NETIF_F_HW_TLS_TX; netdev->hw_features |= NETIF_F_HW_TLS_TX;
}
if (caps & MLX5_ACCEL_TLS_RX) {
netdev->features |= NETIF_F_HW_TLS_RX;
netdev->hw_features |= NETIF_F_HW_TLS_RX;
}
if (!(caps & MLX5_ACCEL_TLS_LRO)) {
netdev->features &= ~NETIF_F_LRO;
netdev->hw_features &= ~NETIF_F_LRO;
}
netdev->tlsdev_ops = &mlx5e_tls_ops; netdev->tlsdev_ops = &mlx5e_tls_ops;
} }
......
...@@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats { ...@@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_drop_resync_alloc; atomic64_t tx_tls_drop_resync_alloc;
atomic64_t tx_tls_drop_no_sync_data; atomic64_t tx_tls_drop_no_sync_data;
atomic64_t tx_tls_drop_bypass_required; atomic64_t tx_tls_drop_bypass_required;
atomic64_t rx_tls_drop_resync_request;
atomic64_t rx_tls_resync_request;
atomic64_t rx_tls_resync_reply;
atomic64_t rx_tls_auth_fail;
}; };
struct mlx5e_tls { struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats; struct mlx5e_tls_sw_stats sw_stats;
}; };
struct mlx5e_tls_offload_context { struct mlx5e_tls_offload_context_tx {
struct tls_offload_context base; struct tls_offload_context_tx base;
u32 expected_seq; u32 expected_seq;
__be32 swid; __be32 swid;
}; };
static inline struct mlx5e_tls_offload_context * static inline struct mlx5e_tls_offload_context_tx *
mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
{ {
BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) > BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
TLS_OFFLOAD_CONTEXT_SIZE); TLS_OFFLOAD_CONTEXT_SIZE_TX);
return container_of(tls_offload_ctx(tls_ctx), return container_of(tls_offload_ctx_tx(tls_ctx),
struct mlx5e_tls_offload_context, struct mlx5e_tls_offload_context_tx,
base);
}
struct mlx5e_tls_offload_context_rx {
struct tls_offload_context_rx base;
__be32 handle;
};
static inline struct mlx5e_tls_offload_context_rx *
mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
{
BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
TLS_OFFLOAD_CONTEXT_SIZE_RX);
return container_of(tls_offload_ctx_rx(tls_ctx),
struct mlx5e_tls_offload_context_rx,
base); base);
} }
......
...@@ -33,6 +33,14 @@ ...@@ -33,6 +33,14 @@
#include "en_accel/tls.h" #include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h" #include "en_accel/tls_rxtx.h"
#include "accel/accel.h"
#include <net/inet6_hashtables.h>
#include <linux/ipv6.h>
#define SYNDROM_DECRYPTED 0x30
#define SYNDROM_RESYNC_REQUEST 0x31
#define SYNDROM_AUTH_FAILED 0x32
#define SYNDROME_OFFLOAD_REQUIRED 32 #define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33 #define SYNDROME_SYNC 33
...@@ -44,10 +52,26 @@ struct sync_info { ...@@ -44,10 +52,26 @@ struct sync_info {
skb_frag_t frags[MAX_SKB_FRAGS]; skb_frag_t frags[MAX_SKB_FRAGS];
}; };
struct mlx5e_tls_metadata { struct recv_metadata_content {
u8 syndrome;
u8 reserved;
__be32 sync_seq;
} __packed;
struct send_metadata_content {
/* One byte of syndrome followed by 3 bytes of swid */ /* One byte of syndrome followed by 3 bytes of swid */
__be32 syndrome_swid; __be32 syndrome_swid;
__be16 first_seq; __be16 first_seq;
} __packed;
struct mlx5e_tls_metadata {
union {
/* from fpga to host */
struct recv_metadata_content recv;
/* from host to fpga */
struct send_metadata_content send;
unsigned char raw[6];
} __packed content;
/* packet type ID field */ /* packet type ID field */
__be16 ethertype; __be16 ethertype;
} __packed; } __packed;
...@@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) ...@@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
2 * ETH_ALEN); 2 * ETH_ALEN);
eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; pet->content.send.syndrome_swid =
htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
return 0; return 0;
} }
static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context, static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
u32 tcp_seq, struct sync_info *info) u32 tcp_seq, struct sync_info *info)
{ {
int remaining, i = 0, ret = -EINVAL; int remaining, i = 0, ret = -EINVAL;
...@@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, ...@@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
memcpy(pet, &syndrome, sizeof(syndrome)); memcpy(pet, &syndrome, sizeof(syndrome));
pet->first_seq = htons(tcp_seq); pet->content.send.first_seq = htons(tcp_seq);
/* MLX5 devices don't care about the checksum partial start, offset /* MLX5 devices don't care about the checksum partial start, offset
* and pseudo header * and pseudo header
...@@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, ...@@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
} }
static struct sk_buff * static struct sk_buff *
mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe, struct mlx5e_tx_wqe **wqe,
u16 *pi, u16 *pi,
...@@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, ...@@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
u16 *pi) u16 *pi)
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context *context; struct mlx5e_tls_offload_context_tx *context;
struct tls_context *tls_ctx; struct tls_context *tls_ctx;
u32 expected_seq; u32 expected_seq;
int datalen; int datalen;
...@@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, ...@@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
out: out:
return skb; return skb;
} }
static int tls_update_resync_sn(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5e_tls_metadata *mdata)
{
struct sock *sk = NULL;
struct iphdr *iph;
struct tcphdr *th;
__be32 seq;
if (mdata->ethertype != htons(ETH_P_IP))
return -EINVAL;
iph = (struct iphdr *)(mdata + 1);
th = ((void *)iph) + iph->ihl * 4;
if (iph->version == 4) {
sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, th->dest,
netdev->ifindex, 0);
#endif
}
if (!sk || sk->sk_state == TCP_TIME_WAIT) {
struct mlx5e_priv *priv = netdev_priv(netdev);
atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
goto out;
}
skb->sk = sk;
skb->destructor = sock_edemux;
memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
tls_offload_rx_resync_request(sk, seq);
out:
return 0;
}
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
u32 *cqe_bcnt)
{
struct mlx5e_tls_metadata *mdata;
struct mlx5e_priv *priv;
if (!is_metadata_hdr_valid(skb))
return;
/* Use the metadata */
mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
switch (mdata->content.recv.syndrome) {
case SYNDROM_DECRYPTED:
skb->decrypted = 1;
break;
case SYNDROM_RESYNC_REQUEST:
tls_update_resync_sn(netdev, skb, mdata);
priv = netdev_priv(netdev);
atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
break;
case SYNDROM_AUTH_FAILED:
/* Authentication failure will be observed and verified by kTLS */
priv = netdev_priv(netdev);
atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
break;
default:
/* Bypass the metadata header to others */
return;
}
remove_metadata_hdr(skb);
*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
}
...@@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, ...@@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe **wqe, struct mlx5e_tx_wqe **wqe,
u16 *pi); u16 *pi);
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
u32 *cqe_bcnt);
#endif /* CONFIG_MLX5_EN_TLS */ #endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_RXTX_H__ */ #endif /* __MLX5E_TLS_RXTX_H__ */
...@@ -44,6 +44,7 @@ ...@@ -44,6 +44,7 @@
#include "en_rep.h" #include "en_rep.h"
#include "ipoib/ipoib.h" #include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h" #include "lib/clock.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
...@@ -797,6 +798,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, ...@@ -797,6 +798,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct net_device *netdev = rq->netdev; struct net_device *netdev = rq->netdev;
skb->mac_len = ETH_HLEN; skb->mac_len = ETH_HLEN;
#ifdef CONFIG_MLX5_EN_TLS
mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
#endif
if (lro_num_seg > 1) { if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
...@@ -1541,7 +1547,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) ...@@ -1541,7 +1547,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_free_rx_wqe(rq, wi); mlx5e_free_rx_wqe(rq, wi);
goto wq_cyc_pop; goto wq_cyc_pop;
} }
skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb); skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
if (unlikely(!skb)) { if (unlikely(!skb)) {
mlx5e_free_rx_wqe(rq, wi); mlx5e_free_rx_wqe(rq, wi);
goto wq_cyc_pop; goto wq_cyc_pop;
......
...@@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, ...@@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
void *ptr) void *ptr)
{ {
unsigned long flags;
int ret; int ret;
/* TLS metadata format is 1 byte for syndrome followed /* TLS metadata format is 1 byte for syndrome followed
...@@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, ...@@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
idr_preload(GFP_KERNEL); idr_preload(GFP_KERNEL);
spin_lock_irq(idr_spinlock); spin_lock_irqsave(idr_spinlock, flags);
ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
spin_unlock_irq(idr_spinlock); spin_unlock_irqrestore(idr_spinlock, flags);
idr_preload_end(); idr_preload_end();
return ret; return ret;
...@@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr, ...@@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr,
spin_unlock_irqrestore(idr_spinlock, flags); spin_unlock_irqrestore(idr_spinlock, flags);
} }
static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_dma_buf *buf, u8 status)
{
kfree(buf);
}
struct mlx5_teardown_stream_context { struct mlx5_teardown_stream_context {
struct mlx5_fpga_tls_command_context cmd; struct mlx5_fpga_tls_command_context cmd;
u32 swid; u32 swid;
...@@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, ...@@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
mlx5_fpga_err(fdev, mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d", "Teardown stream failed with syndrome = %d",
syndrome); syndrome);
else else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
&fdev->tls->idr_spinlock, &fdev->tls->tx_idr_spinlock,
ctx->swid);
else
mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
&fdev->tls->rx_idr_spinlock,
ctx->swid); ctx->swid);
} }
mlx5_fpga_tls_put_command_ctx(cmd); mlx5_fpga_tls_put_command_ctx(cmd);
...@@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) ...@@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
MLX5_GET(tls_flow, flow, direction_sx)); MLX5_GET(tls_flow, flow, direction_sx));
} }
int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
u64 rcd_sn)
{
struct mlx5_fpga_dma_buf *buf;
int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
void *flow;
void *cmd;
int ret;
buf = kzalloc(size, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
cmd = (buf + 1);
rcu_read_lock();
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
rcu_read_unlock();
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
buf->sg[0].data = cmd;
buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
buf->complete = mlx_tls_kfree_complete;
ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
return ret;
}
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags) void *flow, u32 swid, gfp_t flags)
{ {
...@@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, ...@@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
mlx5_fpga_tls_teardown_completion); mlx5_fpga_tls_teardown_completion);
} }
void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
gfp_t flags) gfp_t flags, bool direction_sx)
{ {
struct mlx5_fpga_tls *tls = mdev->fpga->tls; struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow; void *flow;
rcu_read_lock(); rcu_read_lock();
if (direction_sx)
flow = idr_find(&tls->tx_idr, swid); flow = idr_find(&tls->tx_idr, swid);
else
flow = idr_find(&tls->rx_idr, swid);
rcu_read_unlock(); rcu_read_unlock();
if (!flow) { if (!flow) {
...@@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, ...@@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
* the command context because we might not have received * the command context because we might not have received
* the tx completion yet. * the tx completion yet.
*/ */
mlx5_fpga_tls_del_tx_flow(fdev->mdev, mlx5_fpga_tls_del_flow(fdev->mdev,
MLX5_GET(tls_cmd, tls_cmd, swid), MLX5_GET(tls_cmd, tls_cmd, swid),
GFP_ATOMIC); GFP_ATOMIC,
MLX5_GET(tls_cmd, tls_cmd,
direction_sx));
} }
mlx5_fpga_tls_put_command_ctx(cmd); mlx5_fpga_tls_put_command_ctx(cmd);
...@@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) ...@@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
if (err) if (err)
goto error; goto error;
if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 | if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
MLX5_ACCEL_TLS_AES_GCM128))) {
err = -ENOTSUPP; err = -ENOTSUPP;
goto error; goto error;
} }
...@@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) ...@@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
INIT_LIST_HEAD(&tls->pending_cmds); INIT_LIST_HEAD(&tls->pending_cmds);
idr_init(&tls->tx_idr); idr_init(&tls->tx_idr);
spin_lock_init(&tls->idr_spinlock); idr_init(&tls->rx_idr);
spin_lock_init(&tls->tx_idr_spinlock);
spin_lock_init(&tls->rx_idr_spinlock);
fdev->tls = tls; fdev->tls = tls;
return 0; return 0;
...@@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, ...@@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
return 0; return 0;
} }
static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info, u32 swid, struct tls_crypto_info *crypto_info,
u32 tcp_sn) u32 swid, u32 tcp_sn)
{ {
u32 caps = mlx5_fpga_tls_device_caps(mdev); u32 caps = mlx5_fpga_tls_device_caps(mdev);
struct mlx5_setup_stream_context *ctx; struct mlx5_setup_stream_context *ctx;
...@@ -533,22 +586,29 @@ static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, ...@@ -533,22 +586,29 @@ static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
return ret; return ret;
} }
int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info, struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid) u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx)
{ {
struct mlx5_fpga_tls *tls = mdev->fpga->tls; struct mlx5_fpga_tls *tls = mdev->fpga->tls;
int ret = -ENOMEM; int ret = -ENOMEM;
u32 swid; u32 swid;
ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow); if (direction_sx)
ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
&tls->tx_idr_spinlock, flow);
else
ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
&tls->rx_idr_spinlock, flow);
if (ret < 0) if (ret < 0)
return ret; return ret;
swid = ret; swid = ret;
MLX5_SET(tls_flow, flow, direction_sx, 1); MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
start_offload_tcp_sn); start_offload_tcp_sn);
if (ret && ret != -EINTR) if (ret && ret != -EINTR)
goto free_swid; goto free_swid;
...@@ -556,7 +616,12 @@ int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, ...@@ -556,7 +616,12 @@ int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
*p_swid = swid; *p_swid = swid;
return 0; return 0;
free_swid: free_swid:
mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid); if (direction_sx)
mlx5_fpga_tls_release_swid(&tls->tx_idr,
&tls->tx_idr_spinlock, swid);
else
mlx5_fpga_tls_release_swid(&tls->rx_idr,
&tls->rx_idr_spinlock, swid);
return ret; return ret;
} }
...@@ -46,15 +46,18 @@ struct mlx5_fpga_tls { ...@@ -46,15 +46,18 @@ struct mlx5_fpga_tls {
struct mlx5_fpga_conn *conn; struct mlx5_fpga_conn *conn;
struct idr tx_idr; struct idr tx_idr;
spinlock_t idr_spinlock; /* protects the IDR */ struct idr rx_idr;
spinlock_t tx_idr_spinlock; /* protects the IDR */
spinlock_t rx_idr_spinlock; /* protects the IDR */
}; };
int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
struct tls_crypto_info *crypto_info, struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn, u32 *p_swid); u32 start_offload_tcp_sn, u32 *p_swid,
bool direction_sx);
void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
gfp_t flags); gfp_t flags, bool direction_sx);
bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
...@@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) ...@@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
return mdev->fpga->tls->caps; return mdev->fpga->tls->caps;
} }
int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
u64 rcd_sn);
#endif /* __MLX5_FPGA_TLS_H__ */ #endif /* __MLX5_FPGA_TLS_H__ */
...@@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa { ...@@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa {
enum fpga_tls_cmds { enum fpga_tls_cmds {
CMD_SETUP_STREAM = 0x1001, CMD_SETUP_STREAM = 0x1001,
CMD_TEARDOWN_STREAM = 0x1002, CMD_TEARDOWN_STREAM = 0x1002,
CMD_RESYNC_RX = 0x1003,
}; };
#define MLX5_TLS_1_2 (0) #define MLX5_TLS_1_2 (0)
......
...@@ -79,6 +79,7 @@ enum { ...@@ -79,6 +79,7 @@ enum {
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */ NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
...@@ -151,6 +152,7 @@ enum { ...@@ -151,6 +152,7 @@ enum {
#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) #define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) #define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
#define for_each_netdev_feature(mask_addr, bit) \ #define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
......
...@@ -903,6 +903,8 @@ struct tlsdev_ops { ...@@ -903,6 +903,8 @@ struct tlsdev_ops {
void (*tls_dev_del)(struct net_device *netdev, void (*tls_dev_del)(struct net_device *netdev,
struct tls_context *ctx, struct tls_context *ctx,
enum tls_offload_ctx_dir direction); enum tls_offload_ctx_dir direction);
void (*tls_dev_resync_rx)(struct net_device *netdev,
struct sock *sk, u32 seq, u64 rcd_sn);
}; };
#endif #endif
......
...@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t; ...@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
* @hash: the packet hash * @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices * @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue * @xmit_more: More SKBs are pending for this queue
* @decrypted: Decrypted SKB
* @ndisc_nodetype: router type (from link layer) * @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed * @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
...@@ -736,7 +737,11 @@ struct sk_buff { ...@@ -736,7 +737,11 @@ struct sk_buff {
peeked:1, peeked:1,
head_frag:1, head_frag:1,
xmit_more:1, xmit_more:1,
__unused:1; /* one bit hole */ #ifdef CONFIG_TLS_DEVICE
decrypted:1;
#else
__unused:1;
#endif
/* fields enclosed in headers_start/headers_end are copied /* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header() * using a single memcpy() in __copy_skb_header()
......
...@@ -83,6 +83,16 @@ struct tls_device { ...@@ -83,6 +83,16 @@ struct tls_device {
void (*unhash)(struct tls_device *device, struct sock *sk); void (*unhash)(struct tls_device *device, struct sock *sk);
}; };
enum {
TLS_BASE,
TLS_SW,
#ifdef CONFIG_TLS_DEVICE
TLS_HW,
#endif
TLS_HW_RECORD,
TLS_NUM_CONFIG,
};
struct tls_sw_context_tx { struct tls_sw_context_tx {
struct crypto_aead *aead_send; struct crypto_aead *aead_send;
struct crypto_wait async_wait; struct crypto_wait async_wait;
...@@ -128,7 +138,7 @@ struct tls_record_info { ...@@ -128,7 +138,7 @@ struct tls_record_info {
skb_frag_t frags[MAX_SKB_FRAGS]; skb_frag_t frags[MAX_SKB_FRAGS];
}; };
struct tls_offload_context { struct tls_offload_context_tx {
struct crypto_aead *aead_send; struct crypto_aead *aead_send;
spinlock_t lock; /* protects records list */ spinlock_t lock; /* protects records list */
struct list_head records_list; struct list_head records_list;
...@@ -147,8 +157,8 @@ struct tls_offload_context { ...@@ -147,8 +157,8 @@ struct tls_offload_context {
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *))) #define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
}; };
#define TLS_OFFLOAD_CONTEXT_SIZE \ #define TLS_OFFLOAD_CONTEXT_SIZE_TX \
(ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \ (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE) TLS_DRIVER_STATE_SIZE)
enum { enum {
...@@ -197,6 +207,7 @@ struct tls_context { ...@@ -197,6 +207,7 @@ struct tls_context {
int (*push_pending_record)(struct sock *sk, int flags); int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk); void (*sk_write_space)(struct sock *sk);
void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout); void (*sk_proto_close)(struct sock *sk, long timeout);
int (*setsockopt)(struct sock *sk, int level, int (*setsockopt)(struct sock *sk, int level,
...@@ -209,13 +220,27 @@ struct tls_context { ...@@ -209,13 +220,27 @@ struct tls_context {
void (*unhash)(struct sock *sk); void (*unhash)(struct sock *sk);
}; };
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
atomic64_t resync_req;
u8 driver_state[];
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
};
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
int wait_on_pending_writer(struct sock *sk, long *timeo); int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval, int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen); int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval, int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen); unsigned int optlen);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page, int tls_sw_sendpage(struct sock *sk, struct page *page,
...@@ -223,6 +248,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, ...@@ -223,6 +248,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
void tls_sw_close(struct sock *sk, long timeout); void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources_tx(struct sock *sk); void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk); void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len); int nonblock, int flags, int *addr_len);
unsigned int tls_sw_poll(struct file *file, struct socket *sock, unsigned int tls_sw_poll(struct file *file, struct socket *sock,
...@@ -239,7 +265,7 @@ void tls_device_sk_destruct(struct sock *sk); ...@@ -239,7 +265,7 @@ void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void); void tls_device_init(void);
void tls_device_cleanup(void); void tls_device_cleanup(void);
struct tls_record_info *tls_get_record(struct tls_offload_context *context, struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn); u32 seq, u64 *p_record_sn);
static inline bool tls_record_is_start_marker(struct tls_record_info *rec) static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
...@@ -289,11 +315,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) ...@@ -289,11 +315,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags; return tls_ctx->pending_open_record_frags;
} }
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{ {
return sk_fullsock(sk) && #ifdef CONFIG_SOCK_VALIDATE_XMIT
/* matches smp_store_release in tls_set_device_offload */ return sk_fullsock(sk) &
smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct; (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
&tls_validate_xmit_skb);
#else
return false;
#endif
} }
static inline void tls_err_abort(struct sock *sk, int err) static inline void tls_err_abort(struct sock *sk, int err)
...@@ -380,23 +414,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx( ...@@ -380,23 +414,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx; return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
} }
static inline struct tls_offload_context *tls_offload_ctx( static inline struct tls_offload_context_tx *
const struct tls_context *tls_ctx) tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{ {
return (struct tls_offload_context *)tls_ctx->priv_ctx_tx; return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
} }
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
}
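
The consumer of this packed value lives in the net/tls device-offload code,
which appears to be the collapsed diff in this view. A sketch of the
intended handshake, with the exact sequence-number bookkeeping simplified
(handle_device_resync() is declared further down; the body here is
illustrative):

void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	s64 resync_req = atomic64_read(&rx_ctx->resync_req);
	u32 req_seq = ntohl(resync_req >> 32);

	/* Bit 0 marks a pending request; the upper 32 bits carry the TCP
	 * sequence the HW reported.  Answer only when the record parser
	 * reaches that point in the stream, then hand the verified record
	 * number back to the driver via the new tls_dev_resync_rx op.
	 */
	if ((resync_req & 1) && req_seq == seq &&
	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
		tls_ctx->netdev->tlsdev_ops->tls_dev_resync_rx(tls_ctx->netdev,
							       sk, seq, rcd_sn);
}
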
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type); unsigned char *record_type);
void tls_register_device(struct tls_device *device); void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device); void tls_unregister_device(struct tls_device *device);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk, struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev, struct net_device *dev,
struct sk_buff *skb); struct sk_buff *skb);
int tls_sw_fallback_init(struct sock *sk, int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context *offload_ctx, struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info); struct tls_crypto_info *crypto_info);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
#endif /* _TLS_OFFLOAD_H */ #endif /* _TLS_OFFLOAD_H */
...@@ -111,6 +111,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] ...@@ -111,6 +111,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload", [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload",
[NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record", [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record",
[NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload", [NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload",
[NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload",
}; };
static const char static const char
......
...@@ -805,6 +805,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) ...@@ -805,6 +805,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
* It is not yet because we do not want to have a 16 bit hole * It is not yet because we do not want to have a 16 bit hole
*/ */
new->queue_mapping = old->queue_mapping; new->queue_mapping = old->queue_mapping;
#ifdef CONFIG_TLS_DEVICE
new->decrypted = old->decrypted;
#endif
memcpy(&new->headers_start, &old->headers_start, memcpy(&new->headers_start, &old->headers_start,
offsetof(struct sk_buff, headers_end) - offsetof(struct sk_buff, headers_end) -
...@@ -865,6 +868,9 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) ...@@ -865,6 +868,9 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
C(head_frag); C(head_frag);
C(data); C(data);
C(truesize); C(truesize);
#ifdef CONFIG_TLS_DEVICE
C(decrypted);
#endif
refcount_set(&n->users, 1); refcount_set(&n->users, 1);
atomic_inc(&(skb_shinfo(skb)->dataref)); atomic_inc(&(skb_shinfo(skb)->dataref));
......
...@@ -4343,6 +4343,11 @@ static bool tcp_try_coalesce(struct sock *sk, ...@@ -4343,6 +4343,11 @@ static bool tcp_try_coalesce(struct sock *sk,
if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
return false; return false;
#ifdef CONFIG_TLS_DEVICE
if (from->decrypted != to->decrypted)
return false;
#endif
if (!skb_try_coalesce(to, from, fragstolen, &delta)) if (!skb_try_coalesce(to, from, fragstolen, &delta))
return false; return false;
...@@ -4871,6 +4876,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, ...@@ -4871,6 +4876,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
break; break;
memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
#ifdef CONFIG_TLS_DEVICE
nskb->decrypted = skb->decrypted;
#endif
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
if (list) if (list)
__skb_queue_before(list, skb, nskb); __skb_queue_before(list, skb, nskb);
...@@ -4898,6 +4906,10 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, ...@@ -4898,6 +4906,10 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
skb == tail || skb == tail ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
goto end; goto end;
#ifdef CONFIG_TLS_DEVICE
if (skb->decrypted != nskb->decrypted)
goto end;
#endif
} }
} }
} }
......
...@@ -262,6 +262,9 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb) ...@@ -262,6 +262,9 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
flush |= (len - 1) >= mss; flush |= (len - 1) >= mss;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
flush |= p->decrypted ^ skb->decrypted;
#endif
if (flush || skb_gro_receive(p, skb)) { if (flush || skb_gro_receive(p, skb)) {
mss = 1; mss = 1;
......
[One file's diff is collapsed in this view.]
...@@ -214,7 +214,7 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) ...@@ -214,7 +214,7 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
static int fill_sg_in(struct scatterlist *sg_in, static int fill_sg_in(struct scatterlist *sg_in,
struct sk_buff *skb, struct sk_buff *skb,
struct tls_offload_context *ctx, struct tls_offload_context_tx *ctx,
u64 *rcd_sn, u64 *rcd_sn,
s32 *sync_size, s32 *sync_size,
int *resync_sgs) int *resync_sgs)
...@@ -299,7 +299,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, ...@@ -299,7 +299,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
s32 sync_size, u64 rcd_sn) s32 sync_size, u64 rcd_sn)
{ {
int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb); int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int payload_len = skb->len - tcp_payload_offset; int payload_len = skb->len - tcp_payload_offset;
void *buf, *iv, *aad, *dummy_buf; void *buf, *iv, *aad, *dummy_buf;
struct aead_request *aead_req; struct aead_request *aead_req;
...@@ -361,7 +361,7 @@ static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb) ...@@ -361,7 +361,7 @@ static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{ {
int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb); int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx); struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int payload_len = skb->len - tcp_payload_offset; int payload_len = skb->len - tcp_payload_offset;
struct scatterlist *sg_in, sg_out[3]; struct scatterlist *sg_in, sg_out[3];
struct sk_buff *nskb = NULL; struct sk_buff *nskb = NULL;
...@@ -413,9 +413,10 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk, ...@@ -413,9 +413,10 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
return tls_sw_fallback(sk, skb); return tls_sw_fallback(sk, skb);
} }
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
int tls_sw_fallback_init(struct sock *sk, int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context *offload_ctx, struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info) struct tls_crypto_info *crypto_info)
{ {
const u8 *key; const u8 *key;
......
...@@ -51,15 +51,6 @@ enum { ...@@ -51,15 +51,6 @@ enum {
TLSV6, TLSV6,
TLS_NUM_PROTS, TLS_NUM_PROTS,
}; };
enum {
TLS_BASE,
TLS_SW,
#ifdef CONFIG_TLS_DEVICE
TLS_HW,
#endif
TLS_HW_RECORD,
TLS_NUM_CONFIG,
};
static struct proto *saved_tcpv6_prot; static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex); static DEFINE_MUTEX(tcpv6_prot_mutex);
...@@ -290,7 +281,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) ...@@ -290,7 +281,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
} }
#ifdef CONFIG_TLS_DEVICE #ifdef CONFIG_TLS_DEVICE
if (ctx->tx_conf != TLS_HW) { if (ctx->rx_conf == TLS_HW)
tls_device_offload_cleanup_rx(sk);
if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else #else
{ {
#endif #endif
...@@ -470,9 +464,17 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, ...@@ -470,9 +464,17 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
conf = TLS_SW; conf = TLS_SW;
} }
} else { } else {
#ifdef CONFIG_TLS_DEVICE
rc = tls_set_device_offload_rx(sk, ctx);
conf = TLS_HW;
if (rc) {
#else
{
#endif
rc = tls_set_sw_offload(sk, ctx, 0); rc = tls_set_sw_offload(sk, ctx, 0);
conf = TLS_SW; conf = TLS_SW;
} }
}
if (rc) if (rc)
goto err_crypto_info; goto err_crypto_info;
...@@ -629,6 +631,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], ...@@ -629,6 +631,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW]; prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg; prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage; prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;
prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];
prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];
prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif #endif
prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base; prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
......
...@@ -53,7 +53,6 @@ static int tls_do_decryption(struct sock *sk, ...@@ -53,7 +53,6 @@ static int tls_do_decryption(struct sock *sk,
{ {
struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct strp_msg *rxm = strp_msg(skb);
struct aead_request *aead_req; struct aead_request *aead_req;
int ret; int ret;
...@@ -71,18 +70,6 @@ static int tls_do_decryption(struct sock *sk, ...@@ -71,18 +70,6 @@ static int tls_do_decryption(struct sock *sk,
ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait); ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
if (ret < 0)
goto out;
rxm->offset += tls_ctx->rx.prepend_size;
rxm->full_len -= tls_ctx->rx.overhead_size;
tls_advance_record_sn(sk, &tls_ctx->rx);
ctx->decrypted = true;
ctx->saved_data_ready(sk);
out:
aead_request_free(aead_req); aead_request_free(aead_req);
return ret; return ret;
} }
...@@ -276,7 +263,7 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, ...@@ -276,7 +263,7 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
int length, int *pages_used, int length, int *pages_used,
unsigned int *size_used, unsigned int *size_used,
struct scatterlist *to, int to_max_pages, struct scatterlist *to, int to_max_pages,
bool charge) bool charge, bool revert)
{ {
struct page *pages[MAX_SKB_FRAGS]; struct page *pages[MAX_SKB_FRAGS];
...@@ -327,6 +314,8 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from, ...@@ -327,6 +314,8 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
out: out:
*size_used = size; *size_used = size;
*pages_used = num_elem; *pages_used = num_elem;
if (revert)
iov_iter_revert(from, size);
return rc; return rc;
} }
...@@ -428,7 +417,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) ...@@ -428,7 +417,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
&ctx->sg_plaintext_size, &ctx->sg_plaintext_size,
ctx->sg_plaintext_data, ctx->sg_plaintext_data,
ARRAY_SIZE(ctx->sg_plaintext_data), ARRAY_SIZE(ctx->sg_plaintext_data),
true); true, false);
if (ret) if (ret)
goto fallback_to_reg_send; goto fallback_to_reg_send;
...@@ -666,7 +655,37 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags, ...@@ -666,7 +655,37 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
return skb; return skb;
} }
static int decrypt_skb(struct sock *sk, struct sk_buff *skb, static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout, bool *zc)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct strp_msg *rxm = strp_msg(skb);
int err = 0;
#ifdef CONFIG_TLS_DEVICE
err = tls_device_decrypted(sk, skb);
if (err < 0)
return err;
#endif
if (!ctx->decrypted) {
err = decrypt_skb(sk, skb, sgout);
if (err < 0)
return err;
} else {
*zc = false;
}
rxm->offset += tls_ctx->rx.prepend_size;
rxm->full_len -= tls_ctx->rx.overhead_size;
tls_advance_record_sn(sk, &tls_ctx->rx);
ctx->decrypted = true;
ctx->saved_data_ready(sk);
return err;
}
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout) struct scatterlist *sgout)
{ {
struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_context *tls_ctx = tls_get_ctx(sk);
...@@ -808,11 +827,11 @@ int tls_sw_recvmsg(struct sock *sk, ...@@ -808,11 +827,11 @@ int tls_sw_recvmsg(struct sock *sk,
err = zerocopy_from_iter(sk, &msg->msg_iter, err = zerocopy_from_iter(sk, &msg->msg_iter,
to_copy, &pages, to_copy, &pages,
&chunk, &sgin[1], &chunk, &sgin[1],
MAX_SKB_FRAGS, false); MAX_SKB_FRAGS, false, true);
if (err < 0) if (err < 0)
goto fallback_to_reg_recv; goto fallback_to_reg_recv;
err = decrypt_skb(sk, skb, sgin); err = decrypt_skb_update(sk, skb, sgin, &zc);
for (; pages > 0; pages--) for (; pages > 0; pages--)
put_page(sg_page(&sgin[pages])); put_page(sg_page(&sgin[pages]));
if (err < 0) { if (err < 0) {
...@@ -821,7 +840,7 @@ int tls_sw_recvmsg(struct sock *sk, ...@@ -821,7 +840,7 @@ int tls_sw_recvmsg(struct sock *sk,
} }
} else { } else {
fallback_to_reg_recv: fallback_to_reg_recv:
err = decrypt_skb(sk, skb, NULL); err = decrypt_skb_update(sk, skb, NULL, &zc);
if (err < 0) { if (err < 0) {
tls_err_abort(sk, EBADMSG); tls_err_abort(sk, EBADMSG);
goto recv_end; goto recv_end;
...@@ -876,6 +895,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, ...@@ -876,6 +895,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
int err = 0; int err = 0;
long timeo; long timeo;
int chunk; int chunk;
bool zc;
lock_sock(sk); lock_sock(sk);
...@@ -892,7 +912,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, ...@@ -892,7 +912,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
} }
if (!ctx->decrypted) { if (!ctx->decrypted) {
err = decrypt_skb(sk, skb, NULL); err = decrypt_skb_update(sk, skb, NULL, &zc);
if (err < 0) { if (err < 0) {
tls_err_abort(sk, EBADMSG); tls_err_abort(sk, EBADMSG);
...@@ -981,6 +1001,10 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) ...@@ -981,6 +1001,10 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure; goto read_failure;
} }
#ifdef CONFIG_TLS_DEVICE
handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
*(u64*)tls_ctx->rx.rec_seq);
#endif
return data_len + TLS_HEADER_SIZE; return data_len + TLS_HEADER_SIZE;
read_failure: read_failure:
...@@ -1022,7 +1046,7 @@ void tls_sw_free_resources_tx(struct sock *sk) ...@@ -1022,7 +1046,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
kfree(ctx); kfree(ctx);
} }
void tls_sw_free_resources_rx(struct sock *sk) void tls_sw_release_resources_rx(struct sock *sk)
{ {
struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
...@@ -1041,6 +1065,14 @@ void tls_sw_free_resources_rx(struct sock *sk) ...@@ -1041,6 +1065,14 @@ void tls_sw_free_resources_rx(struct sock *sk)
strp_done(&ctx->strp); strp_done(&ctx->strp);
lock_sock(sk); lock_sock(sk);
} }
}
void tls_sw_free_resources_rx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
tls_sw_release_resources_rx(sk);
kfree(ctx); kfree(ctx);
} }
...@@ -1065,28 +1097,38 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) ...@@ -1065,28 +1097,38 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
} }
if (tx) { if (tx) {
if (!ctx->priv_ctx_tx) {
sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
if (!sw_ctx_tx) { if (!sw_ctx_tx) {
rc = -ENOMEM; rc = -ENOMEM;
goto out; goto out;
} }
crypto_init_wait(&sw_ctx_tx->async_wait);
ctx->priv_ctx_tx = sw_ctx_tx; ctx->priv_ctx_tx = sw_ctx_tx;
} else { } else {
sw_ctx_tx =
(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
}
} else {
if (!ctx->priv_ctx_rx) {
sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
if (!sw_ctx_rx) { if (!sw_ctx_rx) {
rc = -ENOMEM; rc = -ENOMEM;
goto out; goto out;
} }
crypto_init_wait(&sw_ctx_rx->async_wait);
ctx->priv_ctx_rx = sw_ctx_rx; ctx->priv_ctx_rx = sw_ctx_rx;
} else {
sw_ctx_rx =
(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
}
} }
if (tx) { if (tx) {
crypto_init_wait(&sw_ctx_tx->async_wait);
crypto_info = &ctx->crypto_send; crypto_info = &ctx->crypto_send;
cctx = &ctx->tx; cctx = &ctx->tx;
aead = &sw_ctx_tx->aead_send; aead = &sw_ctx_tx->aead_send;
} else { } else {
crypto_init_wait(&sw_ctx_rx->async_wait);
crypto_info = &ctx->crypto_recv; crypto_info = &ctx->crypto_recv;
cctx = &ctx->rx; cctx = &ctx->rx;
aead = &sw_ctx_rx->aead_recv; aead = &sw_ctx_rx->aead_recv;
......