Commit 4960c414 authored by Gal Pressman, committed by Jakub Kicinski

net/mlx5e: Support 256 bit keys with kTLS device offload

Add support for 256 bit TLS keys using device offload.
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 56e5a6d3
...@@ -25,7 +25,8 @@ static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev) ...@@ -25,7 +25,8 @@ static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
if (!MLX5_CAP_GEN(mdev, log_max_dek)) if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false; return false;
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); return (MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128) ||
MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256));
} }
static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
...@@ -36,6 +37,10 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, ...@@ -36,6 +37,10 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
if (crypto_info->version == TLS_1_2_VERSION) if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
break; break;
case TLS_CIPHER_AES_GCM_256:
if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256);
break;
} }
return false; return false;
......
...@@ -43,7 +43,7 @@ struct mlx5e_ktls_rx_resync_ctx { ...@@ -43,7 +43,7 @@ struct mlx5e_ktls_rx_resync_ctx {
}; };
struct mlx5e_ktls_offload_context_rx { struct mlx5e_ktls_offload_context_rx {
struct tls12_crypto_info_aes_gcm_128 crypto_info; union mlx5e_crypto_info crypto_info;
struct accel_rule rule; struct accel_rule rule;
struct sock *sk; struct sock *sk;
struct mlx5e_rq_stats *rq_stats; struct mlx5e_rq_stats *rq_stats;
...@@ -362,7 +362,6 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, ...@@ -362,7 +362,6 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx, static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
struct mlx5e_channel *c) struct mlx5e_channel *c)
{ {
struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
struct mlx5e_ktls_resync_resp *ktls_resync; struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5e_icosq *sq; struct mlx5e_icosq *sq;
bool trigger_poll; bool trigger_poll;
...@@ -373,7 +372,31 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r ...@@ -373,7 +372,31 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
spin_lock_bh(&ktls_resync->lock); spin_lock_bh(&ktls_resync->lock);
spin_lock_bh(&priv_rx->lock); spin_lock_bh(&priv_rx->lock);
memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); switch (priv_rx->crypto_info.crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info =
&priv_rx->crypto_info.crypto_info_128;
memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
sizeof(info->rec_seq));
break;
}
case TLS_CIPHER_AES_GCM_256: {
struct tls12_crypto_info_aes_gcm_256 *info =
&priv_rx->crypto_info.crypto_info_256;
memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
sizeof(info->rec_seq));
break;
}
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
priv_rx->crypto_info.crypto_info.cipher_type);
spin_unlock_bh(&priv_rx->lock);
spin_unlock_bh(&ktls_resync->lock);
return;
}
if (list_empty(&priv_rx->list)) { if (list_empty(&priv_rx->list)) {
list_add_tail(&priv_rx->list, &ktls_resync->list); list_add_tail(&priv_rx->list, &ktls_resync->list);
trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
...@@ -604,8 +627,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, ...@@ -604,8 +627,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
INIT_LIST_HEAD(&priv_rx->list); INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock); spin_lock_init(&priv_rx->lock);
priv_rx->crypto_info = switch (crypto_info->cipher_type) {
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; case TLS_CIPHER_AES_GCM_128:
priv_rx->crypto_info.crypto_info_128 =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
break;
case TLS_CIPHER_AES_GCM_256:
priv_rx->crypto_info.crypto_info_256 =
*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
break;
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
crypto_info->cipher_type);
return -EOPNOTSUPP;
}
rxq = mlx5e_ktls_sk_get_rxq(sk); rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq; priv_rx->rxq = rxq;
......
...@@ -93,7 +93,7 @@ struct mlx5e_ktls_offload_context_tx { ...@@ -93,7 +93,7 @@ struct mlx5e_ktls_offload_context_tx {
bool ctx_post_pending; bool ctx_post_pending;
/* control / resync */ /* control / resync */
struct list_head list_node; /* member of the pool */ struct list_head list_node; /* member of the pool */
struct tls12_crypto_info_aes_gcm_128 crypto_info; union mlx5e_crypto_info crypto_info;
struct tls_offload_context_tx *tx_ctx; struct tls_offload_context_tx *tx_ctx;
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats *sw_stats; struct mlx5e_tls_sw_stats *sw_stats;
...@@ -485,8 +485,20 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, ...@@ -485,8 +485,20 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
goto err_create_key; goto err_create_key;
priv_tx->expected_seq = start_offload_tcp_sn; priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->crypto_info = switch (crypto_info->cipher_type) {
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; case TLS_CIPHER_AES_GCM_128:
priv_tx->crypto_info.crypto_info_128 =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
break;
case TLS_CIPHER_AES_GCM_256:
priv_tx->crypto_info.crypto_info_256 =
*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
break;
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
crypto_info->cipher_type);
return -EOPNOTSUPP;
}
priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx); priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx); mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
...@@ -671,14 +683,31 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, ...@@ -671,14 +683,31 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx, struct mlx5e_ktls_offload_context_tx *priv_tx,
u64 rcd_sn) u64 rcd_sn)
{ {
struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
__be64 rn_be = cpu_to_be64(rcd_sn); __be64 rn_be = cpu_to_be64(rcd_sn);
bool skip_static_post; bool skip_static_post;
u16 rec_seq_sz; u16 rec_seq_sz;
char *rec_seq; char *rec_seq;
rec_seq = info->rec_seq; switch (priv_tx->crypto_info.crypto_info.cipher_type) {
rec_seq_sz = sizeof(info->rec_seq); case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;
rec_seq = info->rec_seq;
rec_seq_sz = sizeof(info->rec_seq);
break;
}
case TLS_CIPHER_AES_GCM_256: {
struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;
rec_seq = info->rec_seq;
rec_seq_sz = sizeof(info->rec_seq);
break;
}
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
priv_tx->crypto_info.crypto_info.cipher_type);
return;
}
skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz); skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
if (!skip_static_post) if (!skip_static_post)
......
...@@ -21,7 +21,7 @@ enum { ...@@ -21,7 +21,7 @@ enum {
static void static void
fill_static_params(struct mlx5_wqe_tls_static_params_seg *params, fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
struct tls12_crypto_info_aes_gcm_128 *info, union mlx5e_crypto_info *crypto_info,
u32 key_id, u32 resync_tcp_sn) u32 key_id, u32 resync_tcp_sn)
{ {
char *initial_rn, *gcm_iv; char *initial_rn, *gcm_iv;
...@@ -32,7 +32,26 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params, ...@@ -32,7 +32,26 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
ctx = params->ctx; ctx = params->ctx;
EXTRACT_INFO_FIELDS; switch (crypto_info->crypto_info.cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
struct tls12_crypto_info_aes_gcm_128 *info =
&crypto_info->crypto_info_128;
EXTRACT_INFO_FIELDS;
break;
}
case TLS_CIPHER_AES_GCM_256: {
struct tls12_crypto_info_aes_gcm_256 *info =
&crypto_info->crypto_info_256;
EXTRACT_INFO_FIELDS;
break;
}
default:
WARN_ONCE(1, "Unsupported cipher type %u\n",
crypto_info->crypto_info.cipher_type);
return;
}
gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv); gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number); initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
...@@ -54,7 +73,7 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params, ...@@ -54,7 +73,7 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
void void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn, u16 pc, u32 sqn,
struct tls12_crypto_info_aes_gcm_128 *info, union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn, u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction) bool fence, enum tls_offload_ctx_dir direction)
{ {
...@@ -75,7 +94,7 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, ...@@ -75,7 +94,7 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
ucseg->flags = MLX5_UMR_INLINE; ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
fill_static_params(&wqe->params, info, key_id, resync_tcp_sn); fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);
} }
static void static void
......
...@@ -27,6 +27,12 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, ...@@ -27,6 +27,12 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx); void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx);
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn); void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn);
/*
 * Discriminated union holding the kTLS cipher parameters for a single
 * offloaded connection, supporting both AES-GCM-128 and AES-GCM-256 keys.
 *
 * The active variant is selected by reading cipher_type through the
 * 'crypto_info' member (see the switch statements in mlx5e_ktls_add_rx/tx,
 * resync_handle_seq_match and fill_static_params): the code writes
 * crypto_info_128 or crypto_info_256 and later dispatches on
 * crypto_info.cipher_type, relying on struct tls_crypto_info being the
 * common initial part of both tls12 variants.
 */
union mlx5e_crypto_info {
struct tls_crypto_info crypto_info;             /* common header: version + cipher_type discriminator */
struct tls12_crypto_info_aes_gcm_128 crypto_info_128;  /* active when cipher_type == TLS_CIPHER_AES_GCM_128 */
struct tls12_crypto_info_aes_gcm_256 crypto_info_256;  /* active when cipher_type == TLS_CIPHER_AES_GCM_256 */
};
struct mlx5e_set_tls_static_params_wqe { struct mlx5e_set_tls_static_params_wqe {
struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_wqe_umr_ctrl_seg uctrl;
...@@ -72,7 +78,7 @@ struct mlx5e_get_tls_progress_params_wqe { ...@@ -72,7 +78,7 @@ struct mlx5e_get_tls_progress_params_wqe {
void void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn, u16 pc, u32 sqn,
struct tls12_crypto_info_aes_gcm_128 *info, union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn, u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction); bool fence, enum tls_offload_ctx_dir direction);
void void
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.