Commit 9566e650 authored by David S. Miller

Merge tag 'mlx5-fixes-2019-08-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-08-08

This series introduces some fixes to the mlx5 driver.

Highlights:
1) From Tariq, critical mlx5 kTLS fixes to better align with HW specs.
2) From Aya, fixes to the mlx5 tx devlink health reporter.
3) From Maxim, aRFS parsing now uses the flow dissector to avoid relying on
invalid skb fields.

Please pull and let me know if there is any problem.

For -stable v4.3
 ('net/mlx5e: Only support tx/rx pause setting for port owner')
For -stable v4.9
 ('net/mlx5e: Use flow keys dissector to parse packets for ARFS')
For -stable v5.1
 ('net/mlx5e: Fix false negative indication on tx reporter CQE recovery')
 ('net/mlx5e: Remove redundant check in CQE recovery flow of tx reporter')
 ('net/mlx5e: ethtool, Avoid setting speed to 56GBASE when autoneg off')

Note: when merged with net-next this minor conflict will pop up:
++<<<<<<< (net-next)
 +      if (is_eswitch_flow) {
 +              flow->esw_attr->match_level = match_level;
 +              flow->esw_attr->tunnel_match_level = tunnel_match_level;
++=======
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+               flow->esw_attr->inner_match_level = inner_match_level;
+               flow->esw_attr->outer_match_level = outer_match_level;
++>>>>>>> (net)

To resolve, use hunks from net (2nd) and replace:
if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
with
if (is_eswitch_flow)
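
Applied to the conflicting hunk above, the resolved code would read (illustrative,
combining the hunk from net with the net-next condition):

	if (is_eswitch_flow) {
		flow->esw_attr->inner_match_level = inner_match_level;
		flow->esw_attr->outer_match_level = outer_match_level;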
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8b638160 a4e508ca
@@ -184,8 +184,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
-	struct mlx5_wqe_eth_seg  eth;
-	struct mlx5_wqe_data_seg data[0];
+	union {
+		struct {
+			struct mlx5_wqe_eth_seg  eth;
+			struct mlx5_wqe_data_seg data[0];
+		};
+		u8 tls_progress_params_ctx[0];
+	};
 };
 
 struct mlx5e_rx_wqe_ll {
...
@@ -76,26 +76,21 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
 	u8 state;
 	int err;
 
-	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-		return 0;
-
 	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
 	if (err) {
 		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
 			   sq->sqn, err);
-		return err;
+		goto out;
 	}
 
-	if (state != MLX5_SQC_STATE_ERR) {
-		netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
-		return -EINVAL;
-	}
+	if (state != MLX5_SQC_STATE_ERR)
+		goto out;
 
 	mlx5e_tx_disable_queue(sq->txq);
 
 	err = mlx5e_wait_for_sq_flush(sq);
 	if (err)
-		return err;
+		goto out;
 
 	/* At this point, no new packets will arrive from the stack as TXQ is
 	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
@@ -104,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
 
 	err = mlx5e_sq_to_ready(sq, state);
 	if (err)
-		return err;
+		goto out;
 
 	mlx5e_reset_txqsq_cc_pc(sq);
 	sq->stats->recover++;
+	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
 	mlx5e_activate_txqsq(sq);
 
 	return 0;
+out:
+	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+	return err;
 }
 
 static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
...
@@ -11,12 +11,14 @@
 #include "accel/tls.h"
 
 #define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
-	(sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params))
+	(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
+	 MLX5_ST_SZ_BYTES(tls_static_params))
 #define MLX5E_KTLS_STATIC_WQEBBS \
 	(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
 
 #define MLX5E_KTLS_PROGRESS_WQE_SZ \
-	(sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params))
+	(offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
+	 MLX5_ST_SZ_BYTES(tls_progress_params))
 #define MLX5E_KTLS_PROGRESS_WQEBBS \
 	(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
 #define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
...
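A minimal userspace sketch (not the driver code; the struct names, member sizes
and the 32-byte params size below are made-up stand-ins) of why the macros above
switch from sizeof() to offsetof() once the progress-params context shares a
union with the eth/data segments: sizeof() also counts the larger union members,
while offsetof() stops right after the ctrl segment, where the context actually
begins.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for mlx5_wqe_ctrl_seg / mlx5_wqe_eth_seg; sizes are
 * illustrative only. Zero-length arrays follow kernel/GNU C conventions.
 */
struct ctrl_seg { unsigned char b[16]; };
struct eth_seg  { unsigned char b[16]; };

struct tx_wqe {
	struct ctrl_seg ctrl;
	union {
		struct {
			struct eth_seg eth;
			unsigned char data[0];
		};
		unsigned char tls_progress_params_ctx[0];
	};
};

int main(void)
{
	size_t params_sz = 32; /* stand-in for MLX5_ST_SZ_BYTES(tls_progress_params) */

	/* sizeof() also counts the eth segment that shares the union ... */
	printf("sizeof-based   WQE size: %zu\n", sizeof(struct tx_wqe) + params_sz);
	/* ... offsetof() measures only up to where the params context starts. */
	printf("offsetof-based WQE size: %zu\n",
	       offsetof(struct tx_wqe, tls_progress_params_ctx) + params_sz);
	return 0;
}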
@@ -69,7 +69,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
 	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
 					     STATIC_PARAMS_DS_CNT);
 	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
-	cseg->imm              = cpu_to_be32(priv_tx->tisn);
+	cseg->tisn             = cpu_to_be32(priv_tx->tisn << 8);
 
 	ucseg->flags = MLX5_UMR_INLINE;
 	ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
@@ -80,7 +80,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
 static void
 fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
-	MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn);
+	MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
 	MLX5_SET(tls_progress_params, ctx, record_tracker_state,
 		 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
 	MLX5_SET(tls_progress_params, ctx, auth_state,
@@ -104,7 +104,7 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
 					     PROGRESS_PARAMS_DS_CNT);
 	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
 
-	fill_progress_params_ctx(wqe->data, priv_tx);
+	fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
@@ -278,7 +278,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
 	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-	cseg->imm              = cpu_to_be32(tisn);
+	cseg->tisn             = cpu_to_be32(tisn << 8);
 
 	cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
 
 	eseg->inline_hdr.sz = cpu_to_be16(ihs);
@@ -434,7 +434,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 	priv_tx->expected_seq = seq + datalen;
 
 	cseg = &(*wqe)->ctrl;
-	cseg->imm = cpu_to_be32(priv_tx->tisn);
+	cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
 
 	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 	stats->tls_encrypted_bytes   += datalen;
...
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
 	return &arfs_t->rules_hash[bucket_idx];
 }
 
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IP)) ?
-		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 					 u8 ip_proto, __be16 etype)
 {
@@ -602,31 +596,9 @@ static void arfs_handle_work(struct work_struct *work)
 	arfs_may_expire_flow(priv);
 }
 
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->dest;
-	return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->source;
-	return ((struct udphdr *)transport_header)->source;
-}
-
 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 					 struct arfs_table *arfs_t,
-					 const struct sk_buff *skb,
+					 const struct flow_keys *fk,
 					 u16 rxq, u32 flow_id)
 {
 	struct arfs_rule *rule;
@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	INIT_WORK(&rule->arfs_work, arfs_handle_work);
 
 	tuple = &rule->tuple;
-	tuple->etype = skb->protocol;
+	tuple->etype = fk->basic.n_proto;
+	tuple->ip_proto = fk->basic.ip_proto;
 	if (tuple->etype == htons(ETH_P_IP)) {
-		tuple->src_ipv4 = ip_hdr(skb)->saddr;
-		tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+		tuple->src_ipv4 = fk->addrs.v4addrs.src;
+		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
 	} else {
-		memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
 		       sizeof(struct in6_addr));
-		memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
 		       sizeof(struct in6_addr));
 	}
-	tuple->ip_proto = arfs_get_ip_proto(skb);
-	tuple->src_port = arfs_get_src_port(skb);
-	tuple->dst_port = arfs_get_dst_port(skb);
+	tuple->src_port = fk->ports.src;
+	tuple->dst_port = fk->ports.dst;
 
 	rule->flow_id = flow_id;
 	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -664,38 +636,34 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	return rule;
 }
 
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
-			 const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
 {
-	if (tuple->etype == htons(ETH_P_IP) &&
-	    tuple->src_ipv4 == ip_hdr(skb)->saddr &&
-	    tuple->dst_ipv4 == ip_hdr(skb)->daddr)
-		return true;
-	if (tuple->etype == htons(ETH_P_IPV6) &&
-	    (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-		     sizeof(struct in6_addr))) &&
-	    (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-		     sizeof(struct in6_addr))))
-		return true;
+	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+		return false;
+	if (tuple->etype != fk->basic.n_proto)
+		return false;
+	if (tuple->etype == htons(ETH_P_IP))
+		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+	if (tuple->etype == htons(ETH_P_IPV6))
+		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+			       sizeof(struct in6_addr)) &&
+		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+			       sizeof(struct in6_addr));
 	return false;
 }
 
 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
-					const struct sk_buff *skb)
+					const struct flow_keys *fk)
 {
 	struct arfs_rule *arfs_rule;
 	struct hlist_head *head;
-	__be16 src_port = arfs_get_src_port(skb);
-	__be16 dst_port = arfs_get_dst_port(skb);
 
-	head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
 	hlist_for_each_entry(arfs_rule, head, hlist) {
-		if (arfs_rule->tuple.src_port == src_port &&
-		    arfs_rule->tuple.dst_port == dst_port &&
-		    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+		if (arfs_cmp(&arfs_rule->tuple, fk))
 			return arfs_rule;
-		}
 	}
 	return NULL;
 }
@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
 	struct arfs_table *arfs_t;
 	struct arfs_rule *arfs_rule;
+	struct flow_keys fk;
+
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+		return -EPROTONOSUPPORT;
 
-	if (skb->protocol != htons(ETH_P_IP) &&
-	    skb->protocol != htons(ETH_P_IPV6))
+	if (fk.basic.n_proto != htons(ETH_P_IP) &&
+	    fk.basic.n_proto != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
 	if (skb->encapsulation)
 		return -EPROTONOSUPPORT;
 
-	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
 
 	spin_lock_bh(&arfs->arfs_lock);
-	arfs_rule = arfs_find_rule(arfs_t, skb);
+	arfs_rule = arfs_find_rule(arfs_t, &fk);
 	if (arfs_rule) {
 		if (arfs_rule->rxq == rxq_index) {
 			spin_unlock_bh(&arfs->arfs_lock);
@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		}
 		arfs_rule->rxq = rxq_index;
 	} else {
-		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
-					    rxq_index, flow_id);
+		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
 		if (!arfs_rule) {
 			spin_unlock_bh(&arfs->arfs_lock);
 			return -ENOMEM;
...
@@ -1081,6 +1081,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
 		mlx5e_port_speed2linkmodes(mdev, speed, !ext);
 
+	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+	    autoneg != AUTONEG_ENABLE) {
+		netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
+			   __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
 	link_modes = link_modes & eproto.cap;
 	if (!link_modes) {
 		netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
@@ -1338,6 +1346,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return -EOPNOTSUPP;
+
 	if (pauseparam->autoneg)
 		return -EINVAL;
...
@@ -1321,7 +1321,6 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
 {
 	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
-	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	netdev_tx_reset_queue(sq->txq);
 	netif_tx_start_queue(sq->txq);
...
@@ -1480,7 +1480,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			      struct mlx5_flow_spec *spec,
 			      struct flow_cls_offload *f,
 			      struct net_device *filter_dev,
-			      u8 *match_level, u8 *tunnel_match_level)
+			      u8 *inner_match_level, u8 *outer_match_level)
 {
 	struct netlink_ext_ack *extack = f->common.extack;
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1495,8 +1495,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 addr_type = 0;
 	u8 ip_proto = 0;
+	u8 *match_level;
 
-	*match_level = MLX5_MATCH_NONE;
+	match_level = outer_match_level;
 
 	if (dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
@@ -1524,12 +1525,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	}
 
 	if (mlx5e_get_tc_tun(filter_dev)) {
-		if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
+		if (parse_tunnel_attr(priv, spec, f, filter_dev,
+				      outer_match_level))
 			return -EOPNOTSUPP;
 
-		/* In decap flow, header pointers should point to the inner
+		/* At this point, header pointers should point to the inner
 		 * headers, outer header were already set by parse_tunnel_attr
 		 */
+		match_level = inner_match_level;
 		headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
 						       spec);
 		headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
@@ -1831,35 +1834,41 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 			    struct flow_cls_offload *f,
 			    struct net_device *filter_dev)
 {
+	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
 	struct netlink_ext_ack *extack = f->common.extack;
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
 	struct mlx5_eswitch_rep *rep;
 	int err;
 
-	err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
+	inner_match_level = MLX5_MATCH_NONE;
+	outer_match_level = MLX5_MATCH_NONE;
+
+	err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
+				 &outer_match_level);
+	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
+				 outer_match_level : inner_match_level;
 
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
 		rep = rpriv->rep;
 		if (rep->vport != MLX5_VPORT_UPLINK &&
 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
-		     esw->offloads.inline_mode < match_level)) {
+		     esw->offloads.inline_mode < non_tunnel_match_level)) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Flow is not offloaded due to min inline setting");
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
-				    match_level, esw->offloads.inline_mode);
+				    non_tunnel_match_level, esw->offloads.inline_mode);
 			return -EOPNOTSUPP;
 		}
 	}
 
 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-		flow->esw_attr->match_level = match_level;
-		flow->esw_attr->tunnel_match_level = tunnel_match_level;
+		flow->esw_attr->inner_match_level = inner_match_level;
+		flow->esw_attr->outer_match_level = outer_match_level;
 	} else {
-		flow->nic_attr->match_level = match_level;
+		flow->nic_attr->match_level = non_tunnel_match_level;
 	}
 
 	return err;
...
@@ -377,8 +377,8 @@ struct mlx5_esw_flow_attr {
 		struct mlx5_termtbl_handle *termtbl;
 	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
 	u32	mod_hdr_id;
-	u8	match_level;
-	u8	tunnel_match_level;
+	u8	inner_match_level;
+	u8	outer_match_level;
 	struct mlx5_fc *counter;
 	u32	chain;
 	u16	prio;
...
@@ -207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 		mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
-	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
-		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
-			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
-		if (attr->match_level != MLX5_MATCH_NONE)
-			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
-	} else if (attr->match_level != MLX5_MATCH_NONE) {
-		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
-	}
+	if (attr->outer_match_level != MLX5_MATCH_NONE)
+		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+	if (attr->inner_match_level != MLX5_MATCH_NONE)
+		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
 
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_id = attr->mod_hdr_id;
@@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
-	if (attr->match_level != MLX5_MATCH_NONE)
+	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
 	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
...
@@ -27,6 +27,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
 	case 128:
 		general_obj_key_size =
 			MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
+		key_p += sz_bytes;
 		break;
 	case 256:
 		general_obj_key_size =
...
@@ -446,11 +446,11 @@
 };
 
 enum {
-	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20,
+	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
 };
 
 enum {
-	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20,
+	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
 };
 
 enum {
...
@@ -10054,9 +10054,8 @@ struct mlx5_ifc_tls_static_params_bits {
 };
 
 struct mlx5_ifc_tls_progress_params_bits {
-	u8         valid[0x1];
-	u8         reserved_at_1[0x7];
-	u8         pd[0x18];
+	u8         reserved_at_0[0x8];
+	u8         tisn[0x18];
 
 	u8         next_record_tcp_sn[0x20];
...