Commit 9520aea7 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2019-11-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-11-22

1) Misc Cleanups
2) Software steering support for Geneve
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
parents d46b7e4f 90ac2458
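
For context on item 2: a software-steering matcher selects per-header STE builders from the match mask, and each tunnel builder is gated on the corresponding flex-parser capability bit. The standalone C model below is an editorial sketch of that selection; the struct fields and helper names are simplified stand-ins, not the driver's, and the real logic is in the dr_matcher.c hunk further down.

/* Editorial, standalone model of the builder selection added in
 * dr_matcher_set_ste_builders() (see the dr_matcher.c hunk below).
 * Struct fields and helpers are simplified stand-ins, not the driver's. */
#include <stdbool.h>
#include <stdio.h>

#define MLX5_FLEX_PARSER_GENEVE_ENABLED		(1 << 3)
#define MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED	(1 << 7)

struct caps { unsigned int flex_protocols; };
struct match_mask { unsigned int geneve_vni; unsigned int outer_vxlan_gpe_vni; };

static bool want_vxlan_gpe(const struct match_mask *m, const struct caps *c)
{
	return m->outer_vxlan_gpe_vni &&
	       (c->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
}

static bool want_geneve(const struct match_mask *m, const struct caps *c)
{
	return m->geneve_vni &&
	       (c->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
}

int main(void)
{
	struct caps c = { .flex_protocols = MLX5_FLEX_PARSER_GENEVE_ENABLED };
	struct match_mask m = { .geneve_vni = 0xabc };

	/* Same if/else-if ordering as the diff: both tunnel builders use the
	 * FLEX_PARSER_TNL lookup type, so only one of them is selected. */
	if (want_vxlan_gpe(&m, &c))
		puts("build flex_parser_tnl_vxlan_gpe STE");
	else if (want_geneve(&m, &c))
		puts("build flex_parser_tnl_geneve STE");
	else
		puts("no tunnel flex-parser STE");
	return 0;
}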
@@ -141,7 +141,6 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	struct dst_entry *dst;
 	struct neighbour *n;
 
-#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
 	int ret;
 
 	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
@@ -157,9 +156,6 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 		dst_release(dst);
 		return ret;
 	}
-#else
-	return -EOPNOTSUPP;
-#endif
 
 	n = dst_neigh_lookup(dst, &fl6->daddr);
 	dst_release(dst);
@@ -240,13 +236,13 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
 			       ipv4_encap_size, max_encap_size);
 		err = -EOPNOTSUPP;
-		goto out;
+		goto release_neigh;
 	}
 
 	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
 	if (!encap_header) {
 		err = -ENOMEM;
-		goto out;
+		goto release_neigh;
 	}
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
@@ -298,7 +294,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 		/* the encap entry will be made valid on neigh update event
 		 * and not used before that.
 		 */
-		goto out;
+		goto release_neigh;
 	}
 
 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
 						     e->reformat_type,
@@ -318,9 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
 free_encap:
 	kfree(encap_header);
-out:
-	if (n)
-		neigh_release(n);
+release_neigh:
+	neigh_release(n);
 	return err;
 }
@@ -359,13 +354,13 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
 			       ipv6_encap_size, max_encap_size);
 		err = -EOPNOTSUPP;
-		goto out;
+		goto release_neigh;
 	}
 
 	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
 	if (!encap_header) {
 		err = -ENOMEM;
-		goto out;
+		goto release_neigh;
 	}
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
@@ -416,7 +411,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 		/* the encap entry will be made valid on neigh update event
 		 * and not used before that.
 		 */
-		goto out;
+		goto release_neigh;
 	}
 
 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
@@ -437,9 +432,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
 free_encap:
 	kfree(encap_header);
-out:
-	if (n)
-		neigh_release(n);
+release_neigh:
+	neigh_release(n);
 	return err;
 }
...
@@ -58,9 +58,16 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 				    struct net_device *mirred_dev,
 				    struct mlx5e_encap_entry *e);
 
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
 int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 				    struct net_device *mirred_dev,
 				    struct mlx5e_encap_entry *e);
+#else
+static inline int
+mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+				struct net_device *mirred_dev,
+				struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
+#endif
 
 bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
 				    struct net_device *netdev);
...
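
The header hunk above moves the CONFIG_INET/CONFIG_IPV6 check out of the .c file and into the header, using the usual kernel pattern of a config-gated prototype plus a static inline stub. A minimal sketch of that pattern with hypothetical names (IS_ENABLED() comes from <linux/kconfig.h> in the kernel):

/* Hypothetical header sketch of the pattern used above: when the option is
 * disabled, callers still compile and simply get -EOPNOTSUPP at runtime,
 * so no #ifdef is needed at any call site. */
struct my_dev;				/* hypothetical device type */

#if IS_ENABLED(CONFIG_MY_FEATURE)	/* hypothetical Kconfig symbol */
int my_feature_setup(struct my_dev *dev);
#else
static inline int my_feature_setup(struct my_dev *dev)
{
	return -EOPNOTSUPP;
}
#endif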
@@ -102,13 +102,52 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
 	 DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
 	 DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
 
-static bool dr_mask_is_flex_parser_tnl_set(struct mlx5dr_match_misc3 *misc3)
+static bool
+dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
 {
 	return (misc3->outer_vxlan_gpe_vni ||
 		misc3->outer_vxlan_gpe_next_protocol ||
 		misc3->outer_vxlan_gpe_flags);
 }
 
+static bool
+dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
+{
+	return caps->flex_protocols &
+	       MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+}
+
+static bool
+dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask,
+					 struct mlx5dr_domain *dmn)
+{
+	return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) &&
+	       dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps);
+}
+
+static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc)
+{
+	return misc->geneve_vni ||
+	       misc->geneve_oam ||
+	       misc->geneve_protocol_type ||
+	       misc->geneve_opt_len;
+}
+
+static bool
+dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps)
+{
+	return caps->flex_protocols &
+	       MLX5_FLEX_PARSER_GENEVE_ENABLED;
+}
+
+static bool
+dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask,
+				      struct mlx5dr_domain *dmn)
+{
+	return dr_mask_is_misc_geneve_set(&mask->misc) &&
+	       dr_matcher_supp_flex_parser_geneve(&dmn->info.caps);
+}
+
 static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
 {
 	return (misc3->icmpv6_type || misc3->icmpv6_code ||
@@ -137,13 +176,6 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
 	return (misc->source_sqn || misc->source_port);
 }
 
-static bool
-dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn)
-{
-	return dmn->info.caps.flex_protocols &
-	       MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
-}
-
 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
 				   struct mlx5dr_matcher_rx_tx *nic_matcher,
 				   enum mlx5dr_ipv outer_ipv,
@@ -262,10 +294,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 					    inner, rx);
 	}
 
-	if (dr_mask_is_flex_parser_tnl_set(&mask.misc3) &&
-	    dr_matcher_supp_flex_parser_vxlan_gpe(dmn))
-		mlx5dr_ste_build_flex_parser_tnl(&sb[idx++], &mask,
-						 inner, rx);
+	if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn))
+		mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++],
+							   &mask,
+							   inner, rx);
+	else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn))
+		mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++],
+							&mask,
+							inner, rx);
 
 	if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
 		mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
...
@@ -2083,68 +2083,110 @@ void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
 	sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
 }
 
-static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value,
-						   bool inner, u8 *bit_mask)
+static void
+dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
+						bool inner, u8 *bit_mask)
 {
 	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
 
-	if (misc_3_mask->outer_vxlan_gpe_flags ||
-	    misc_3_mask->outer_vxlan_gpe_next_protocol) {
-		MLX5_SET(ste_flex_parser_tnl, bit_mask,
-			 flex_parser_tunneling_header_63_32,
-			 (misc_3_mask->outer_vxlan_gpe_flags << 24) |
-			 (misc_3_mask->outer_vxlan_gpe_next_protocol));
-		misc_3_mask->outer_vxlan_gpe_flags = 0;
-		misc_3_mask->outer_vxlan_gpe_next_protocol = 0;
-	}
-
-	if (misc_3_mask->outer_vxlan_gpe_vni) {
-		MLX5_SET(ste_flex_parser_tnl, bit_mask,
-			 flex_parser_tunneling_header_31_0,
-			 misc_3_mask->outer_vxlan_gpe_vni << 8);
-		misc_3_mask->outer_vxlan_gpe_vni = 0;
-	}
+	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+			  outer_vxlan_gpe_flags,
+			  misc_3_mask, outer_vxlan_gpe_flags);
+	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+			  outer_vxlan_gpe_next_protocol,
+			  misc_3_mask, outer_vxlan_gpe_next_protocol);
+	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
+			  outer_vxlan_gpe_vni,
+			  misc_3_mask, outer_vxlan_gpe_vni);
 }
 
-static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value,
-					     struct mlx5dr_ste_build *sb,
-					     u8 *hw_ste_p)
+static int
+dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+					   struct mlx5dr_ste_build *sb,
+					   u8 *hw_ste_p)
 {
 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
 	u8 *tag = hw_ste->tag;
 
-	if (misc3->outer_vxlan_gpe_flags ||
-	    misc3->outer_vxlan_gpe_next_protocol) {
-		MLX5_SET(ste_flex_parser_tnl, tag,
-			 flex_parser_tunneling_header_63_32,
-			 (misc3->outer_vxlan_gpe_flags << 24) |
-			 (misc3->outer_vxlan_gpe_next_protocol));
-		misc3->outer_vxlan_gpe_flags = 0;
-		misc3->outer_vxlan_gpe_next_protocol = 0;
-	}
-
-	if (misc3->outer_vxlan_gpe_vni) {
-		MLX5_SET(ste_flex_parser_tnl, tag,
-			 flex_parser_tunneling_header_31_0,
-			 misc3->outer_vxlan_gpe_vni << 8);
-		misc3->outer_vxlan_gpe_vni = 0;
-	}
+	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+		       outer_vxlan_gpe_flags, misc3,
+		       outer_vxlan_gpe_flags);
+	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+		       outer_vxlan_gpe_next_protocol, misc3,
+		       outer_vxlan_gpe_next_protocol);
+	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+		       outer_vxlan_gpe_vni, misc3,
+		       outer_vxlan_gpe_vni);
 
 	return 0;
 }
 
-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
-				      struct mlx5dr_match_param *mask,
-				      bool inner, bool rx)
+void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+						struct mlx5dr_match_param *mask,
+						bool inner, bool rx)
+{
+	dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
+							sb->bit_mask);
+
+	sb->rx = rx;
+	sb->inner = inner;
+	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static void
+dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
+					     u8 *bit_mask)
 {
-	dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask);
+	struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+			  geneve_protocol_type,
+			  misc_mask, geneve_protocol_type);
+	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+			  geneve_oam,
+			  misc_mask, geneve_oam);
+	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+			  geneve_opt_len,
+			  misc_mask, geneve_opt_len);
+	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
+			  geneve_vni,
+			  misc_mask, geneve_vni);
+}
+
+static int
+dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+					struct mlx5dr_ste_build *sb,
+					u8 *hw_ste_p)
+{
+	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+	struct mlx5dr_match_misc *misc = &value->misc;
+	u8 *tag = hw_ste->tag;
+
+	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+		       geneve_protocol_type, misc, geneve_protocol_type);
+	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+		       geneve_oam, misc, geneve_oam);
+	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+		       geneve_opt_len, misc, geneve_opt_len);
+	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+		       geneve_vni, misc, geneve_vni);
+
+	return 0;
+}
+
+void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
+					     struct mlx5dr_match_param *mask,
+					     bool inner, bool rx)
+{
+	dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
 
 	sb->rx = rx;
 	sb->inner = inner;
 	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
 	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag;
+	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
 }
 
 static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
...
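
In the removed dr_ste.c code above, each masked field was copied into the STE tag or bit mask and then zeroed in the match value; the DR_STE_SET_TAG()/DR_STE_SET_MASK_V() helpers the diff switches to express that same per-field copy more compactly. A simplified, hypothetical stand-in for that copy-then-clear pattern (not the driver's actual macros):

/* Simplified stand-in for the pattern visible in the removed code above:
 * copy a requested field into the tag, then zero it in the spec (the old
 * open-coded version did the same after each MLX5_SET()). */
#define SET_TAG_FIELD(tag, t_field, spec, s_field)		\
	do {							\
		if ((spec)->s_field) {				\
			(tag)->t_field = (spec)->s_field;	\
			(spec)->s_field = 0;			\
		}						\
	} while (0)

struct geneve_spec { unsigned int geneve_vni; };	/* illustrative types */
struct geneve_tag  { unsigned int geneve_vni; };

static inline void build_geneve_tag(struct geneve_tag *tag,
				    struct geneve_spec *spec)
{
	SET_TAG_FIELD(tag, geneve_vni, spec, geneve_vni);
}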
@@ -325,9 +325,12 @@ int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
 				   struct mlx5dr_match_param *mask,
 				   struct mlx5dr_cmd_caps *caps,
 				   bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
-				      struct mlx5dr_match_param *mask,
-				      bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+						struct mlx5dr_match_param *mask,
+						bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
+					     struct mlx5dr_match_param *mask,
+					     bool inner, bool rx);
 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
 				      struct mlx5dr_match_param *mask,
 				      bool inner, bool rx);
...
@@ -548,6 +548,30 @@ struct mlx5_ifc_ste_flex_parser_tnl_bits {
 	u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_ste_flex_parser_tnl_vxlan_gpe_bits {
+	u8         outer_vxlan_gpe_flags[0x8];
+	u8         reserved_at_8[0x10];
+	u8         outer_vxlan_gpe_next_protocol[0x8];
+
+	u8         outer_vxlan_gpe_vni[0x18];
+	u8         reserved_at_38[0x8];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
+	u8         reserved_at_0[0x2];
+	u8         geneve_opt_len[0x6];
+	u8         geneve_oam[0x1];
+	u8         reserved_at_9[0x7];
+	u8         geneve_protocol_type[0x10];
+
+	u8         geneve_vni[0x18];
+	u8         reserved_at_38[0x8];
+
+	u8         reserved_at_40[0x40];
+};
+
 struct mlx5_ifc_ste_general_purpose_bits {
 	u8         general_purpose_lookup_field[0x20];
...
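
The two new ste layouts above mirror the VXLAN-GPE and Geneve tunnel headers; for example, geneve_vni[0x18] occupies the top 24 bits of the second dword, followed by 8 reserved bits. Below is an editorial, standalone check of that packing in plain C with explicit shifts, standing in for the driver's MLX5_SET() machinery:

/* Editorial illustration of the geneve layout above: the 24-bit VNI lands in
 * the high bits of the second dword, i.e. value << 8, matching the
 * geneve_vni[0x18] / reserved_at_38[0x8] split in the struct. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_geneve_dword1(uint32_t vni)
{
	return (vni & 0xffffffu) << 8;
}

int main(void)
{
	printf("vni 0x%06x -> dword 0x%08x\n",
	       0xabcdefu, pack_geneve_dword1(0xabcdefu));
	return 0;
}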
@@ -1064,26 +1064,13 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
 	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
 	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
-	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
-	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
-	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
-	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
-	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
-	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
-	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
-	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
-	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
-	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
-	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
-	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
-	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
-	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
-	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
-	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
-	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
-	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
-	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
-	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
+
+	if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
+		MLX5_SET(hca_vport_context, ctx, vport_state_policy,
+			 req->policy);
+
+	if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
+		MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+
+	if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
+		MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
+
 	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
 ex:
 	kfree(in);
...
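
The vport hunk above stops writing every hca_vport_context field unconditionally and only sets the fields whose MLX5_HCA_VPORT_SEL_* bit is present in field_select, so fields the caller did not select keep their current values. A standalone, editorial model of that guard (simplified types and illustrative selector bits, not the driver's):

/* Editorial model of field_select gating: only requested fields are written. */
#include <stdint.h>
#include <stdio.h>

#define SEL_STATE_POLICY	(1u << 0)	/* illustrative bit positions */
#define SEL_PORT_GUID		(1u << 1)

struct vport_req { uint32_t field_select; uint32_t policy; uint64_t port_guid; };
struct vport_ctx { uint32_t policy; uint64_t port_guid; };

static void apply_vport_req(struct vport_ctx *ctx, const struct vport_req *req)
{
	if (req->field_select & SEL_STATE_POLICY)
		ctx->policy = req->policy;		/* written only when selected */
	if (req->field_select & SEL_PORT_GUID)
		ctx->port_guid = req->port_guid;
}

int main(void)
{
	struct vport_ctx ctx = { .policy = 1, .port_guid = 0x1111 };
	struct vport_req req = { .field_select = SEL_PORT_GUID, .port_guid = 0x2222 };

	apply_vport_req(&ctx, &req);
	printf("policy=%u (untouched), port_guid=0x%llx (updated)\n",
	       ctx.policy, (unsigned long long)ctx.port_guid);
	return 0;
}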
@@ -1110,6 +1110,7 @@ enum {
 };
 
 enum {
+	MLX5_FLEX_PARSER_GENEVE_ENABLED		= 1 << 3,
 	MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED	= 1 << 7,
 	MLX5_FLEX_PARSER_ICMP_V4_ENABLED	= 1 << 8,
 	MLX5_FLEX_PARSER_ICMP_V6_ENABLED	= 1 << 9,
...