Commit 6c9ee306 authored by David S. Miller

Merge tag 'mlx5-updates-2020-03-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-03-09

This series provides updates to the mlx5 driver:

1) Use vport metadata matching only when mandatory
2) Introduce root flow table and ethtool steering for uplink representors
3) Expose port speed via FW when link modes are not available
4) Misc cleanups
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 119959a0 b63293e7
......@@ -204,7 +204,7 @@ struct mlx5e_tx_wqe {
struct mlx5e_rx_wqe_ll {
struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_cyc {
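These [0]-to-[] conversions (repeated below for the FPGA IPsec command, flow-counter bulk, and IPoIB structs) adopt C99 flexible array members, which behave like the old GNU zero-length arrays but let the compiler catch misuse such as taking sizeof of the member. A minimal userspace sketch of the idiom, with stand-in types:

#include <stdlib.h>

/* Stand-in for the WQE layout: sizeof(struct rx_wqe) excludes the
 * flexible array member, so the trailing segments are sized at
 * allocation time, exactly as with the old zero-length form. */
struct rx_wqe {
	unsigned int next;
	unsigned int data[];	/* C99 flexible array member */
};

static struct rx_wqe *rx_wqe_alloc(size_t nsegs)
{
	return malloc(sizeof(struct rx_wqe) + nsegs * sizeof(unsigned int));
}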
......@@ -1169,6 +1169,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings);
+ int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
+ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
+ const u8 hfunc);
+ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs);
+ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
......
......@@ -773,6 +773,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
......@@ -784,7 +785,10 @@ static void get_speed_duplex(struct net_device *netdev,
speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
if (!speed) {
- speed = SPEED_UNKNOWN;
+ if (data_rate_oper)
+ speed = 100 * data_rate_oper;
+ else
+ speed = SPEED_UNKNOWN;
goto out;
}
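Since ethtool speeds are expressed in Mb/s, the multiplication by 100 implies the new PTYS data_rate_oper field (see the register layout change near the end of this series) reports the operational rate in units of 100 Mb/s. A standalone restatement of the fallback, assuming those units:

#include <linux/ethtool.h>	/* SPEED_UNKNOWN */

/* Prefer the speed parsed from link modes; otherwise fall back to the
 * FW-reported data rate (units of 100 Mb/s); else report unknown. */
static int resolve_speed_mbps(unsigned int link_mode_speed,
			      unsigned int data_rate_oper)
{
	if (link_mode_speed)
		return link_mode_speed;
	return data_rate_oper ? 100 * data_rate_oper : SPEED_UNKNOWN;
}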
......@@ -873,17 +877,18 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- u32 rx_pause = 0;
- u32 tx_pause = 0;
- u32 eth_proto_cap;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
u32 eth_proto_admin;
- u32 eth_proto_lp;
- u32 eth_proto_oper;
u8 an_disable_admin;
- u8 an_status;
+ u16 data_rate_oper;
+ u32 eth_proto_oper;
+ u32 eth_proto_cap;
u8 connector_type;
+ u32 rx_pause = 0;
+ u32 tx_pause = 0;
+ u32 eth_proto_lp;
bool admin_ext;
+ u8 an_status;
bool ext;
int err;
......@@ -917,6 +922,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
connector_type = MLX5_GET(ptys_reg, out, connector_type);
+ data_rate_oper = MLX5_GET(ptys_reg, out, data_rate_oper);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
......@@ -927,7 +933,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- link_ksettings);
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
......@@ -1126,8 +1132,8 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
- static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+ int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rss_params *rss = &priv->rss_params;
......@@ -1146,8 +1152,8 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
- static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+ int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rss_params *rss = &priv->rss_params;
......@@ -1942,7 +1948,8 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
- static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
......@@ -1959,7 +1966,7 @@ static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u
return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
}
- static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
......
......@@ -253,25 +253,6 @@ static int mlx5e_rep_set_ringparam(struct net_device *dev,
return mlx5e_ethtool_set_ringparam(priv, param);
}
- static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
- struct mlx5_flow_destination *dest)
- {
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_flow_handle *flow_rule;
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- dest);
- if (IS_ERR(flow_rule))
- return PTR_ERR(flow_rule);
- mlx5_del_flow_rules(rpriv->vport_rx_rule);
- rpriv->vport_rx_rule = flow_rule;
- return 0;
- }
static void mlx5e_rep_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
......@@ -284,33 +265,8 @@ static int mlx5e_rep_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- u16 curr_channels_amount = priv->channels.params.num_channels;
- u32 new_channels_amount = ch->combined_count;
- struct mlx5_flow_destination new_dest;
- int err = 0;
- err = mlx5e_ethtool_set_channels(priv, ch);
- if (err)
- return err;
- if (curr_channels_amount == 1 && new_channels_amount > 1) {
- new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- new_dest.ft = priv->fs.ttc.ft.t;
- } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
- new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- new_dest.tir_num = priv->direct_tir[0].tirn;
- } else {
- return 0;
- }
- err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
- if (err) {
- netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
- curr_channels_amount, new_channels_amount);
- return err;
- }
- return 0;
+ return mlx5e_ethtool_set_channels(priv, ch);
}
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
......@@ -413,6 +369,10 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
+ .set_rxnfc = mlx5e_set_rxnfc,
.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
......@@ -1596,6 +1556,8 @@ static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct ttc_params ttc_params = {};
int tt, err;
......@@ -1605,6 +1567,11 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* The inner_ttc in the ttc params is intentionally not set */
ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
mlx5e_set_ttc_ft_params(&ttc_params);
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ /* To give the uplink rep TTC a lower level for chaining from the root ft */
+ ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
......@@ -1616,6 +1583,51 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
return 0;
}
+ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
+ {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err = 0;
+ if (rep->vport != MLX5_VPORT_UPLINK) {
+ /* non-uplink reps will skip any bypass tables and go directly to
+ * their own ttc
+ */
+ rpriv->root_ft = priv->fs.ttc.ft.t;
+ return 0;
+ }
+ /* the uplink root ft is used to auto-chain to the ethtool or ttc tables */
+ ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
+ return -EOPNOTSUPP;
+ }
+ ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
+ ft_attr.level = 1;
+ rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(rpriv->root_ft)) {
+ err = PTR_ERR(rpriv->root_ft);
+ rpriv->root_ft = NULL;
+ }
+ return err;
+ }
+ static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
+ {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ return;
+ mlx5_destroy_flow_table(rpriv->root_ft);
+ }
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
......@@ -1624,11 +1636,10 @@ static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_destination dest;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest.tir_num = priv->direct_tir[0].tirn;
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- &dest);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rpriv->root_ft;
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
if (IS_ERR(flow_rule))
return PTR_ERR(flow_rule);
rpriv->vport_rx_rule = flow_rule;
......@@ -1668,12 +1679,20 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_tirs;
- err = mlx5e_create_rep_vport_rx_rule(priv);
+ err = mlx5e_create_rep_root_ft(priv);
if (err)
goto err_destroy_ttc_table;
+ err = mlx5e_create_rep_vport_rx_rule(priv);
+ if (err)
+ goto err_destroy_root_ft;
mlx5e_ethtool_init_steering(priv);
return 0;
+ err_destroy_root_ft:
+ mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
......@@ -1694,6 +1713,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_indirect_tirs(priv, false);
......
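The reworked init path above is a textbook instance of the kernel's goto-unwind idiom: each acquired resource adds one label, and mlx5e_cleanup_rep_rx() tears everything down in exactly the reverse order. A self-contained sketch of the pattern (setup_a/setup_b/teardown_a are hypothetical):

static int setup_a(void) { return 0; }	/* hypothetical resource A */
static void teardown_a(void) { }
static int setup_b(void) { return 0; }	/* hypothetical resource B */

static int setup_all(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;

	err = setup_b();
	if (err)
		goto err_teardown_a;	/* unwind only what succeeded */

	return 0;

err_teardown_a:
	teardown_a();
	return err;
}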
......@@ -87,6 +87,7 @@ struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
struct net_device *netdev;
+ struct mlx5_flow_table *root_ft;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
......
......@@ -3313,6 +3313,45 @@ static bool is_duplicated_output_device(struct net_device *dev,
return false;
}
+ static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ u32 actions,
+ struct netlink_ext_ack *extack)
+ {
+ u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (!mlx5_esw_chains_backwards_supported(esw) &&
+ dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+ if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+ }
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
......@@ -3534,29 +3573,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_TUNNEL_DECAP:
action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
break;
- case FLOW_ACTION_GOTO: {
- u32 dest_chain = act->chain_index;
- u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
+ case FLOW_ACTION_GOTO:
+ err = mlx5_validate_goto_chain(esw, flow, act, action,
+ extack);
+ if (err)
+ return err;
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
- if (!mlx5_esw_chains_backwards_supported(esw) &&
- dest_chain <= attr->chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto earlier chain isn't supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
- }
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = dest_chain;
+ attr->dest_chain = act->chain_index;
break;
- }
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
return -EOPNOTSUPP;
......
......@@ -425,7 +425,6 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
- int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack);
......
......@@ -198,7 +198,7 @@ int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
mlx5_esw_for_all_vports(esw, i, vport) {
attr.in_rep->vport = vport->vport;
fdb = esw_vport_tbl_get(esw, &attr);
- if (!fdb)
+ if (IS_ERR(fdb))
goto out;
}
return 0;
......@@ -1344,6 +1344,43 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
return flow_rule;
}
+ static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
+ {
+ u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+ struct mlx5_core_dev *dev = esw->dev;
+ int vport;
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+ if (esw->mode == MLX5_ESWITCH_NONE)
+ return -EOPNOTSUPP;
+ switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+ mlx5_mode = MLX5_INLINE_MODE_NONE;
+ goto out;
+ case MLX5_CAP_INLINE_MODE_L2:
+ mlx5_mode = MLX5_INLINE_MODE_L2;
+ goto out;
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ goto query_vports;
+ }
+ query_vports:
+ mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
+ mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
+ mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+ if (prev_mlx5_mode != mlx5_mode)
+ return -EINVAL;
+ prev_mlx5_mode = mlx5_mode;
+ }
+ out:
+ *mode = mlx5_mode;
+ return 0;
+ }
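In the VPORT_CONTEXT case above, every host-function vport must report the same min-inline mode or the query fails with -EINVAL. A compact standalone restatement of that agreement check (hypothetical helper, not driver API):

/* Hypothetical helper: succeed only when all n vports agree on a mode. */
static int all_vports_agree(const unsigned char *modes, int n,
			    unsigned char *mode_out)
{
	int i;

	for (i = 1; i < n; i++)
		if (modes[i] != modes[0])
			return -1;	/* mixed modes: no single answer */
	*mode_out = modes[0];
	return 0;
}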
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
......@@ -2021,6 +2058,18 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
+ static bool
+ esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
+ {
+ return mlx5_core_mp_enabled(esw->dev);
+ }
+ static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
+ {
+ return esw_check_vport_match_metadata_mandatory(esw) &&
+ esw_check_vport_match_metadata_supported(esw);
+ }
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
......@@ -2059,7 +2108,7 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
int err;
- if (esw_check_vport_match_metadata_supported(esw))
+ if (esw_use_vport_metadata(esw))
esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
......@@ -2479,43 +2528,6 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
- int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
- {
- u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
- struct mlx5_core_dev *dev = esw->dev;
- int vport;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
- if (esw->mode == MLX5_ESWITCH_NONE)
- return -EOPNOTSUPP;
- switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
- case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
- mlx5_mode = MLX5_INLINE_MODE_NONE;
- goto out;
- case MLX5_CAP_INLINE_MODE_L2:
- mlx5_mode = MLX5_INLINE_MODE_L2;
- goto out;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- goto query_vports;
- }
- query_vports:
- mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
- mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
- mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
- if (prev_mlx5_mode != mlx5_mode)
- return -EINVAL;
- prev_mlx5_mode = mlx5_mode;
- }
- out:
- *mode = mlx5_mode;
- return 0;
- }
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack)
......
......@@ -34,6 +34,7 @@ static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
128 };
+ #define ESW_FT_TBL_SZ (64 * 1024)
struct mlx5_esw_chains_priv {
struct rhashtable chains_ht;
......@@ -201,7 +202,9 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
+ sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
+ mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) :
+ mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
if (!sz)
return ERR_PTR(-ENOSPC);
ft_attr.max_fte = sz;
......
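With this change the table backing the "ft chain" asks the pool for at most ESW_FT_TBL_SZ (64K) entries rather than the next (largest) available size. A hypothetical sketch of a pool-capped size pick, roughly how a request might be resolved against a descending size list like ESW_POOLS above:

/* Hypothetical helper: return the largest predefined pool size that
 * does not exceed the requested ceiling, or 0 if nothing fits. */
static unsigned int pool_pick_size(const unsigned int *pools, int npools,
				   unsigned int ceiling)
{
	int i;

	for (i = 0; i < npools; i++)	/* pools[] is sorted descending */
		if (pools[i] <= ceiling)
			return pools[i];
	return 0;
}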
......@@ -57,7 +57,7 @@ struct mlx5_fpga_ipsec_cmd_context {
struct completion complete;
struct mlx5_fpga_device *dev;
struct list_head list; /* Item in pending_cmds */
- u8 command[0];
+ u8 command[];
};
struct mlx5_fpga_esp_xfrm;
......
......@@ -110,7 +110,7 @@
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
- #define OFFLOADS_MAX_FT 1
+ #define OFFLOADS_MAX_FT 2
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
......@@ -145,7 +145,7 @@ static struct init_tree_node {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
LAG_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
OFFLOADS_MAX_FT))),
......
......@@ -470,7 +470,7 @@ struct mlx5_fc_bulk {
u32 base_id;
int bulk_len;
unsigned long *bitmask;
- struct mlx5_fc fcs[0];
+ struct mlx5_fc fcs[];
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
......
......@@ -56,7 +56,7 @@ struct mlx5i_priv {
u32 qkey;
u16 pkey_index;
struct mlx5i_pkey_qpn_ht *qpn_htbl;
- char *mlx5e_priv[0];
+ char *mlx5e_priv[];
};
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
......@@ -107,7 +107,7 @@ struct mlx5i_tx_wqe {
struct mlx5_wqe_datagram_seg datagram;
struct mlx5_wqe_eth_pad pad;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
......
......@@ -42,7 +42,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
MLX5_SET(encryption_key_obj, obj, key_type,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
......
......@@ -101,22 +101,39 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+ static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
+ u16 uid)
+ {
+ return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
+ entry->uid == uid);
+ }
/* Finds an entry where we can register the given rate
* If the rate already exists, return the entry where it is registered,
* otherwise return the first available entry.
* If the table is full, return NULL
*/
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
- struct mlx5_rate_limit *rl)
+ void *rl_in, u16 uid, bool dedicated)
{
struct mlx5_rl_entry *ret_entry = NULL;
bool empty_found = false;
int i;
for (i = 0; i < table->max_size; i++) {
- if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
- return &table->rl_entry[i];
- if (!empty_found && !table->rl_entry[i].rl.rate) {
+ if (dedicated) {
+ if (!table->rl_entry[i].refcount)
+ return &table->rl_entry[i];
+ continue;
+ }
+ if (table->rl_entry[i].refcount) {
+ if (table->rl_entry[i].dedicated)
+ continue;
+ if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
+ uid))
+ return &table->rl_entry[i];
+ } else if (!empty_found) {
empty_found = true;
ret_entry = &table->rl_entry[i];
}
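The lookup now distinguishes shared from dedicated entries: a dedicated request takes any free slot, while a shared request first tries to refcount an identical in-use entry and only then falls back to the first free slot. A self-contained restatement of that policy (names hypothetical):

struct slot { unsigned int refcount; int dedicated; unsigned int key; };

static struct slot *slot_find(struct slot *tbl, int n, unsigned int key,
			      int dedicated)
{
	struct slot *free_slot = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (dedicated) {
			if (!tbl[i].refcount)
				return &tbl[i];	/* any free slot will do */
			continue;
		}
		if (tbl[i].refcount) {
			if (!tbl[i].dedicated && tbl[i].key == key)
				return &tbl[i];	/* share the existing entry */
		} else if (!free_slot) {
			free_slot = &tbl[i];	/* remember first free slot */
		}
	}
	return free_slot;	/* NULL when the table is full */
}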
......@@ -126,18 +143,19 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
}
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
- u16 index,
- struct mlx5_rate_limit *rl)
+ struct mlx5_rl_entry *entry, bool set)
{
- u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+ void *pp_context;
+ pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
MLX5_SET(set_pp_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_PP_RATE_LIMIT);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
- MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
- MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
+ MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
+ if (set)
+ memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
......@@ -158,23 +176,25 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
}
EXPORT_SYMBOL(mlx5_rl_are_equal);
- int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
- struct mlx5_rate_limit *rl)
+ int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry;
int err = 0;
+ u32 rate;
+ rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
mutex_lock(&table->rl_lock);
- if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
+ if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
- rl->rate, table->min_rate, table->max_rate);
+ rate, table->min_rate, table->max_rate);
err = -EINVAL;
goto out;
}
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
if (!entry) {
mlx5_core_err(dev, "Max number of %u rates reached\n",
table->max_size);
......@@ -185,16 +205,24 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
/* rate already configured */
entry->refcount++;
} else {
+ memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
+ entry->uid = uid;
/* new rate limit */
- err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
+ err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
if (err) {
- mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
- err, rl->rate, rl->max_burst_sz,
- rl->typical_pkt_sz);
+ mlx5_core_err(
+ dev,
+ "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+ err, rate,
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ burst_upper_bound),
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ typical_packet_size));
goto out;
}
- entry->rl = *rl;
entry->refcount = 1;
+ entry->dedicated = dedicated_entry;
}
*index = entry->index;
......@@ -202,20 +230,61 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
mutex_unlock(&table->rl_lock);
return err;
}
+ EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
+ void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
+ {
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry;
+ mutex_lock(&table->rl_lock);
+ entry = &table->rl_entry[index - 1];
+ entry->refcount--;
+ if (!entry->refcount)
+ /* need to remove rate */
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+ mutex_unlock(&table->rl_lock);
+ }
+ EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
+ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl)
+ {
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+ return mlx5_rl_add_rate_raw(dev, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0,
+ false, index);
+ }
EXPORT_SYMBOL(mlx5_rl_add_rate);
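A hedged usage sketch of the new raw entry points, mirroring the wrapper above but requesting a dedicated (unshared) entry; the uid selection follows the same packet_pacing_uid capability check, and the immediate removal is only there to show the paired call:

static int rl_dedicated_example(struct mlx5_core_dev *mdev, u32 rate)
{
	u8 ctx[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	u16 index;
	int err;

	/* rate uses the same units as mlx5_rate_limit.rate above */
	MLX5_SET(set_pp_rate_limit_context, ctx, rate_limit, rate);
	err = mlx5_rl_add_rate_raw(mdev, ctx,
				   MLX5_CAP_QOS(mdev, packet_pacing_uid) ?
				   MLX5_SHARED_RESOURCE_UID : 0,
				   true /* dedicated_entry */, &index);
	if (err)
		return err;
	mlx5_rl_remove_rate_raw(mdev, index);
	return 0;
}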
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry = NULL;
- struct mlx5_rate_limit reset_rl = {0};
/* 0 is a reserved value for unlimited rate */
if (rl->rate == 0)
return;
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
mutex_lock(&table->rl_lock);
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0, false);
if (!entry || !entry->refcount) {
mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
......@@ -223,11 +292,9 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
}
entry->refcount--;
- if (!entry->refcount) {
+ if (!entry->refcount)
/* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
- entry->rl = reset_rl;
- }
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
out:
mutex_unlock(&table->rl_lock);
......@@ -273,14 +340,13 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- struct mlx5_rate_limit rl = {0};
int i;
/* Clear all configured rates */
for (i = 0; i < table->max_size; i++)
- if (table->rl_entry[i].rl.rate)
- mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
- &rl);
+ if (table->rl_entry[i].refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
+ false);
kfree(dev->priv.rl_table.rl_entry);
}
......@@ -518,9 +518,11 @@ struct mlx5_rate_limit {
};
struct mlx5_rl_entry {
- struct mlx5_rate_limit rl;
- u16 index;
- u16 refcount;
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
+ u16 index;
+ u64 refcount;
+ u16 uid;
+ u8 dedicated : 1;
};
struct mlx5_rl_table {
......@@ -1008,6 +1010,9 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+ int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index);
+ void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
......
......@@ -414,7 +414,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_16[0x1];
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
- u8 reserved_at_19[0x7];
+ u8 reformat_and_fwd_to_table[0x1];
+ u8 reserved_at_1a[0x6];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
......@@ -741,7 +742,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
- u8 reserved_at_1b[0x1];
+ u8 egress_acl_forward_to_vport[0x1];
u8 fdb_multi_path_to_table[0x1];
u8 reserved_at_1d[0x3];
......@@ -813,7 +814,9 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_4[0x1];
u8 packet_pacing_burst_bound[0x1];
u8 packet_pacing_typical_size[0x1];
- u8 reserved_at_7[0x19];
+ u8 reserved_at_7[0x4];
+ u8 packet_pacing_uid[0x1];
+ u8 reserved_at_c[0x14];
u8 reserved_at_20[0x20];
......@@ -8265,9 +8268,20 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};
+ struct mlx5_ifc_set_pp_rate_limit_context_bits {
+ u8 rate_limit[0x20];
+ u8 burst_upper_bound[0x20];
+ u8 reserved_at_40[0x10];
+ u8 typical_packet_size[0x10];
+ u8 reserved_at_60[0x120];
+ };
struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
......@@ -8277,14 +8291,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 reserved_at_60[0x20];
- u8 rate_limit[0x20];
- u8 burst_upper_bound[0x20];
- u8 reserved_at_c0[0x10];
- u8 typical_packet_size[0x10];
- u8 reserved_at_e0[0x120];
+ struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
};
struct mlx5_ifc_access_register_out_bits {
......@@ -8420,7 +8427,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 proto_mask[0x3];
u8 an_status[0x4];
- u8 reserved_at_24[0x1c];
+ u8 reserved_at_24[0xc];
+ u8 data_rate_oper[0x10];
u8 ext_eth_proto_capability[0x20];
......@@ -10486,7 +10494,8 @@ enum {
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
};
struct mlx5_ifc_tls_static_params_bits {
......
......@@ -608,7 +608,7 @@ struct mlx5_ifc_tls_cmd_bits {
struct mlx5_ifc_tls_resp_bits {
u8 syndrome[0x20];
u8 stream_id[0x20];
- u8 reserverd[0x40];
+ u8 reserved[0x40];
};
#define MLX5_TLS_COMMAND_SIZE (0x100)
......