Commit a460fc5d authored by David S. Miller

Merge tag 'mlx5-fixes-2020-04-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-fixes-2020-04-20
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1c30fbc7 dcdf4ce0
@@ -7,10 +7,10 @@ config MLX5_CORE
 	tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
 	depends on PCI
 	select NET_DEVLINK
-	imply PTP_1588_CLOCK
-	imply VXLAN
-	imply MLXFW
-	imply PCI_HYPERV_INTERFACE
+	depends on VXLAN || !VXLAN
+	depends on MLXFW || !MLXFW
+	depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
+	depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
 	default n
 	---help---
 	  Core driver for low level functionality of the ConnectX-4 and
...
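A note on the Kconfig hunk above: `imply FOO` allows MLX5_CORE=y while FOO=m, which can leave references to FOO's symbols unresolvable at link time. The `depends on FOO || !FOO` idiom is the usual weak-dependency fix: in Kconfig tristate logic (n=0, m=1, y=2, with !A = 2 - A and A || B = max(A, B)) it evaluates to y unless FOO=m, in which case MLX5_CORE is capped at m and the symbols always link. A minimal standalone C sketch of that arithmetic (illustrative only, not kernel code):

#include <stdio.h>

/* Kconfig tristate arithmetic: n=0, m=1, y=2. */
static int tri_not(int a) { return 2 - a; }
static int tri_or(int a, int b) { return a > b ? a : b; }

int main(void)
{
	static const char * const name[] = { "n", "m", "y" };
	int v;

	for (v = 0; v <= 2; v++) {
		/* "depends on VXLAN || !VXLAN" caps MLX5_CORE at this value. */
		int cap = tri_or(v, tri_not(v));

		printf("VXLAN=%s -> MLX5_CORE can be at most %s\n",
		       name[v], name[cap]);
	}
	return 0;
}

Running it prints n -> y, m -> m, y -> y: the driver may only be built-in when the dependency is built-in or disabled.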
@@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
 		return NULL;
 	}
 
-	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+	tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL);
 	if (!tracer)
 		return ERR_PTR(-ENOMEM);
 
@@ -982,7 +982,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
 	tracer->dev = NULL;
 	destroy_workqueue(tracer->work_queue);
 free_tracer:
-	kfree(tracer);
+	kvfree(tracer);
 	return ERR_PTR(err);
 }
 
@@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
 	mlx5_fw_tracer_destroy_log_buf(tracer);
 	flush_workqueue(tracer->work_queue);
 	destroy_workqueue(tracer->work_queue);
-	kfree(tracer);
+	kvfree(tracer);
 }
 
 static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
...
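A note on the fw tracer hunks: the tracer struct is large, and kzalloc() requires physically contiguous pages, so the allocation can fail on fragmented systems or architectures with a low maximum allocation order. kvzalloc() tries kmalloc first and transparently falls back to vmalloc, and kvfree() releases either kind, so both allocation and all free paths must be converted together, as the diff does. A minimal sketch of the pairing, using a hypothetical struct rather than the driver's:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical object, deliberately large enough that a contiguous
 * kmalloc() could fail under memory fragmentation. */
struct example_obj {
	u8 log_buf[1 << 20];
};

static struct example_obj *example_alloc(void)
{
	/* kvzalloc: kmalloc first, vmalloc fallback, zeroed either way. */
	struct example_obj *obj = kvzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);
	return obj;
}

static void example_free(struct example_obj *obj)
{
	kvfree(obj);	/* correct for both kmalloc- and vmalloc-backed memory */
}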
@@ -367,6 +367,7 @@ enum {
 	MLX5E_SQ_STATE_AM,
 	MLX5E_SQ_STATE_TLS,
 	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
+	MLX5E_SQ_STATE_PENDING_XSK_TX,
 };
 
 struct mlx5e_sq_wqe_info {
@@ -960,7 +961,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
...
@@ -12,6 +12,7 @@
 #include <net/flow_offload.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <linux/workqueue.h>
+#include <linux/xarray.h>
 
 #include "esw/chains.h"
 #include "en/tc_ct.h"
@@ -35,7 +36,7 @@ struct mlx5_tc_ct_priv {
 	struct mlx5_eswitch *esw;
 	const struct net_device *netdev;
 	struct idr fte_ids;
-	struct idr tuple_ids;
+	struct xarray tuple_ids;
 	struct rhashtable zone_ht;
 	struct mlx5_flow_table *ct;
 	struct mlx5_flow_table *ct_nat;
@@ -238,7 +239,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 	mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
 	mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
-	idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
+	xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
 }
 
 static void
@@ -483,7 +484,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
 	struct mlx5_eswitch *esw = ct_priv->esw;
 	struct mlx5_flow_spec *spec = NULL;
-	u32 tupleid = 1;
+	u32 tupleid;
 	int err;
 
 	zone_rule->nat = nat;
@@ -493,12 +494,12 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 		return -ENOMEM;
 
 	/* Get tuple unique id */
-	err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid,
-			    TUPLE_ID_MAX, GFP_KERNEL);
+	err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule,
+		       XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL);
 	if (err) {
 		netdev_warn(ct_priv->netdev,
 			    "Failed to allocate tuple id, err: %d\n", err);
-		goto err_idr_alloc;
+		goto err_xa_alloc;
 	}
 	zone_rule->tupleid = tupleid;
@@ -539,8 +540,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
 	mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
 err_mod_hdr:
-	idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
-err_idr_alloc:
+	xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
+err_xa_alloc:
 	kfree(spec);
 	return err;
 }
@@ -1299,7 +1300,7 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
 	}
 
 	idr_init(&ct_priv->fte_ids);
-	idr_init(&ct_priv->tuple_ids);
+	xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1);
 	mutex_init(&ct_priv->control_lock);
 	rhashtable_init(&ct_priv->zone_ht, &zone_params);
@@ -1334,7 +1335,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
 	rhashtable_destroy(&ct_priv->zone_ht);
 	mutex_destroy(&ct_priv->control_lock);
-	idr_destroy(&ct_priv->tuple_ids);
+	xa_destroy(&ct_priv->tuple_ids);
 	idr_destroy(&ct_priv->fte_ids);
 	kfree(ct_priv);
@@ -1352,7 +1353,7 @@ mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
 	if (!ct_priv || !tupleid)
 		return true;
 
-	zone_rule = idr_find(&ct_priv->tuple_ids, tupleid);
+	zone_rule = xa_load(&ct_priv->tuple_ids, tupleid);
 	if (!zone_rule)
 		return false;
...
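A note on the tc_ct hunks: the old code reserved tuple id 0 as a "no tuple" sentinel implicitly, by seeding `tupleid = 1` before idr_alloc_u32(). The XArray conversion makes both constraints explicit: XA_FLAGS_ALLOC1 (or DEFINE_XARRAY_ALLOC1 for a static definition) means index 0 is never handed out, and XA_LIMIT(1, TUPLE_ID_MAX) bounds the allocated id inclusively. The remaining conversions are mechanical: idr_remove -> xa_erase, idr_find -> xa_load, idr_destroy -> xa_destroy. A minimal sketch of the same pattern with hypothetical names:

#include <linux/xarray.h>

#define EXAMPLE_ID_MAX	((1 << 24) - 1)	/* hypothetical bound */

/* Allocating XArray that never returns index 0, so 0 can keep
 * meaning "no id" to the rest of the code. */
static DEFINE_XARRAY_ALLOC1(example_ids);

static int example_id_get(void *entry, u32 *id)
{
	/* Stores entry and writes the chosen index into *id. */
	return xa_alloc(&example_ids, id, entry,
			XA_LIMIT(1, EXAMPLE_ID_MAX), GFP_KERNEL);
}

static void *example_id_lookup(u32 id)
{
	return xa_load(&example_ids, id);
}

static void example_id_put(u32 id)
{
	xa_erase(&example_ids, id);
}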
@@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 		if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
 			return 0;
 
+		if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
+			return 0;
+
 		spin_lock(&c->xskicosq_lock);
 		mlx5e_trigger_irq(&c->xskicosq);
 		spin_unlock(&c->xskicosq_lock);
...
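A note on the XSK wakeup hunk: ndo_xsk_wakeup can be called many times before NAPI runs, and each call posts a NOP WQE to the small ICOSQ via mlx5e_trigger_irq(), so back-to-back wakeups could overflow that queue. test_and_set_bit() is an atomic read-modify-write, so only the first caller observes the bit clear and rings the doorbell; later wakeups are no-ops until NAPI clears the bit (see the en_txrx.c hunk further down). A minimal sketch of the gate, with hypothetical names:

#include <linux/bitops.h>

enum {
	EXAMPLE_STATE_ENABLED,
	EXAMPLE_STATE_PENDING,	/* IRQ requested, NAPI has not yet run */
};

static void example_trigger_irq(void);	/* assumed doorbell helper */

static int example_wakeup(unsigned long *state)
{
	if (!test_bit(EXAMPLE_STATE_ENABLED, state))
		return 0;

	/* Atomic: of many concurrent wakeups only the first sees 0 here,
	 * so at most one NOP WQE is ever outstanding. */
	if (test_and_set_bit(EXAMPLE_STATE_PENDING, state))
		return 0;

	example_trigger_irq();
	return 0;
}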
@@ -3583,7 +3583,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 
-	if (!mlx5e_monitor_counter_supported(priv)) {
+	/* In switchdev mode, monitor counters doesn't monitor
+	 * rx/tx stats of 802_3. The update stats mechanism
+	 * should keep the 802_3 layout counters updated
+	 */
+	if (!mlx5e_monitor_counter_supported(priv) ||
+	    mlx5e_is_uplink_rep(priv)) {
 		/* update HW stats in background for next time */
 		mlx5e_queue_update_stats(priv);
 	}
...
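A note on the mlx5e_get_stats() hunk: for an uplink representor in switchdev mode, the monitor-counter mechanism exists but does not track the 802.3 rx/tx counters this function reports, so the cached values went stale; the fix also queues the background refresh in that case. The pattern matters because ndo_get_stats64 runs in contexts that cannot sleep or issue firmware commands: report the cache now, refresh it asynchronously for the next reader. A minimal sketch with hypothetical names:

#include <linux/compiler.h>
#include <linux/workqueue.h>

/* Hypothetical driver private struct: stats are served from a cache
 * and refreshed from hardware in process context. */
struct example_priv {
	struct work_struct update_stats_work;	/* reads HW counters */
	u64 rx_bytes_cached;
};

static void example_get_stats(struct example_priv *priv, u64 *rx_bytes)
{
	/* Can't query firmware here; return the cached value... */
	*rx_bytes = READ_ONCE(priv->rx_bytes_cached);

	/* ...and refresh it in the background for the next caller. */
	queue_work(system_wq, &priv->update_stats_work);
}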
@@ -589,7 +589,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 	return !!err;
 }
 
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
 	struct mlx5_cqe64 *cqe;
@@ -597,11 +597,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 	int i;
 
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return;
+		return 0;
 
 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
 	if (likely(!cqe))
-		return;
+		return 0;
 
 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
 	 * otherwise a cq overrun may occur
@@ -650,6 +650,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 	sq->cc = sqcc;
 
 	mlx5_cqwq_update_db_record(&cq->wq);
+
+	return i;
 }
 
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
...
@@ -152,7 +152,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 						 mlx5e_post_rx_wqes,
 						 rq);
 	if (xsk_open) {
-		mlx5e_poll_ico_cq(&c->xskicosq.cq);
+		if (mlx5e_poll_ico_cq(&c->xskicosq.cq))
+			/* Don't clear the flag if nothing was polled to prevent
+			 * queueing more WQEs and overflowing XSKICOSQ.
+			 */
+			clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state);
 		busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
 		busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
 	}
...
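A note on the last two hunks: mlx5e_poll_ico_cq() now returns the number of completions it processed, and the NAPI path clears PENDING_XSK_TX only when that count is non-zero. Clearing unconditionally would re-open the wakeup gate while the earlier NOP WQE is still in flight, letting further wakeups queue more WQEs and overflow the XSK ICOSQ. A minimal poll-side sketch to pair with the wakeup sketch above (hypothetical names):

#include <linux/bitops.h>

enum { EXAMPLE_STATE_PENDING = 1 };	/* same bit as in the wakeup sketch */

/* Hypothetical CQ poll routine returning completions processed. */
static int example_poll_cq(void);

static void example_napi_poll(unsigned long *state)
{
	/* Clear PENDING only when the poll made progress; an empty poll
	 * means the requested interrupt's work isn't done yet, so the
	 * gate in example_wakeup() must stay closed. */
	if (example_poll_cq())
		clear_bit(EXAMPLE_STATE_PENDING, state);
}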