Commit 6ecfdd28 authored by David S. Miller

Merge branch 'mlx5e-next'

Amir Vadai says:

====================
ConnectX-4 driver update 2015-07-23

This patchset introduces some performance enhancements to the ConnectX-4 driver:
1. Improve RSS distribution, and make the RSS hash function controllable using
   ethtool.
2. Allocate memory that is written by the NIC and read by the host CPU on the
   NUMA node local to the processing CPU.
3. Support TX copybreak.
4. Use the hardware blueflame feature to save DMA reads when possible.

Another patch by Achiad fixes some cosmetic issues in the driver.

Patchset was applied and tested on top of commit 045a0fa0 ("ip_tunnel: Call
ip_tunnel_core_init() from inet_init()")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fda19e83 a741749f
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -45,15 +45,34 @@
  * register it in a memory region at HCA virtual address 0.
  */
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+					   size_t size, dma_addr_t *dma_handle,
+					   int node)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	int original_node;
+	void *cpu_handle;
+
+	mutex_lock(&priv->alloc_mutex);
+	original_node = dev_to_node(&dev->pdev->dev);
+	set_dev_node(&dev->pdev->dev, node);
+	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+					 dma_handle, GFP_KERNEL);
+	set_dev_node(&dev->pdev->dev, original_node);
+	mutex_unlock(&priv->alloc_mutex);
+	return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node)
 {
 	dma_addr_t t;
 
 	buf->size = size;
 	buf->npages = 1;
 	buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
-	buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
-					      size, &t, GFP_KERNEL);
+	buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
+							&t, node);
 	if (!buf->direct.buf)
 		return -ENOMEM;
@@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
 	return 0;
 }
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+						 int node)
 {
 	struct mlx5_db_pgdir *pgdir;
@@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
 		return NULL;
 
 	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
-	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					    &pgdir->db_dma, GFP_KERNEL);
+
+	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+						       &pgdir->db_dma, node);
 	if (!pgdir->db_page) {
 		kfree(pgdir);
 		return NULL;
@@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
 	return 0;
 }
 
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
 {
 	struct mlx5_db_pgdir *pgdir;
 	int ret = 0;
@@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
 		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
 			goto out;
 
-	pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+	pgdir = mlx5_alloc_db_pgdir(dev, node);
 	if (!pgdir) {
 		ret = -ENOMEM;
 		goto out;
@@ -145,6 +171,12 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
...
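The new helper rebinds the PCI device to the requested NUMA node around dma_zalloc_coherent(), with alloc_mutex serializing the rebind so concurrent allocators never observe the wrong node. Below is a minimal sketch of how a consumer might use the *_node variants to keep a queue's doorbell record and buffer on the node of the CPU that services it; struct my_queue and my_queue_alloc() are hypothetical names for illustration, not part of this patchset.

/* Hypothetical caller (kernel context, not from this patchset): place a
 * queue's doorbell record and work-queue buffer on the NUMA node of the
 * CPU that will service it. */
struct my_queue {
	struct mlx5_db	db;
	struct mlx5_buf	buf;
	int		buf_size;
};

static int my_queue_alloc(struct mlx5_core_dev *mdev,
			  struct my_queue *q, int cpu)
{
	int node = cpu_to_node(cpu);
	int err;

	err = mlx5_db_alloc_node(mdev, &q->db, node);
	if (err)
		return err;

	err = mlx5_buf_alloc_node(mdev, q->buf_size, &q->buf, node);
	if (err)
		mlx5_db_free(mdev, &q->db);

	return err;
}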
drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -60,6 +60,7 @@
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET             16
 
 static const char vport_strings[][ETH_GSTRING_LEN] = {
 	/* vport statistics */
@@ -195,6 +196,8 @@ struct mlx5e_params {
 	u16 rx_hash_log_tbl_sz;
 	bool lro_en;
 	u32 lro_wqe_sz;
+	u8  rss_hfunc;
+	u16 tx_max_inline;
 };
 
 enum {
@@ -266,7 +269,9 @@ struct mlx5e_sq {
 	/* dirtied @xmit */
 	u16                        pc ____cacheline_aligned_in_smp;
 	u32                        dma_fifo_pc;
-	u32                        bf_offset;
+	u16                        bf_offset;
+	u16                        prev_cc;
+	u8                         bf_budget;
 	struct mlx5e_sq_stats      stats;
 
 	struct mlx5e_cq            cq;
@@ -279,9 +284,10 @@ struct mlx5e_sq {
 	struct mlx5_wq_cyc         wq;
 	u32                        dma_fifo_mask;
 	void __iomem              *uar_map;
+	void __iomem              *uar_bf_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
-	u32                        bf_buf_size;
+	u16                        bf_buf_size;
 	u16                        max_inline;
 	u16                        edge;
 	struct device             *pdev;
@@ -324,14 +330,18 @@ struct mlx5e_channel {
 };
 
 enum mlx5e_traffic_types {
-	MLX5E_TT_IPV4_TCP = 0,
-	MLX5E_TT_IPV6_TCP = 1,
-	MLX5E_TT_IPV4_UDP = 2,
-	MLX5E_TT_IPV6_UDP = 3,
-	MLX5E_TT_IPV4     = 4,
-	MLX5E_TT_IPV6     = 5,
-	MLX5E_TT_ANY      = 6,
-	MLX5E_NUM_TT      = 7,
+	MLX5E_TT_IPV4_TCP,
+	MLX5E_TT_IPV6_TCP,
+	MLX5E_TT_IPV4_UDP,
+	MLX5E_TT_IPV6_UDP,
+	MLX5E_TT_IPV4_IPSEC_AH,
+	MLX5E_TT_IPV6_IPSEC_AH,
+	MLX5E_TT_IPV4_IPSEC_ESP,
+	MLX5E_TT_IPV6_IPSEC_ESP,
+	MLX5E_TT_IPV4,
+	MLX5E_TT_IPV6,
+	MLX5E_TT_ANY,
+	MLX5E_NUM_TT,
 };
 
 enum {
@@ -491,8 +501,10 @@ int mlx5e_update_priv_params(struct mlx5e_priv *priv,
 			     struct mlx5e_params *new_params);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-				      struct mlx5e_tx_wqe *wqe)
+				      struct mlx5e_tx_wqe *wqe, int bf_sz)
 {
+	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
 	/* ensure wqe is visible to device before updating doorbell record */
 	dma_wmb();
@@ -503,9 +515,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
 	 */
 	wmb();
 
-	mlx5_write64((__be32 *)&wqe->ctrl,
-		     sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
-		     NULL);
+	if (bf_sz) {
+		__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+		/* flush the write-combining mapped buffer */
+		wmb();
+
+	} else {
+		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+	}
 
 	sq->bf_offset ^= sq->bf_buf_size;
 }
@@ -519,3 +537,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 }
 
 extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
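mlx5e_tx_notify_hw() alternates between the two halves of the UAR's blueflame register: after every doorbell, sq->bf_offset is XORed with bf_buf_size, so the driver writes one half while the hardware may still be consuming the other. A standalone illustration of the toggle follows; the 512-byte register (log_bf_reg_size = 9) is an assumed capability value.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t bf_buf_size = (1 << 9) / 2; /* assumed 512B BF register, 256B halves */
	uint16_t bf_offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("doorbell %d writes at BF offset %u\n", i, bf_offset);
		bf_offset ^= bf_buf_size; /* flip halves: 0 <-> 256 */
	}
	return 0;
}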
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -662,6 +662,94 @@ static int mlx5e_set_settings(struct net_device *netdev,
 	return err;
 }
 
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+			  u8 *hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (hfunc)
+		*hfunc = priv->params.rss_hfunc;
+
+	return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *netdev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
+
+	if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+		return 0;
+
+	if ((hfunc != ETH_RSS_HASH_XOR) &&
+	    (hfunc != ETH_RSS_HASH_TOP))
+		return -EINVAL;
+
+	mutex_lock(&priv->state_lock);
+
+	priv->params.rss_hfunc = hfunc;
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		mlx5e_close_locked(priv->netdev);
+		err = mlx5e_open_locked(priv->netdev);
+	}
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     void *data)
+{
+	const struct mlx5e_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		*(u32 *)data = priv->params.tx_max_inline;
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     const void *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_params new_params;
+	u32 val;
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		val = *(u32 *)data;
+		if (val > mlx5e_get_max_inline_cap(mdev)) {
+			err = -EINVAL;
+			break;
+		}
+
+		mutex_lock(&priv->state_lock);
+		new_params = priv->params;
+		new_params.tx_max_inline = val;
+		err = mlx5e_update_priv_params(priv, &new_params);
+		mutex_unlock(&priv->state_lock);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_drvinfo       = mlx5e_get_drvinfo,
 	.get_link          = ethtool_op_get_link,
@@ -676,4 +764,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
 	.set_coalesce      = mlx5e_set_coalesce,
 	.get_settings      = mlx5e_get_settings,
 	.set_settings      = mlx5e_set_settings,
+	.get_rxfh          = mlx5e_get_rxfh,
+	.set_rxfh          = mlx5e_set_rxfh,
+	.get_tunable       = mlx5e_get_tunable,
+	.set_tunable       = mlx5e_set_tunable,
 };
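These handlers plug into the generic ethtool RXFH and tunable interfaces, so no new UAPI is needed; an ethtool binary that knows --get-tunable/--set-tunable and -x/-X can drive them. Below is a userspace sketch of the tx-copybreak query through the raw SIOCETHTOOL ioctl; the interface name is an assumption and error handling is minimal.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0"; /* assumed name */
	struct {
		struct ethtool_tunable hdr; /* kernel writes the value just past the header */
		__u32 val;
	} req = {
		.hdr = {
			.cmd     = ETHTOOL_GTUNABLE,
			.id      = ETHTOOL_TX_COPYBREAK,
			.type_id = ETHTOOL_TUNABLE_U32,
			.len     = sizeof(__u32),
		},
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&req;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}
	printf("%s tx-copybreak: %u bytes\n", ifname, req.val);
	close(fd);
	return 0;
}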
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@ struct mlx5e_rq_param {
 struct mlx5e_sq_param {
 	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
 	struct mlx5_wq_param       wq;
+	u16                        max_inline;
 };
 
 struct mlx5e_cq_param {
@@ -272,6 +273,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 	int err;
 	int i;
 
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
 	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
 				&rq->wq_ctrl);
 	if (err)
@@ -502,6 +505,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	if (err)
 		return err;
 
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
 				 &sq->wq_ctrl);
 	if (err)
@@ -509,7 +514,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 
 	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
 	sq->uar_map     = sq->uar.map;
+	sq->uar_bf_map  = sq->uar.bf_map;
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+	sq->max_inline  = param->max_inline;
 
 	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
 	if (err)
@@ -523,6 +530,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	sq->channel = c;
 	sq->tc      = tc;
 	sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
 	priv->txq_to_sq_map[txq_ix] = sq;
 
 	return 0;
@@ -702,7 +710,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
 	int err;
 	u32 i;
 
-	param->wq.numa  = cpu_to_node(c->cpu);
+	param->wq.buf_numa_node = cpu_to_node(c->cpu);
+	param->wq.db_numa_node  = cpu_to_node(c->cpu);
 	param->eq_ix    = c->ix;
 
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -1000,7 +1009,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 	MLX5_SET(wq, wq, log_wq_sz,    priv->params.log_rq_size);
 	MLX5_SET(wq, wq, pd,           priv->pdn);
 
-	param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
 	param->wq.linear = 1;
 }
 
@@ -1014,7 +1023,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
 	MLX5_SET(wq, wq, pd,            priv->pdn);
 
-	param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+	param->max_inline = priv->params.tx_max_inline;
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1158,6 +1168,24 @@ static void mlx5e_close_tises(struct mlx5e_priv *priv)
 		mlx5e_close_tis(priv, tc);
 }
 
+static int mlx5e_rx_hash_fn(int hfunc)
+{
+	return (hfunc == ETH_RSS_HASH_TOP) ?
+	       MLX5_RX_HASH_FN_TOEPLITZ :
+	       MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+	int inv = 0;
+	int i;
+
+	for (i = 0; i < size; i++)
+		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+	return inv;
+}
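Mechanically, filling slot i with channel i % num_channels keys the channel off the slot's low-order bits, while bits_invert(i) % num_channels keys it off the bit-reversed (high-order) bits; this compensates for how the inverted-XOR8 hash spreads entropy across the table index. A standalone rendition of the fill logic, with a 16-entry table over 4 channels as assumed example sizes:

#include <stdio.h>

static int bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= ((a >> (size - i - 1)) & 1) << i; /* mirror the low 'size' bits */
	return inv;
}

int main(void)
{
	int log_tbl_sz = 4;   /* assumed: 16-entry indirection table */
	int num_channels = 4; /* assumed channel count */
	int i;

	for (i = 0; i < (1 << log_tbl_sz); i++)
		printf("slot %2d -> channel %d\n",
		       i, bits_invert(i, log_tbl_sz) % num_channels);
	return 0;
}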
 static int mlx5e_open_rqt(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -1166,11 +1194,10 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
 	void *rqtc;
 	int inlen;
 	int err;
-	int sz;
+	int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
+	int sz = 1 << log_tbl_sz;
 	int i;
 
-	sz = 1 << priv->params.rx_hash_log_tbl_sz;
-
 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
 	in = mlx5_vzalloc(inlen);
 	if (!in)
@@ -1182,8 +1209,12 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
 	for (i = 0; i < sz; i++) {
-		int ix = i % priv->params.num_channels;
+		int ix = i;
+
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+			ix = mlx5e_bits_invert(i, log_tbl_sz);
+
+		ix = ix % priv->params.num_channels;
 		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
 	}
@@ -1224,11 +1255,15 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
 				 MLX5_HASH_FIELD_SEL_DST_IP)
 
-#define MLX5_HASH_ALL           (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
 
 	if (priv->params.lro_en) {
 		MLX5_SET(tirc, tirc, lro_enable_mask,
 			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
@@ -1254,12 +1289,16 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 		MLX5_SET(tirc, tirc, indirect_table,
 			 priv->rqtn);
 		MLX5_SET(tirc, tirc, rx_hash_fn,
-			 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
-		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-		netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
-						 rx_hash_toeplitz_key),
-				    MLX5_FLD_SZ_BYTES(tirc,
-						      rx_hash_toeplitz_key));
+			 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+			void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+						     rx_hash_toeplitz_key);
+			size_t len = MLX5_FLD_SZ_BYTES(tirc,
+						       rx_hash_toeplitz_key);
+
+			MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+			netdev_rss_key_fill(rss_key, len);
+		}
 		break;
 	}
 
@@ -1270,7 +1309,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
 			 MLX5_L4_PROT_TYPE_TCP);
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
+			 MLX5_HASH_IP_L4PORTS);
 		break;
 
 	case MLX5E_TT_IPV6_TCP:
@@ -1279,7 +1318,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
 			 MLX5_L4_PROT_TYPE_TCP);
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
+			 MLX5_HASH_IP_L4PORTS);
 		break;
 
 	case MLX5E_TT_IPV4_UDP:
@@ -1288,7 +1327,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
 			 MLX5_L4_PROT_TYPE_UDP);
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
+			 MLX5_HASH_IP_L4PORTS);
 		break;
 
 	case MLX5E_TT_IPV6_UDP:
@@ -1297,7 +1336,35 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
 			 MLX5_L4_PROT_TYPE_UDP);
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
 		break;
 
 	case MLX5E_TT_IPV4:
@@ -1673,6 +1740,15 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	return 0;
 }
 
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+	return bf_buf_size -
+	       sizeof(struct mlx5e_tx_wqe) +
+	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
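For a feel of the numbers: the copybreak ceiling is one blueflame half-buffer minus the fixed WQE header, plus the 2 bytes of inline_hdr_start that belong to the header but carry packet data. The worked example below assumes log_bf_reg_size = 9 and a 32-byte struct mlx5e_tx_wqe; both are illustrative values, not read from hardware.

#include <stdio.h>

int main(void)
{
	int log_bf_reg_size = 9;                      /* assumed HCA capability */
	int bf_buf_size = (1 << log_bf_reg_size) / 2; /* 256B per BF half */
	int wqe_hdr = 32;                             /* assumed sizeof(struct mlx5e_tx_wqe) */

	printf("tx_max_inline = %d bytes\n", bf_buf_size - wqe_hdr + 2); /* 226 */
	return 0;
}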
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 				    struct net_device *netdev,
 				    int num_comp_vectors)
@@ -1691,6 +1767,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
 	priv->params.tx_cq_moderation_pkts =
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
 	priv->params.min_rx_wqes           =
 		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
 	priv->params.rx_hash_log_tbl_sz    =
@@ -1700,6 +1777,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 		MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
 	priv->params.num_tc                = 1;
 	priv->params.default_vlan_prio     = 0;
+	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
 	priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
 	priv->params.lro_wqe_sz =
...
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 
 	if (notify_hw) {
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe);
+		mlx5e_tx_notify_hw(sq, wqe, 0);
 	}
 }
 
@@ -110,9 +110,17 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 }
 
 static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-					    struct sk_buff *skb)
+					    struct sk_buff *skb, bool bf)
 {
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+	/* Some NIC TX decisions, e.g. loopback, are based on the packet
+	 * headers and occur before the data gather.
+	 * Therefore these headers must be copied into the WQE
+	 */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
+	if (bf && (skb_headlen(skb) <= sq->max_inline))
+		return skb_headlen(skb);
+
 	return MLX5E_MIN_INLINE;
 }
 
@@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
 	u8  opcode = MLX5_OPCODE_SEND;
 	dma_addr_t dma_addr = 0;
+	bool bf = false;
 	u16 headlen;
 	u16 ds_cnt;
 	u16 ihs;
@@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	else
 		sq->stats.csum_offload_none++;
 
+	if (sq->cc != sq->prev_cc) {
+		sq->prev_cc = sq->cc;
+		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+	}
+
 	if (skb_is_gso(skb)) {
 		u32 payload_len;
 
@@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		sq->stats.tso_packets++;
 		sq->stats.tso_bytes += payload_len;
 	} else {
-		ihs = mlx5e_get_inline_hdr_size(sq, skb);
+		bf = sq->bf_budget &&
+		     !skb->xmit_more &&
+		     !skb_shinfo(skb)->nr_frags;
+		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
 		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
 							ETH_ZLEN);
 	}
@@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	}
 
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+		int bf_sz = 0;
+
+		if (bf && sq->uar_bf_map)
+			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe);
+		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
 	}
 
 	/* fill sq edge with nops to avoid wqe wrap around */
 	while ((sq->pc & wq->sz_m1) > sq->edge)
 		mlx5e_send_nop(sq, false);
 
+	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
 	sq->stats.packets++;
 	return NETDEV_TX_OK;
...
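Pulling the TX pieces together: blueflame is attempted only while the per-SQ budget lasts, the stack is not batching (xmit_more), and the skb is linear; an eligible packet then inlines its entire linear part into the WQE so the send needs no DMA gather. A standalone sketch of that decision follows; the MAX_INLINE/MIN_INLINE values are assumptions.

#include <stdio.h>
#include <stdbool.h>

#define MAX_INLINE 226      /* assumed mlx5e_get_max_inline_cap() result */
#define MIN_INLINE (14 + 2) /* ETH_HLEN + vlan tag */

/* mirrors mlx5e_get_inline_hdr_size(): blueflame sends inline everything */
static int inline_hdr_size(int headlen, bool bf)
{
	if (bf && headlen <= MAX_INLINE)
		return headlen;
	return MIN_INLINE;
}

int main(void)
{
	struct { int headlen, nr_frags; bool xmit_more; } pkts[] = {
		{ 60, 0, false },   /* small linear packet: blueflame */
		{ 200, 0, true },   /* batched (xmit_more): no blueflame */
		{ 1500, 3, false }, /* fragmented: regular doorbell */
	};
	int bf_budget = 16; /* MLX5E_SQ_BF_BUDGET */
	int i;

	for (i = 0; i < 3; i++) {
		bool bf = bf_budget && !pkts[i].xmit_more && !pkts[i].nr_frags;

		printf("pkt %d: bf=%d inline=%d bytes\n",
		       i, bf, inline_hdr_size(pkts[i].headlen, bf));
		bf_budget = bf ? bf_budget - 1 : 0; /* any non-BF send zeroes the budget */
	}
	return 0;
}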
drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -455,7 +455,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 	struct mlx5_priv *priv  = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node           = dev_to_node(&mdev->pdev->dev);
+	int numa_node           = priv->numa_node;
 	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -654,6 +654,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 }
 #endif
 
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+	return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+	if (dev->priv.bf_mapping)
+		io_mapping_free(dev->priv.bf_mapping);
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -668,6 +684,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 	INIT_LIST_HEAD(&priv->pgdir_list);
 	spin_lock_init(&priv->mkey_lock);
 
+	mutex_init(&priv->alloc_mutex);
+
+	priv->numa_node = dev_to_node(&dev->pdev->dev);
+
 	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
 	if (!priv->dbg_root)
 		return -ENOMEM;
@@ -804,10 +824,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 		goto err_stop_eqs;
 	}
 
+	if (map_bf_area(dev))
+		dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
 	err = mlx5_irq_set_affinity_hints(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-		goto err_free_comp_eqs;
+		goto err_unmap_bf_area;
 	}
 
 	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -819,7 +842,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
 	return 0;
 
-err_free_comp_eqs:
+err_unmap_bf_area:
+	unmap_bf_area(dev);
+
 	free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -877,6 +902,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
 	mlx5_irq_clear_affinity_hints(dev);
+	unmap_bf_area(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_free_uuars(dev, &priv->uuari);
...
drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -32,6 +32,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
@@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 		goto err_free_uar;
 	}
 
+	if (mdev->priv.bf_mapping)
+		uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+						uar->index << PAGE_SHIFT);
+
 	return 0;
 
 err_free_uar:
@@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
+	io_mapping_unmap(uar->bf_map);
 	iounmap(uar->map);
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
...
drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
 	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
 		goto err_db_free;
@@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
 	wq->sz_m1 = (1 << wq->log_sz) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
 		goto err_db_free;
@@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
 	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
...
drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -37,7 +37,8 @@
 
 struct mlx5_wq_param {
 	int		linear;
-	int		numa;
+	int		buf_numa_node;
+	int		db_numa_node;
 };
 
 struct mlx5_wq_ctrl {
...
include/linux/mlx5/driver.h
@@ -380,7 +380,7 @@ struct mlx5_uar {
 	u32			index;
 	struct list_head	bf_list;
 	unsigned		free_bf_bmap;
-	void __iomem	       *wc_map;
+	void __iomem	       *bf_map;
 	void __iomem	       *map;
 };
 
@@ -435,6 +435,8 @@ struct mlx5_priv {
 	struct mlx5_uuar_info	uuari;
 	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
+	struct io_mapping	*bf_mapping;
+
 	/* pages stuff */
 	struct workqueue_struct *pg_wq;
 	struct rb_root		page_root;
@@ -463,6 +465,10 @@ struct mlx5_priv {
 	/* end: mr staff */
 
 	/* start: alloc staff */
+	/* protect buffer allocation according to numa node */
+	struct mutex		alloc_mutex;
+	int			numa_node;
+
 	struct mutex		pgdir_mutex;
 	struct list_head	pgdir_list;
 	/* end: alloc staff */
@@ -672,6 +678,8 @@ void mlx5_health_cleanup(void);
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -773,6 +781,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+		       int node);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 const char *mlx5_command_str(int command);
...
include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@ enum {
 };
 
 enum {
-	MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
-	MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
-	MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+	MLX5_RX_HASH_FN_NONE           = 0x0,
+	MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
+	MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
 };
 
 enum {
...