Commit 0ac3ea70 authored by Mohamad Haj Yahia, committed by David S. Miller

net/mlx5: Make the command interface cache more flexible

Add more command cache size sets, and more entries in each set, based on
the different sizes and the frequency of the commands in the current
command set.

Fixes: e126ba97 ('mlx5: Add driver for Mellanox Connect-IB adapters')
Signed-off-by: Mohamad Haj Yahia <mohamad@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7a8bca04
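For scale: the old scheme kept two pools, 2 "long" entries (each sized so a single message can carry page pointers for 2 GB of memory) and 64 "medium" entries of one data block each. As a rough guide (not part of the patch), the five new pools work out as follows, assuming MLX5_CMD_DATA_BLOCK_SIZE is 512 bytes, its value in the mlx5 driver; the totals count inbox payload capacity only and ignore per-mailbox bookkeeping overhead:

	pool  entries  max inbox size               preallocated capacity
	0     512      16 + 512      =     528 B    ~264 KiB
	1      32      16 + 1,024    =   1,040 B     ~33 KiB
	2      16      16 + 8,192    =   8,208 B    ~128 KiB
	3       8      16 + 131,072  = 131,088 B      ~1 MiB
	4       2      16 + 262,144  = 262,160 B    ~512 KiB

That is roughly 2 MB in total, with the bulk of the entries reserved for the small, frequent commands.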
@@ -53,14 +53,6 @@ enum {
 	CMD_MODE_EVENTS
 };
 
-enum {
-	NUM_LONG_LISTS	= 2,
-	NUM_MED_LISTS	= 64,
-	LONG_LIST_SIZE	= (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
-				MLX5_CMD_DATA_BLOCK_SIZE,
-	MED_LIST_SIZE	= 16 + MLX5_CMD_DATA_BLOCK_SIZE,
-};
-
 enum {
 	MLX5_CMD_DELIVERY_STAT_OK		= 0x0,
 	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR	= 0x1,
@@ -1372,10 +1364,10 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
 {
 	unsigned long flags;
 
-	if (msg->cache) {
-		spin_lock_irqsave(&msg->cache->lock, flags);
-		list_add_tail(&msg->list, &msg->cache->head);
-		spin_unlock_irqrestore(&msg->cache->lock, flags);
+	if (msg->parent) {
+		spin_lock_irqsave(&msg->parent->lock, flags);
+		list_add_tail(&msg->list, &msg->parent->head);
+		spin_unlock_irqrestore(&msg->parent->lock, flags);
 	} else {
 		mlx5_free_cmd_msg(dev, msg);
 	}
@@ -1472,30 +1464,37 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
 			      gfp_t gfp)
 {
 	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
+	struct cmd_msg_cache *ch = NULL;
 	struct mlx5_cmd *cmd = &dev->cmd;
-	struct cache_ent *ent = NULL;
-
-	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
-		ent = &cmd->cache.large;
-	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
-		ent = &cmd->cache.med;
-
-	if (ent) {
-		spin_lock_irq(&ent->lock);
-		if (!list_empty(&ent->head)) {
-			msg = list_entry(ent->head.next, typeof(*msg), list);
-			/* For cached lists, we must explicitly state what is
-			 * the real size
-			 */
-			msg->len = in_size;
-			list_del(&msg->list);
+	int i;
+
+	if (in_size <= 16)
+		goto cache_miss;
+
+	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+		ch = &cmd->cache[i];
+		if (in_size > ch->max_inbox_size)
+			continue;
+		spin_lock_irq(&ch->lock);
+		if (list_empty(&ch->head)) {
+			spin_unlock_irq(&ch->lock);
+			continue;
 		}
-		spin_unlock_irq(&ent->lock);
+		msg = list_entry(ch->head.next, typeof(*msg), list);
+		/* For cached lists, we must explicitly state what is
+		 * the real size
+		 */
+		msg->len = in_size;
+		list_del(&msg->list);
+		spin_unlock_irq(&ch->lock);
+		break;
 	}
 
-	if (IS_ERR(msg))
-		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
-
+	if (!IS_ERR(msg))
+		return msg;
+
+cache_miss:
+	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
 	return msg;
 }
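The tier-selection logic above is easier to follow outside diff form. Below is a minimal standalone sketch of the same idea in plain userspace C; pick_cache_tier() is a hypothetical helper, not a function in the driver, and the locking and free-list handling are omitted. Because the pools are ordered from smallest to largest max inbox size, the first pool that fits is the tightest one.

#include <stdio.h>

#define NUM_CACHES 5

/* Hypothetical stand-ins for ch->max_inbox_size, smallest first,
 * assuming a 512-byte command data block.
 */
static const unsigned int max_inbox_size[NUM_CACHES] = {
	16 + 512,
	16 + 512 * 2,
	16 + 512 * 16,
	16 + 512 * 256,
	16 + 512 * 512,
};

/* Return the smallest tier that can hold in_size, or -1 for a cache
 * miss; the driver then falls back to mlx5_alloc_cmd_msg().  The first
 * 16 bytes of a command travel inline in the command entry, so requests
 * of 16 bytes or less skip the cache entirely.
 */
static int pick_cache_tier(unsigned int in_size)
{
	int i;

	if (in_size <= 16)
		return -1;
	for (i = 0; i < NUM_CACHES; i++)
		if (in_size <= max_inbox_size[i])
			return i;
	return -1;
}

int main(void)
{
	printf("%d\n", pick_cache_tier(100));    /* 0 */
	printf("%d\n", pick_cache_tier(5000));   /* 2 */
	printf("%d\n", pick_cache_tier(300000)); /* -1: no tier fits */
	return 0;
}

One subtlety the sketch leaves out: in the driver, a pool whose free list happens to be empty is skipped and the next larger pool is tried, so a burst of small commands can borrow from the bigger pools before falling back to a fresh allocation.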
@@ -1593,58 +1592,56 @@ EXPORT_SYMBOL(mlx5_cmd_exec_cb);
 
 static void destroy_msg_cache(struct mlx5_core_dev *dev)
 {
-	struct mlx5_cmd *cmd = &dev->cmd;
+	struct cmd_msg_cache *ch;
 	struct mlx5_cmd_msg *msg;
 	struct mlx5_cmd_msg *n;
+	int i;
 
-	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
-		list_del(&msg->list);
-		mlx5_free_cmd_msg(dev, msg);
-	}
-
-	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
-		list_del(&msg->list);
-		mlx5_free_cmd_msg(dev, msg);
+	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+		ch = &dev->cmd.cache[i];
+		list_for_each_entry_safe(msg, n, &ch->head, list) {
+			list_del(&msg->list);
+			mlx5_free_cmd_msg(dev, msg);
+		}
 	}
 }
 
-static int create_msg_cache(struct mlx5_core_dev *dev)
+static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
+	512, 32, 16, 8, 2
+};
+
+static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
+	16 + MLX5_CMD_DATA_BLOCK_SIZE,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
+};
+
+static void create_msg_cache(struct mlx5_core_dev *dev)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
+	struct cmd_msg_cache *ch;
 	struct mlx5_cmd_msg *msg;
-	int err;
 	int i;
+	int k;
 
-	spin_lock_init(&cmd->cache.large.lock);
-	INIT_LIST_HEAD(&cmd->cache.large.head);
-	spin_lock_init(&cmd->cache.med.lock);
-	INIT_LIST_HEAD(&cmd->cache.med.head);
-
-	for (i = 0; i < NUM_LONG_LISTS; i++) {
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
-		if (IS_ERR(msg)) {
-			err = PTR_ERR(msg);
-			goto ex_err;
-		}
-		msg->cache = &cmd->cache.large;
-		list_add_tail(&msg->list, &cmd->cache.large.head);
-	}
-
-	for (i = 0; i < NUM_MED_LISTS; i++) {
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
-		if (IS_ERR(msg)) {
-			err = PTR_ERR(msg);
-			goto ex_err;
+	/* Initialize and fill the caches with initial entries */
+	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
+		ch = &cmd->cache[k];
+		spin_lock_init(&ch->lock);
+		INIT_LIST_HEAD(&ch->head);
+		ch->num_ent = cmd_cache_num_ent[k];
+		ch->max_inbox_size = cmd_cache_ent_size[k];
+		for (i = 0; i < ch->num_ent; i++) {
+			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
+						 ch->max_inbox_size, 0);
+			if (IS_ERR(msg))
+				break;
+			msg->parent = ch;
+			list_add_tail(&msg->list, &ch->head);
 		}
-		msg->cache = &cmd->cache.med;
-		list_add_tail(&msg->list, &cmd->cache.med.head);
 	}
-
-	return 0;
-
-ex_err:
-	destroy_msg_cache(dev);
-	return err;
 }
 
 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
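Note a consequence of the rewrite above: create_msg_cache() now returns void and cannot fail. Each pool entry is allocated with GFP_KERNEL | __GFP_NOWARN, and the inner loop simply stops filling a pool at the first failed allocation, leaving that pool shorter than num_ent. That is safe because alloc_msg() always falls back to mlx5_alloc_cmd_msg() on a cache miss, and it is what allows the error path to be dropped from mlx5_cmd_init() in the next hunk.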
@@ -1767,11 +1764,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 
 	cmd->mode = CMD_MODE_POLLING;
 
-	err = create_msg_cache(dev);
-	if (err) {
-		dev_err(&dev->pdev->dev, "failed to create command cache\n");
-		goto err_free_page;
-	}
+	create_msg_cache(dev);
 
 	set_wqname(dev);
 	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
@@ -208,7 +208,7 @@ struct mlx5_cmd_first {
 
 struct mlx5_cmd_msg {
 	struct list_head		list;
-	struct cache_ent	       *cache;
+	struct cmd_msg_cache	       *parent;
 	u32				len;
 	struct mlx5_cmd_first		first;
 	struct mlx5_cmd_mailbox	       *next;
@@ -228,17 +228,17 @@ struct mlx5_cmd_debug {
 	u16			outlen;
 };
 
-struct cache_ent {
+struct cmd_msg_cache {
 	/* protect block chain allocations
 	 */
 	spinlock_t		lock;
 	struct list_head	head;
+	unsigned int		max_inbox_size;
+	unsigned int		num_ent;
 };
 
-struct cmd_msg_cache {
-	struct cache_ent	large;
-	struct cache_ent	med;
-
+enum {
+	MLX5_NUM_COMMAND_CACHES = 5,
 };
 
 struct mlx5_cmd_stats {
@@ -281,7 +281,7 @@ struct mlx5_cmd {
 	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
 	struct pci_pool *pool;
 	struct mlx5_cmd_debug dbg;
-	struct cmd_msg_cache cache;
+	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
 	int checksum_disabled;
 	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
 };
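To make the new ownership model concrete, here is a minimal userspace model (hypothetical, simplified structs, no locking or IRQ flags) of the recycle path that free_msg() implements with the parent back-pointer: a cached message returns to whichever pool created it, while a one-off allocation is freed outright.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures above. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct cmd_msg_cache {
	struct list_head head;        /* free list of cached messages */
	unsigned int max_inbox_size;
};

struct cmd_msg {
	struct list_head list;
	struct cmd_msg_cache *parent; /* NULL if allocated outside a cache */
	unsigned int len;
};

/* Mirror of free_msg(): recycling is O(1) because each message carries
 * a pointer to its home pool, set once when the pool was filled.
 */
static void free_msg(struct cmd_msg *msg)
{
	if (msg->parent)
		list_add_tail(&msg->list, &msg->parent->head);
	else
		free(msg);
}

int main(void)
{
	struct cmd_msg_cache tier = { .max_inbox_size = 16 + 512 };
	struct cmd_msg *cached = malloc(sizeof(*cached));
	struct cmd_msg *oneoff = malloc(sizeof(*oneoff));

	INIT_LIST_HEAD(&tier.head);
	cached->parent = &tier;  /* as set by create_msg_cache() */
	oneoff->parent = NULL;   /* as left by the fallback allocation */

	free_msg(cached);        /* goes back on tier.head for reuse */
	free_msg(oneoff);        /* actually freed */
	printf("tier has a free entry again: %s\n",
	       tier.head.next != &tier.head ? "yes" : "no");
	return 0;
}

The design choice this illustrates: a message never searches for its home pool on the free path; the back-pointer set once at pool-fill time makes freeing constant-time regardless of how many pools exist.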