Commit 29cc6679 authored by Amir Vadai's avatar Amir Vadai Committed by David S. Miller

net/mlx5: Store counters in rbtree instead of list

In order to use bulk counters, we need to have counters sorted by id.
Signed-off-by: Amir Vadai <amir@vadai.me>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 18017723
...@@ -111,6 +111,7 @@ struct mlx5_fc_cache { ...@@ -111,6 +111,7 @@ struct mlx5_fc_cache {
}; };
struct mlx5_fc { struct mlx5_fc {
struct rb_node node;
struct list_head list; struct list_head list;
/* last{packets,bytes} members are used when calculating the delta since /* last{packets,bytes} members are used when calculating the delta since
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "fs_core.h" #include "fs_core.h"
#include "fs_cmd.h" #include "fs_cmd.h"
...@@ -68,6 +69,27 @@ ...@@ -68,6 +69,27 @@
* elapsed, the thread will actually query the hardware. * elapsed, the thread will actually query the hardware.
*/ */
/* Insert @counter into the id-sorted rbtree rooted at @root.
 *
 * Counters must be kept sorted by hardware id so that bulk queries can
 * walk a contiguous id range.  Duplicate ids (which should not occur)
 * land in the right subtree and are tolerated.
 *
 * Caller must hold whatever lock protects @root; this helper takes none.
 */
static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	/* Standard rbtree descent: find the leaf slot for counter->id. */
	while (*new) {
		struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);

		parent = *new;
		/* Compare ids directly.  Do NOT use subtraction
		 * (counter->id - this->id) folded into an int: the ids are
		 * unsigned, and once they differ by more than INT_MAX the
		 * truncated difference has the wrong sign, breaking the
		 * tree's ordering invariant.
		 */
		if (counter->id < this->id)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}
static void mlx5_fc_stats_work(struct work_struct *work) static void mlx5_fc_stats_work(struct work_struct *work)
{ {
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
...@@ -75,25 +97,35 @@ static void mlx5_fc_stats_work(struct work_struct *work) ...@@ -75,25 +97,35 @@ static void mlx5_fc_stats_work(struct work_struct *work)
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
unsigned long now = jiffies; unsigned long now = jiffies;
struct mlx5_fc *counter; struct mlx5_fc *counter;
struct mlx5_fc *tmp; struct rb_node *node;
LIST_HEAD(tmplist);
int err = 0; int err = 0;
spin_lock(&fc_stats->addlist_lock); spin_lock(&fc_stats->addlist_lock);
list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); list_splice_tail_init(&fc_stats->addlist, &tmplist);
if (!list_empty(&fc_stats->list)) if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD); queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
spin_unlock(&fc_stats->addlist_lock); spin_unlock(&fc_stats->addlist_lock);
list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) { list_for_each_entry(counter, &tmplist, list)
struct mlx5_fc_cache *c = &counter->cache; mlx5_fc_stats_insert(&fc_stats->counters, counter);
node = rb_first(&fc_stats->counters);
while (node) {
struct mlx5_fc_cache *c;
u64 packets; u64 packets;
u64 bytes; u64 bytes;
counter = rb_entry(node, struct mlx5_fc, node);
c = &counter->cache;
node = rb_next(node);
if (counter->deleted) { if (counter->deleted) {
list_del(&counter->list); rb_erase(&counter->node, &fc_stats->counters);
mlx5_cmd_fc_free(dev, counter->id); mlx5_cmd_fc_free(dev, counter->id);
...@@ -176,7 +208,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) ...@@ -176,7 +208,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{ {
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
INIT_LIST_HEAD(&fc_stats->list); fc_stats->counters = RB_ROOT;
INIT_LIST_HEAD(&fc_stats->addlist); INIT_LIST_HEAD(&fc_stats->addlist);
spin_lock_init(&fc_stats->addlist_lock); spin_lock_init(&fc_stats->addlist_lock);
...@@ -194,20 +226,32 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) ...@@ -194,20 +226,32 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct mlx5_fc *counter; struct mlx5_fc *counter;
struct mlx5_fc *tmp; struct mlx5_fc *tmp;
struct rb_node *node;
cancel_delayed_work_sync(&dev->priv.fc_stats.work); cancel_delayed_work_sync(&dev->priv.fc_stats.work);
destroy_workqueue(dev->priv.fc_stats.wq); destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL; dev->priv.fc_stats.wq = NULL;
list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
list_del(&counter->list); list_del(&counter->list);
mlx5_cmd_fc_free(dev, counter->id); mlx5_cmd_fc_free(dev, counter->id);
kfree(counter); kfree(counter);
} }
node = rb_first(&fc_stats->counters);
while (node) {
counter = rb_entry(node, struct mlx5_fc, node);
node = rb_next(node);
rb_erase(&counter->node, &fc_stats->counters);
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
} }
void mlx5_fc_query_cached(struct mlx5_fc *counter, void mlx5_fc_query_cached(struct mlx5_fc *counter,
......
...@@ -469,7 +469,7 @@ struct mlx5_irq_info { ...@@ -469,7 +469,7 @@ struct mlx5_irq_info {
}; };
struct mlx5_fc_stats { struct mlx5_fc_stats {
struct list_head list; struct rb_root counters;
struct list_head addlist; struct list_head addlist;
/* protect addlist add/splice operations */ /* protect addlist add/splice operations */
spinlock_t addlist_lock; spinlock_t addlist_lock;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment