Commit a3b89864 authored by Peter Zijlstra, committed by Ingo Molnar

rbtree, perf: Use new rbtree helpers

Reduce rbtree boilerplate by using the new helpers.

One noteworthy change is the unification of the various (partial) compare
functions. A subtree match is constructed by forcing the sub-order to
always match; see __group_cmp().

Due to the 'const' qualifiers on the new compare helpers, cgroup_id() had to
be touched as well.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
parent 8ecca394
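
For readers coming to this from the rbtree side, below is a minimal sketch of
the pattern the patch adopts. 'struct foo', its fields and the foo_* helpers
are made up for illustration; only rb_add(), rb_find_first(), rb_entry() and
the callback signatures come from the helpers introduced by the parent
commit:

#include <linux/rbtree.h>
#include <linux/types.h>

/* Illustrative only: this struct and these helpers are not part of the patch. */
struct foo {
	u64		id;
	struct rb_node	node;
};

#define __node_2_foo(n)	rb_entry((n), struct foo, node)

/* boolean order, consumed by rb_add() on insertion */
static inline bool foo_less(struct rb_node *a, const struct rb_node *b)
{
	return __node_2_foo(a)->id < __node_2_foo(b)->id;
}

/* three-way compare against a search key, consumed by rb_find_first() */
static inline int foo_cmp(const void *key, const struct rb_node *node)
{
	const u64 *id = key;
	const struct foo *f = __node_2_foo(node);

	if (*id < f->id)
		return -1;
	if (*id > f->id)
		return 1;
	return 0;
}

static void foo_insert(struct rb_root *root, struct foo *f)
{
	/* replaces an open-coded rb_link_node() + rb_insert_color() walk */
	rb_add(&f->node, root, foo_less);
}

static struct foo *foo_lookup(struct rb_root *root, u64 id)
{
	/* leftmost node whose key compares equal, or NULL */
	struct rb_node *node = rb_find_first(&id, root, foo_cmp);

	return node ? __node_2_foo(node) : NULL;
}

The patch applies exactly this split to perf_event::group_node: __group_less()
feeds rb_add(), and __group_cmp() feeds rb_find_first()/rb_next_match().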
include/linux/cgroup.h

@@ -307,7 +307,7 @@ void css_task_iter_end(struct css_task_iter *it);
  * Inline functions.
  */
-static inline u64 cgroup_id(struct cgroup *cgrp)
+static inline u64 cgroup_id(const struct cgroup *cgrp)
 {
 	return cgrp->kn->id;
 }
@@ -701,7 +701,7 @@ void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
 struct cgroup_subsys_state;
 struct cgroup;
-static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
+static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
 static inline void css_get(struct cgroup_subsys_state *css) {}
 static inline void css_put(struct cgroup_subsys_state *css) {}
 static inline int cgroup_attach_task_all(struct task_struct *from,
kernel/events/core.c

@@ -1595,50 +1595,91 @@ static void perf_event_groups_init(struct perf_event_groups *groups)
 	groups->index = 0;
 }
 
+static inline struct cgroup *event_cgroup(const struct perf_event *event)
+{
+	struct cgroup *cgroup = NULL;
+
+#ifdef CONFIG_CGROUP_PERF
+	if (event->cgrp)
+		cgroup = event->cgrp->css.cgroup;
+#endif
+
+	return cgroup;
+}
+
 /*
  * Compare function for event groups;
  *
  * Implements complex key that first sorts by CPU and then by virtual index
  * which provides ordering when rotating groups for the same CPU.
  */
-static bool
-perf_event_groups_less(struct perf_event *left, struct perf_event *right)
+static __always_inline int
+perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup,
+		      const u64 left_group_index, const struct perf_event *right)
 {
-	if (left->cpu < right->cpu)
-		return true;
-	if (left->cpu > right->cpu)
-		return false;
+	if (left_cpu < right->cpu)
+		return -1;
+	if (left_cpu > right->cpu)
+		return 1;
 
 #ifdef CONFIG_CGROUP_PERF
-	if (left->cgrp != right->cgrp) {
-		if (!left->cgrp || !left->cgrp->css.cgroup) {
-			/*
-			 * Left has no cgroup but right does, no cgroups come
-			 * first.
-			 */
-			return true;
-		}
-		if (!right->cgrp || !right->cgrp->css.cgroup) {
-			/*
-			 * Right has no cgroup but left does, no cgroups come
-			 * first.
-			 */
-			return false;
-		}
-		/* Two dissimilar cgroups, order by id. */
-		if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id)
-			return true;
+	{
+		const struct cgroup *right_cgroup = event_cgroup(right);
 
-		return false;
+		if (left_cgroup != right_cgroup) {
+			if (!left_cgroup) {
+				/*
+				 * Left has no cgroup but right does, no
+				 * cgroups come first.
+				 */
+				return -1;
+			}
+			if (!right_cgroup) {
+				/*
+				 * Right has no cgroup but left does, no
+				 * cgroups come first.
+				 */
+				return 1;
+			}
+			/* Two dissimilar cgroups, order by id. */
+			if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup))
+				return -1;
+
+			return 1;
+		}
 	}
 #endif
 
-	if (left->group_index < right->group_index)
-		return true;
-	if (left->group_index > right->group_index)
-		return false;
+	if (left_group_index < right->group_index)
+		return -1;
+	if (left_group_index > right->group_index)
+		return 1;
 
-	return false;
+	return 0;
 }
 
+#define __node_2_pe(node) \
+	rb_entry((node), struct perf_event, group_node)
+
+static inline bool __group_less(struct rb_node *a, const struct rb_node *b)
+{
+	struct perf_event *e = __node_2_pe(a);
+
+	return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index,
+				     __node_2_pe(b)) < 0;
+}
+
+struct __group_key {
+	int cpu;
+	struct cgroup *cgroup;
+};
+
+static inline int __group_cmp(const void *key, const struct rb_node *node)
+{
+	const struct __group_key *a = key;
+	const struct perf_event *b = __node_2_pe(node);
+
+	/* partial/subtree match: @cpu, @cgroup; ignore: @group_index */
+	return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b);
+}
+
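The "subtree match" mentioned in the changelog is visible in __group_cmp()
above: it passes the node's own b->group_index back in as the left-hand index,
so that component always compares equal and only {cpu, cgroup} take part in
the ordering. A hypothetical caller-side illustration (the key values are made
up; 'groups' stands for a struct perf_event_groups, as in
perf_event_groups_first() below):

	struct __group_key key = {
		.cpu	= 1,	/* made-up example values */
		.cgroup	= NULL,
	};

	/*
	 * Because __group_cmp() ignores group_index, every event with
	 * {cpu == 1, cgroup == NULL} compares equal to the key, and
	 * rb_find_first() returns the leftmost of them, i.e. the one
	 * with the smallest group_index.
	 */
	struct rb_node *node = rb_find_first(&key, &groups->tree, __group_cmp);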
@@ -1650,27 +1691,9 @@ static void
 perf_event_groups_insert(struct perf_event_groups *groups,
 			 struct perf_event *event)
 {
-	struct perf_event *node_event;
-	struct rb_node *parent;
-	struct rb_node **node;
-
 	event->group_index = ++groups->index;
 
-	node = &groups->tree.rb_node;
-	parent = *node;
-
-	while (*node) {
-		parent = *node;
-		node_event = container_of(*node, struct perf_event, group_node);
-
-		if (perf_event_groups_less(event, node_event))
-			node = &parent->rb_left;
-		else
-			node = &parent->rb_right;
-	}
-
-	rb_link_node(&event->group_node, parent, node);
-	rb_insert_color(&event->group_node, &groups->tree);
+	rb_add(&event->group_node, &groups->tree, __group_less);
 }
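For reference, rb_add() from the parent commit performs the same
descend-and-link walk that the removed code open-coded, just with the
comparison supplied as a callback. Roughly (paraphrased from
include/linux/rbtree.h; details may differ):

	static __always_inline void
	rb_add(struct rb_node *node, struct rb_root *tree,
	       bool (*less)(struct rb_node *, const struct rb_node *))
	{
		struct rb_node **link = &tree->rb_node;
		struct rb_node *parent = NULL;

		while (*link) {
			parent = *link;
			if (less(node, parent))
				link = &parent->rb_left;
			else
				link = &parent->rb_right;
		}

		rb_link_node(node, parent, link);
		rb_insert_color(node, tree);
	}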
@@ -1718,45 +1741,17 @@ static struct perf_event *
 perf_event_groups_first(struct perf_event_groups *groups, int cpu,
 			struct cgroup *cgrp)
 {
-	struct perf_event *node_event = NULL, *match = NULL;
-	struct rb_node *node = groups->tree.rb_node;
-#ifdef CONFIG_CGROUP_PERF
-	u64 node_cgrp_id, cgrp_id = 0;
-
-	if (cgrp)
-		cgrp_id = cgrp->kn->id;
-#endif
-
-	while (node) {
-		node_event = container_of(node, struct perf_event, group_node);
-
-		if (cpu < node_event->cpu) {
-			node = node->rb_left;
-			continue;
-		}
-		if (cpu > node_event->cpu) {
-			node = node->rb_right;
-			continue;
-		}
-#ifdef CONFIG_CGROUP_PERF
-		node_cgrp_id = 0;
-		if (node_event->cgrp && node_event->cgrp->css.cgroup)
-			node_cgrp_id = node_event->cgrp->css.cgroup->kn->id;
-
-		if (cgrp_id < node_cgrp_id) {
-			node = node->rb_left;
-			continue;
-		}
-		if (cgrp_id > node_cgrp_id) {
-			node = node->rb_right;
-			continue;
-		}
-#endif
-		match = node_event;
-		node = node->rb_left;
-	}
-
-	return match;
+	struct __group_key key = {
+		.cpu = cpu,
+		.cgroup = cgrp,
+	};
+	struct rb_node *node;
+
+	node = rb_find_first(&key, &groups->tree, __group_cmp);
+	if (node)
+		return __node_2_pe(node);
+
+	return NULL;
 }
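rb_find_first() mirrors the removed loop: on an equal compare it records the
match and keeps descending left, so the leftmost (lowest group_index) event
for the {cpu, cgroup} key is returned. Roughly (paraphrased; details may
differ):

	static __always_inline struct rb_node *
	rb_find_first(const void *key, const struct rb_root *tree,
		      int (*cmp)(const void *key, const struct rb_node *))
	{
		struct rb_node *node = tree->rb_node;
		struct rb_node *match = NULL;

		while (node) {
			int c = cmp(key, node);

			if (c <= 0) {
				if (!c)
					match = node;	/* remember, keep going left */
				node = node->rb_left;
			} else {
				node = node->rb_right;
			}
		}

		return match;
	}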
@@ -1765,27 +1760,17 @@ perf_event_groups_first(struct perf_event_groups *groups, int cpu,
 static struct perf_event *
 perf_event_groups_next(struct perf_event *event)
 {
-	struct perf_event *next;
-#ifdef CONFIG_CGROUP_PERF
-	u64 curr_cgrp_id = 0;
-	u64 next_cgrp_id = 0;
-#endif
+	struct __group_key key = {
+		.cpu = event->cpu,
+		.cgroup = event_cgroup(event),
+	};
+	struct rb_node *next;
 
-	next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
-	if (next == NULL || next->cpu != event->cpu)
-		return NULL;
-
-#ifdef CONFIG_CGROUP_PERF
-	if (event->cgrp && event->cgrp->css.cgroup)
-		curr_cgrp_id = event->cgrp->css.cgroup->kn->id;
-
-	if (next->cgrp && next->cgrp->css.cgroup)
-		next_cgrp_id = next->cgrp->css.cgroup->kn->id;
-
-	if (curr_cgrp_id != next_cgrp_id)
-		return NULL;
-#endif
-	return next;
+	next = rb_next_match(&key, &event->group_node, __group_cmp);
+	if (next)
+		return __node_2_pe(next);
+
+	return NULL;
 }
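Finally, rb_next_match() is rb_next() plus a check that the successor still
compares equal to the key, replacing the removed hand-rolled cpu and
cgroup-id checks. Roughly (paraphrased; details may differ):

	static __always_inline struct rb_node *
	rb_next_match(const void *key, struct rb_node *node,
		      int (*cmp)(const void *key, const struct rb_node *))
	{
		node = rb_next(node);
		if (node && cmp(key, node))
			node = NULL;	/* successor no longer matches the key */
		return node;
	}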