Commit 9821d8d4 authored by Eli Cohen, committed by Saeed Mahameed

lib: cpu_rmap: Use allocator for rmap entries

Use a proper allocator for rmap entries, implemented as a naive for loop
over the object array. An entry is considered free when it is NULL.
Remove the used field of struct cpu_rmap, which is no longer needed.

Also, return an error instead of crashing the kernel when no free entry
is available.

Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
parent 4e0473f1
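
For illustration, a minimal caller-side sketch of the contract described above: cpu_rmap_add() now returns -ENOSPC when no free (NULL) entry remains, so callers are expected to check for a negative return instead of relying on the removed BUG_ON(). The helper rmap_register_objs() and its arguments are hypothetical and not part of this patch.

#include <linux/cpu_rmap.h>
#include <linux/errno.h>

/* Hypothetical helper, for illustration only: add a set of objects to an
 * rmap and stop cleanly when cpu_rmap_add() reports that the map is full
 * (-ENOSPC), instead of hitting the old BUG_ON(). */
static int rmap_register_objs(struct cpu_rmap *rmap, void **objs, unsigned int n)
{
	unsigned int i;
	int index;

	for (i = 0; i < n; i++) {
		index = cpu_rmap_add(rmap, objs[i]);
		if (index < 0)
			return index;	/* no free (NULL) entry left */
	}
	return 0;
}
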
@@ -16,14 +16,13 @@
  * struct cpu_rmap - CPU affinity reverse-map
  * @refcount: kref for object
  * @size: Number of objects to be reverse-mapped
- * @used: Number of objects added
  * @obj: Pointer to array of object pointers
  * @near: For each CPU, the index and distance to the nearest object,
  *	  based on affinity masks
  */
 struct cpu_rmap {
 	struct kref	refcount;
-	u16		size, used;
+	u16		size;
 	void		**obj;
 	struct {
 		u16	index;
@@ -128,19 +128,31 @@ debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
 }
 #endif
 
+static int get_free_index(struct cpu_rmap *rmap)
+{
+	int i;
+
+	for (i = 0; i < rmap->size; i++)
+		if (!rmap->obj[i])
+			return i;
+
+	return -ENOSPC;
+}
+
 /**
  * cpu_rmap_add - add object to a rmap
  * @rmap: CPU rmap allocated with alloc_cpu_rmap()
  * @obj: Object to add to rmap
  *
- * Return index of object.
+ * Return index of object or -ENOSPC if no free entry was found
  */
 int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
 {
-	u16 index;
+	int index = get_free_index(rmap);
+
+	if (index < 0)
+		return index;
 
-	BUG_ON(rmap->used >= rmap->size);
-	index = rmap->used++;
 	rmap->obj[index] = obj;
 	return index;
 }
@@ -230,7 +242,7 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 	if (!rmap)
 		return;
 
-	for (index = 0; index < rmap->used; index++) {
+	for (index = 0; index < rmap->size; index++) {
 		glue = rmap->obj[index];
 		if (glue)
 			irq_set_affinity_notifier(glue->notify.irq, NULL);
@@ -295,13 +307,22 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	glue->notify.release = irq_cpu_rmap_release;
 	glue->rmap = rmap;
 	cpu_rmap_get(rmap);
-	glue->index = cpu_rmap_add(rmap, glue);
+	rc = cpu_rmap_add(rmap, glue);
+	if (rc < 0)
+		goto err_add;
+
+	glue->index = rc;
 	rc = irq_set_affinity_notifier(irq, &glue->notify);
-	if (rc) {
-		cpu_rmap_put(glue->rmap);
-		kfree(glue);
-	}
+	if (rc)
+		goto err_set;
+
+	return rc;
+
+err_set:
+	rmap->obj[glue->index] = NULL;
+err_add:
+	cpu_rmap_put(glue->rmap);
+	kfree(glue);
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
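
As a usage note on the reworked error path above: a driver that maps its IRQs can now bail out and free the rmap when irq_cpu_rmap_add() fails, rather than crashing. A hedged sketch follows; setup_irq_rmap(), the irqs array and the nvec count are hypothetical and only illustrate the error handling, they are not part of this patch.

#include <linux/cpu_rmap.h>

/* Hypothetical driver-side sketch: allocate an IRQ rmap, wire up each IRQ,
 * and unwind with free_irq_cpu_rmap() if any irq_cpu_rmap_add() call fails
 * (e.g. -ENOSPC propagated from cpu_rmap_add()). */
static struct cpu_rmap *setup_irq_rmap(const int *irqs, unsigned int nvec)
{
	struct cpu_rmap *rmap;
	unsigned int i;
	int rc;

	rmap = alloc_irq_cpu_rmap(nvec);
	if (!rmap)
		return NULL;

	for (i = 0; i < nvec; i++) {
		rc = irq_cpu_rmap_add(rmap, irqs[i]);
		if (rc) {
			free_irq_cpu_rmap(rmap);
			return NULL;
		}
	}
	return rmap;
}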