Commit 66150d0d authored by Thomas Gleixner, committed by Alexei Starovoitov

bpf, lpm: Make locking RT friendly

The LPM trie map cannot be used in contexts like perf, kprobes and tracing
as this map type dynamically allocates memory.

The memory allocation happens with a raw spinlock held, which on a PREEMPT_RT
enabled kernel is a truly spinning lock that disables preemption and
interrupts.

As RT does not allow memory allocation from such a section for various
reasons, convert the raw spinlock to a regular spinlock.

On an RT enabled kernel these locks are substituted by 'sleeping' spinlocks,
which provide the proper protection but keep the code preemptible.

On a non-RT kernel, regular spinlocks map to raw spinlocks, i.e. this does
not cause any functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145644.602129531@linutronix.de
parent 7f805d17
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -34,7 +34,7 @@ struct lpm_trie {
 	size_t				n_entries;
 	size_t				max_prefixlen;
 	size_t				data_size;
-	raw_spinlock_t			lock;
+	spinlock_t			lock;
 };
 
 /* This trie implements a longest prefix match algorithm that can be used to
@@ -315,7 +315,7 @@ static int trie_update_elem(struct bpf_map *map,
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&trie->lock, irq_flags);
+	spin_lock_irqsave(&trie->lock, irq_flags);
 
 	/* Allocate and fill a new node */
@@ -422,7 +422,7 @@ static int trie_update_elem(struct bpf_map *map,
 		kfree(im_node);
 	}
 
-	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+	spin_unlock_irqrestore(&trie->lock, irq_flags);
 
 	return ret;
 }
@@ -442,7 +442,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
 	if (key->prefixlen > trie->max_prefixlen)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&trie->lock, irq_flags);
+	spin_lock_irqsave(&trie->lock, irq_flags);
 
 	/* Walk the tree looking for an exact key/length match and keeping
 	 * track of the path we traverse. We will need to know the node
@@ -518,7 +518,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
 	kfree_rcu(node, rcu);
 
 out:
-	raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+	spin_unlock_irqrestore(&trie->lock, irq_flags);
 
 	return ret;
 }
@@ -575,7 +575,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	if (ret)
 		goto out_err;
 
-	raw_spin_lock_init(&trie->lock);
+	spin_lock_init(&trie->lock);
 
 	return &trie->map;
 out_err:
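
For context, the following is a minimal sketch of the pattern this conversion
enables; it is not part of the patch, and the names (struct example_map,
example_update) are hypothetical stand-ins for struct lpm_trie and its update
path. With spinlock_t, the critical section still runs with interrupts
disabled on a non-RT kernel, but on PREEMPT_RT the lock is substituted by a
sleeping lock and the section stays preemptible, so a non-sleeping allocation
inside it is permitted:

	#include <linux/spinlock.h>
	#include <linux/slab.h>

	struct example_map {		/* hypothetical stand-in for struct lpm_trie */
		spinlock_t lock;	/* was raw_spinlock_t before this commit */
	};

	static int example_update(struct example_map *map)
	{
		unsigned long flags;
		void *node;

		/* !PREEMPT_RT: compiles to the raw spinlock, IRQs off as before.
		 * PREEMPT_RT: sleeping lock, the section remains preemptible.
		 */
		spin_lock_irqsave(&map->lock, flags);

		/* A non-sleeping allocation is legitimate under both
		 * configurations; on RT it no longer happens inside a
		 * truly atomic section.
		 */
		node = kmalloc(64, GFP_ATOMIC | __GFP_NOWARN);
		if (!node) {
			spin_unlock_irqrestore(&map->lock, flags);
			return -ENOMEM;
		}

		/* ... link the node into the map ... */

		spin_unlock_irqrestore(&map->lock, flags);
		return 0;
	}

Because spinlock_t maps to the raw variant on non-RT builds, the sketch (like
the patch itself) is a functional no-op there.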