Commit d01f9b19 authored by Thomas Gleixner, committed by Alexei Starovoitov

bpf: Factor out hashtab bucket lock operations

As a preparation for making the BPF locking RT friendly, factor out the
hash bucket lock operations into inline functions. This allows the
necessary RT modifications to be made in one place instead of being
sprinkled all over the code. No functional change.

The htab argument of the lock/unlock functions is unused for now; it will
be used in the next step, which adds PREEMPT_RT support.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145644.420416916@linutronix.de
parent b6e5dae1
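
At its core the patch converts every bucket lock call site from open coded
irqsave locking to the new inline helpers. A condensed before/after sketch of
the pattern (names exactly as in the diff below; the middle comment stands in
for the per-call-site work):

	/* before: lock handling open coded at every call site */
	raw_spin_lock_irqsave(&b->lock, flags);
	/* ... per-call-site work ... */
	raw_spin_unlock_irqrestore(&b->lock, flags);

	/* after: the lock flavour is hidden behind inline helpers, so an
	 * RT specific variant only has to touch one place
	 */
	flags = htab_lock_bucket(htab, b);
	/* ... per-call-site work ... */
	htab_unlock_bucket(htab, b, flags);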
@@ -88,6 +88,32 @@ struct htab_elem {
 	char key[0] __aligned(8);
 };
 
+static void htab_init_buckets(struct bpf_htab *htab)
+{
+	unsigned i;
+
+	for (i = 0; i < htab->n_buckets; i++) {
+		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
+		raw_spin_lock_init(&htab->buckets[i].lock);
+	}
+}
+
+static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
+					     struct bucket *b)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&b->lock, flags);
+	return flags;
+}
+
+static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+				      struct bucket *b,
+				      unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&b->lock, flags);
+}
+
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
 
 static bool htab_is_lru(const struct bpf_htab *htab)
@@ -348,8 +374,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	struct bpf_htab *htab;
-	int err, i;
 	u64 cost;
+	int err;
 
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
@@ -411,10 +437,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	else
 		htab->hashrnd = get_random_int();
 
-	for (i = 0; i < htab->n_buckets; i++) {
-		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
-		raw_spin_lock_init(&htab->buckets[i].lock);
-	}
+	htab_init_buckets(htab);
 
 	if (prealloc) {
 		err = prealloc_init(htab);
@@ -622,7 +645,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 	b = __select_bucket(htab, tgt_l->hash);
 	head = &b->head;
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 		if (l == tgt_l) {
@@ -630,7 +653,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 			break;
 		}
 
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 
 	return l == tgt_l;
 }
@@ -896,7 +919,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 */
 	}
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -937,7 +960,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	}
 	ret = 0;
 err:
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	return ret;
 }
@@ -975,7 +998,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 		return -ENOMEM;
 	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -994,7 +1017,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	ret = 0;
 err:
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 
 	if (ret)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
@@ -1029,7 +1052,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1052,7 +1075,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	return ret;
 }
@@ -1092,7 +1115,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		return -ENOMEM;
 	}
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1114,7 +1137,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	if (l_new)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 	return ret;
@@ -1152,7 +1175,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1162,7 +1185,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 		ret = 0;
 	}
 
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	return ret;
 }
@@ -1184,7 +1207,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1193,7 +1216,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 		ret = 0;
 	}
 
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	if (l)
 		bpf_lru_push_free(&htab->lru, &l->lru_node);
 	return ret;
@@ -1342,7 +1365,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 	head = &b->head;
 	/* do not grab the lock unless need it (bucket_cnt > 0). */
 	if (locked)
-		raw_spin_lock_irqsave(&b->lock, flags);
+		flags = htab_lock_bucket(htab, b);
 
 	bucket_cnt = 0;
 	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
@@ -1359,7 +1382,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		raw_spin_unlock_irqrestore(&b->lock, flags);
+		htab_unlock_bucket(htab, b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		goto after_loop;
@@ -1370,7 +1393,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		raw_spin_unlock_irqrestore(&b->lock, flags);
+		htab_unlock_bucket(htab, b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		kvfree(keys);
@@ -1423,7 +1446,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		dst_val += value_size;
 	}
 
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	locked = false;
 
 	while (node_to_free) {
......
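
For illustration only: with the lock handling centralized, a later PREEMPT_RT
change could pick a different lock flavour per map through the (currently
unused) htab argument without touching a single call site. The sketch below is
one hypothetical shape of such a change; htab_use_raw_lock(), the raw_lock/lock
split in struct bucket and the choice of a sleeping spinlock_t are assumptions
made for this example, not contents of this commit.

	/* Hypothetical sketch, not the actual follow-up patch. Assumes struct
	 * bucket carries both a raw_spinlock_t raw_lock and a spinlock_t lock,
	 * and that a helper htab_use_raw_lock() decides per map which one is
	 * used.
	 */
	static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
						     struct bucket *b)
	{
		unsigned long flags = 0;

		if (htab_use_raw_lock(htab))
			raw_spin_lock_irqsave(&b->raw_lock, flags);
		else
			spin_lock(&b->lock);	/* sleeping lock on PREEMPT_RT */
		return flags;
	}

	static inline void htab_unlock_bucket(const struct bpf_htab *htab,
					      struct bucket *b,
					      unsigned long flags)
	{
		if (htab_use_raw_lock(htab))
			raw_spin_unlock_irqrestore(&b->raw_lock, flags);
		else
			spin_unlock(&b->lock);
	}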