Commit 3539b96e authored by Roman Gushchin, committed by Alexei Starovoitov

bpf: group memory related fields in struct bpf_map_memory

Group "user" and "pages" fields of bpf_map into the bpf_map_memory
structure. Later it can be extended with "memcg" and other related
information.

The main reason for such a change (besides cosmetics) is to pass the
bpf_map_memory structure to charging functions before the actual
allocation of bpf_map.
Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent d50836cd
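Beyond the mechanical rename, the point of the new container shows up later in
the series: a memlock charge can be prepared against a bpf_map_memory that is
not yet attached to any map, and moved into map->memory once allocation
succeeds. A minimal sketch of that direction, assuming the grouped struct from
this commit (the bpf_map_charge_init() helper is hypothetical here, not
introduced by this patch):

struct bpf_map_memory {
	u32 pages;                /* charged size, in pages */
	struct user_struct *user; /* user that owns the memlock charge */
};

/* Hypothetical follow-up helper: take the charge against a stand-alone
 * bpf_map_memory before the map itself exists.
 */
static int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = bpf_charge_memlock(user, pages);
	if (ret) {
		free_uid(user);
		return ret;
	}
	mem->pages = pages;
	mem->user = user;
	return 0;
}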
include/linux/bpf.h
@@ -66,6 +66,11 @@ struct bpf_map_ops {
 			     u64 imm, u32 *off);
 };
 
+struct bpf_map_memory {
+	u32 pages;
+	struct user_struct *user;
+};
+
 struct bpf_map {
 	/* The first two cachelines with read-mostly members of which some
 	 * are also accessed in fast-path (e.g. ops, max_entries).
@@ -86,7 +91,7 @@ struct bpf_map {
 	u32 btf_key_type_id;
 	u32 btf_value_type_id;
 	struct btf *btf;
-	u32 pages;
+	struct bpf_map_memory memory;
 	bool unpriv_array;
 	bool frozen; /* write-once */
 	/* 48 bytes hole */
@@ -94,8 +99,7 @@ struct bpf_map {
 	/* The 3rd and 4th cacheline with misc members to avoid false sharing
 	 * particularly with refcounting.
 	 */
-	struct user_struct *user ____cacheline_aligned;
-	atomic_t refcnt;
+	atomic_t refcnt ____cacheline_aligned;
 	atomic_t usercnt;
 	struct work_struct work;
 	char name[BPF_OBJ_NAME_LEN];
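A side effect worth noting in the hunk above: the old user pointer was what
anchored the write-mostly cacheline, so the ____cacheline_aligned attribute
moves onto refcnt, keeping the refcounting members on their own cacheline away
from the read-mostly fields. A small sketch of pinning such a layout
expectation at build time (the struct and the check are illustrative, not
from the commit):

#include <linux/cache.h>
#include <linux/stddef.h>
#include <linux/build_bug.h>
#include <linux/types.h>

struct example_map {
	char read_mostly[48];                  /* stand-in for ops, sizes, ... */
	atomic_t refcnt ____cacheline_aligned; /* starts the write-mostly block */
	atomic_t usercnt;
};

/* Catch accidental layout regressions if fields get shuffled later: */
static_assert(offsetof(struct example_map, refcnt) % SMP_CACHE_BYTES == 0);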
kernel/bpf/arraymap.c
@@ -138,7 +138,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
-	array->map.pages = cost;
+	array->map.memory.pages = cost;
 	array->elem_size = elem_size;
 
 	if (percpu && bpf_array_alloc_percpu(array)) {
kernel/bpf/cpumap.c
@@ -108,10 +108,10 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_cmap;
-	cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	cmap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	ret = bpf_map_precharge_memlock(cmap->map.pages);
+	ret = bpf_map_precharge_memlock(cmap->map.memory.pages);
 	if (ret) {
 		err = ret;
 		goto free_cmap;
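The same two-step pattern repeats in most allocators below: cap the byte cost
so that rounding cannot wrap a u32, then convert it to a page count. A
self-contained userspace illustration of the arithmetic (PAGE_SHIFT fixed at
12 here purely for the example):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

/* Mirrors the kernel pattern: reject early so rounding up cannot overflow. */
static uint32_t cost_to_pages(uint64_t cost)
{
	if (cost >= UINT32_MAX - PAGE_SIZE)
		return 0; /* callers bail out with -E2BIG and free the map */
	return ((cost + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%u\n", cost_to_pages(100000)); /* 100000 bytes -> 25 pages */
	return 0;
}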
kernel/bpf/devmap.c
@@ -111,10 +111,10 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_dtab;
 
-	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	dtab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* if map size is larger than memlock limit, reject it early */
-	err = bpf_map_precharge_memlock(dtab->map.pages);
+	err = bpf_map_precharge_memlock(dtab->map.memory.pages);
 	if (err)
 		goto free_dtab;
kernel/bpf/hashtab.c
@@ -364,10 +364,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		/* make sure page count doesn't overflow */
 		goto free_htab;
 
-	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	htab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* if map size is larger than memlock limit, reject it early */
-	err = bpf_map_precharge_memlock(htab->map.pages);
+	err = bpf_map_precharge_memlock(htab->map.memory.pages);
 	if (err)
 		goto free_htab;
kernel/bpf/local_storage.c
@@ -303,7 +303,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 	if (!map)
 		return ERR_PTR(-ENOMEM);
 
-	map->map.pages = pages;
+	map->map.memory.pages = pages;
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&map->map, attr);
kernel/bpf/lpm_trie.c
@@ -578,9 +578,9 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 		goto out_err;
 	}
 
-	trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	trie->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	ret = bpf_map_precharge_memlock(trie->map.pages);
+	ret = bpf_map_precharge_memlock(trie->map.memory.pages);
 	if (ret)
 		goto out_err;
kernel/bpf/queue_stack_maps.c
@@ -89,7 +89,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
 	bpf_map_init_from_attr(&qs->map, attr);
 
-	qs->map.pages = cost;
+	qs->map.memory.pages = cost;
 	qs->size = size;
 
 	raw_spin_lock_init(&qs->lock);
kernel/bpf/reuseport_array.c
@@ -176,7 +176,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
-	array->map.pages = cost;
+	array->map.memory.pages = cost;
 
 	return &array->map;
 }
kernel/bpf/stackmap.c
@@ -131,9 +131,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 
 	bpf_map_init_from_attr(&smap->map, attr);
 	smap->map.value_size = value_size;
 	smap->n_buckets = n_buckets;
-	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	smap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	err = bpf_map_precharge_memlock(smap->map.pages);
+	err = bpf_map_precharge_memlock(smap->map.memory.pages);
 	if (err)
 		goto free_smap;
kernel/bpf/syscall.c
@@ -222,19 +222,20 @@ static int bpf_map_init_memlock(struct bpf_map *map)
 	struct user_struct *user = get_current_user();
 	int ret;
 
-	ret = bpf_charge_memlock(user, map->pages);
+	ret = bpf_charge_memlock(user, map->memory.pages);
 	if (ret) {
 		free_uid(user);
 		return ret;
 	}
-	map->user = user;
+	map->memory.user = user;
 	return ret;
 }
 
 static void bpf_map_release_memlock(struct bpf_map *map)
 {
-	struct user_struct *user = map->user;
-	bpf_uncharge_memlock(user, map->pages);
+	struct user_struct *user = map->memory.user;
+
+	bpf_uncharge_memlock(user, map->memory.pages);
 	free_uid(user);
 }
@@ -242,17 +243,17 @@ int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
 {
 	int ret;
 
-	ret = bpf_charge_memlock(map->user, pages);
+	ret = bpf_charge_memlock(map->memory.user, pages);
 	if (ret)
 		return ret;
-	map->pages += pages;
+	map->memory.pages += pages;
 	return ret;
 }
 
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
 {
-	bpf_uncharge_memlock(map->user, pages);
-	map->pages -= pages;
+	bpf_uncharge_memlock(map->memory.user, pages);
+	map->memory.pages -= pages;
 }
 
 static int bpf_map_alloc_id(struct bpf_map *map)
@@ -395,7 +396,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 		   map->value_size,
 		   map->max_entries,
 		   map->map_flags,
-		   map->pages * 1ULL << PAGE_SHIFT,
+		   map->memory.pages * 1ULL << PAGE_SHIFT,
 		   map->id,
 		   READ_ONCE(map->frozen));
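Post-creation charging keeps working as before, just against the grouped
fields: bpf_map_charge_memlock() bumps both the user's memlock counter and
map->memory.pages, and bpf_map_uncharge_memlock() reverses it. A hedged sketch
of the expected call pattern in a map implementation (example_map_grow() and
example_alloc_backing() are hypothetical names, not from this commit):

/* Charge first, allocate second, roll the charge back on failure. */
static int example_map_grow(struct bpf_map *map, u32 extra_pages)
{
	int err;

	err = bpf_map_charge_memlock(map, extra_pages);
	if (err)
		return err; /* typically -EPERM when over RLIMIT_MEMLOCK */

	if (!example_alloc_backing(map, extra_pages)) { /* hypothetical */
		bpf_map_uncharge_memlock(map, extra_pages);
		return -ENOMEM;
	}
	return 0;
}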
kernel/bpf/xskmap.c
@@ -40,10 +40,10 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_m;
 
-	m->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	m->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	err = bpf_map_precharge_memlock(m->map.pages);
+	err = bpf_map_precharge_memlock(m->map.memory.pages);
 	if (err)
 		goto free_m;
net/core/bpf_sk_storage.c
@@ -659,7 +659,7 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 	smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
 	smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
 		BPF_SK_STORAGE_CACHE_SIZE;
-	smap->map.pages = pages;
+	smap->map.memory.pages = pages;
 
 	return &smap->map;
 }
net/core/sock_map.c
@@ -49,8 +49,8 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 		goto free_stab;
 	}
 
-	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	stab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	err = bpf_map_precharge_memlock(stab->map.pages);
+	err = bpf_map_precharge_memlock(stab->map.memory.pages);
 	if (err)
 		goto free_stab;