Commit 7f9ad2ac authored by David S. Miller

Merge branch 'bpf-lsm-hooks'

Chenbo Feng says:

====================
bpf: security: New file mode and LSM hooks for eBPF object permission control

Much like files and sockets, eBPF objects are accessed, controlled, and
shared via a file descriptor (FD). Unlike files and sockets, the
existing mechanism for eBPF object access control is very limited.
Currently there are two options for granting access to eBPF
operations: grant access to all processes, or only to CAP_SYS_ADMIN
processes. The CAP_SYS_ADMIN-only mode is not ideal because most users
do not have this capability and granting a user CAP_SYS_ADMIN grants too
many other security-sensitive permissions. It also unnecessarily allows
all CAP_SYS_ADMIN processes access to eBPF functionality. Allowing all
processes access to eBPF objects is also undesirable, since it lets
unprivileged processes consume kernel memory and exposes additional
kernel attack surface.

Adding LSM hooks maintains the status quo for systems which do not use
an LSM, preserving compatibility with userspace, while allowing security
modules to choose how best to handle permissions on eBPF objects. Here
is a possible use case for the LSM hooks with the SELinux module:

The network-control daemon (netd) creates and loads an eBPF object for
network packet filtering and analysis. It passes the object FD to an
unprivileged network monitor app (netmonitor), which is not allowed to
create, modify or load eBPF objects, but is allowed to read the traffic
stats from the map.

SELinux could use these hooks to grant the following permissions:
allow netd self:bpf_map { create read write };
allow netmonitor netd:fd use;
allow netmonitor netd:bpf_map read;
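
As an illustration of that flow, here is a minimal userspace sketch. It
assumes netd has already created and pinned the stats map at a hypothetical
path /sys/fs/bpf/traffic_stats (the path and function names are invented for
illustration); netmonitor then requests a read-only fd with BPF_OBJ_GET and
the new file_flags field, which requires the updated uapi linux/bpf.h from
this series:

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical pin path chosen by netd; not part of this patch set. */
#define STATS_MAP_PATH "/sys/fs/bpf/traffic_stats"

static int sys_bpf(int cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

/* netmonitor side: obtain a read-only fd to the pinned map. */
static int open_stats_map_rdonly(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (uint64_t)(unsigned long)STATS_MAP_PATH;
	attr.file_flags = BPF_F_RDONLY;

	/* bpf_map_update_elem() on the returned fd will fail with EPERM,
	 * and an LSM can refuse the fd entirely from its bpf_map hook.
	 */
	return sys_bpf(BPF_OBJ_GET, &attr);
}

If file_flags is left at zero, the fd comes back read-write exactly as it did
before this series, so existing users are unaffected.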

In this patch series, a file mode is added to bpf maps to store the
access mode. With these file mode flags, a map fd can be obtained
read-only, write-only or read-write. Building on this file mode, several
security hooks are added to the eBPF syscall implementations to perform
permission checks. These LSM hooks mainly check a process's privileges
before it obtains the fd for a specific bpf object, whether from a
pinned file location or from an eBPF id. Besides that, a general check
hook is also invoked at the start of every bpf syscall so that each
security module can apply its own policy to the rest of the bpf-object
related functionality.
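
For the by-id path, a similar hedged sketch (the helper name is invented; the
map id is assumed to have been obtained elsewhere, for instance via
BPF_MAP_GET_NEXT_ID, and the diff below keeps this command restricted to
CAP_SYS_ADMIN):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Request a write-only fd for the map identified by id. */
static int map_fd_by_id_wronly(unsigned int id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;
	attr.open_flags = BPF_F_WRONLY;

	return syscall(__NR_bpf, BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}

Element lookups through such an fd then fail with EPERM, because
FMODE_CAN_READ is never set on it.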

In order to store the ownership and security information about eBPF
maps, a security field pointer is added to struct bpf_map. The last two
patches implement the SELinux checks for these newly introduced hooks,
plus an additional check when an eBPF object fd is passed between
processes over a unix domain socket or via binder IPC.
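
For reference, the unix-socket case above is ordinary SCM_RIGHTS fd passing;
the sketch below shows the sending side (the connected socket pair and the
bpf fd are assumed to already exist, and the helper name is invented). It is
the receipt of this message that selinux_file_receive, and with it the new
bpf_fd_pass() check, mediates:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* e.g. netd handing its map fd to netmonitor over a connected unix socket. */
static int send_bpf_fd(int sock, int bpf_fd)
{
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	char byte = 0;
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &bpf_fd, sizeof(int));

	return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}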

Change since V1:

 - Whitelist the new bpf flags in the map allocate check.
 - Added bpf selftest for the new flags.
 - Added two new security hooks for copying the security information from
   the bpf object security struct to the file security struct.
 - Simplified the checking action when bpf fd is passed between processes.

 Change since V2:

 - Fixed the line break problem for the map flags check.
 - Fixed the typo in the selinux file mode check.
 - Merged bpf_map and bpf_prog into one selinux class.
 - Added bpf_type and bpf_sid to the file security struct to store the
   security information when generating the fd.
 - Added the hook to bpf_map_new_fd and bpf_prog_new_fd.

 Change since V3:

 - Return the actual error from the security check instead of -EPERM.
 - Move the hooks into anon_inode_getfd() to avoid getting the file again
   after the bpf object file has been installed with an fd.
 - Removed the bpf_sid field inside file_security_struct to reduce the
   cache size.

 Change since V4:

 - Rename the bpf av prog_use to prog_run to distinguish it from fd_use.
 - Remove the bpf_type field inside file_security_struct and use the bpf
   fops to identify bpf objects instead.

 Change since v5:

 - Fixed the incorrect selinux class name for SECCLASS_BPF

 Change since v7:

 - Fixed the build error caused by the xt_bpf module.
 - Add a flags check to bpf_obj_get() and bpf_map_get_fd_by_id() so that
   unknown flags are rejected at the uapi boundary.
 - Add the flags argument to the bpf_obj_get_user() stub used when
   BPF_SYSCALL is not configured.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents aec72f33 f66e448c
......@@ -57,6 +57,9 @@ struct bpf_map {
atomic_t usercnt;
struct bpf_map *inner_map_meta;
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
#endif
};
/* function argument constraints */
......@@ -193,6 +196,9 @@ struct bpf_prog_aux {
struct user_struct *user;
u64 load_time; /* ns since boottime */
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
#endif
union {
struct work_struct work;
struct rcu_head rcu;
......@@ -282,6 +288,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
#define BPF_PROG_TYPE(_id, _name) \
extern const struct bpf_prog_ops _name ## _prog_ops; \
extern const struct bpf_verifier_ops _name ## _verifier_ops;
......@@ -315,11 +324,11 @@ void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
int bpf_map_new_fd(struct bpf_map *map);
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
......@@ -338,6 +347,8 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_get_file_flag(int flags);
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
* forced to use 'long' read/writes to try to atomically copy long counters.
* Best-effort only. No barriers here, since it _will_ race with concurrent
......@@ -421,7 +432,7 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}
static inline int bpf_obj_get_user(const char __user *pathname)
static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
return -EOPNOTSUPP;
}
......
......@@ -1351,6 +1351,40 @@
* @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
*
* Security hooks for using the eBPF maps and programs functionalities through
* eBPF syscalls.
*
* @bpf:
* Do an initial check for all bpf syscalls after the attributes have been
* copied into the kernel. Each security module can implement its own
* rules to check the specific cmd it needs.
*
* @bpf_map:
* Do a check when the kernel generates and returns a file descriptor for
* eBPF maps.
*
* @map: bpf map that we want to access
* @fmode: the requested file mode for the map fd
*
* @bpf_prog:
* Do a check when the kernel generates and returns a file descriptor for
* eBPF programs.
*
* @prog: bpf prog that userspace wants to use.
*
* @bpf_map_alloc_security:
* Initialize the security field inside bpf map.
*
* @bpf_map_free_security:
* Clean up the security information stored inside bpf map.
*
* @bpf_prog_alloc_security:
* Initialize the security field inside bpf program.
*
* @bpf_prog_free_security:
* Clean up the security information stored inside bpf prog.
*
*/
union security_list_options {
int (*binder_set_context_mgr)(struct task_struct *mgr);
......@@ -1682,6 +1716,17 @@ union security_list_options {
struct audit_context *actx);
void (*audit_rule_free)(void *lsmrule);
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
int (*bpf)(int cmd, union bpf_attr *attr,
unsigned int size);
int (*bpf_map)(struct bpf_map *map, fmode_t fmode);
int (*bpf_prog)(struct bpf_prog *prog);
int (*bpf_map_alloc_security)(struct bpf_map *map);
void (*bpf_map_free_security)(struct bpf_map *map);
int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux);
void (*bpf_prog_free_security)(struct bpf_prog_aux *aux);
#endif /* CONFIG_BPF_SYSCALL */
};
struct security_hook_heads {
......@@ -1901,6 +1946,15 @@ struct security_hook_heads {
struct list_head audit_rule_match;
struct list_head audit_rule_free;
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
struct list_head bpf;
struct list_head bpf_map;
struct list_head bpf_prog;
struct list_head bpf_map_alloc_security;
struct list_head bpf_map_free_security;
struct list_head bpf_prog_alloc_security;
struct list_head bpf_prog_free_security;
#endif /* CONFIG_BPF_SYSCALL */
} __randomize_layout;
/*
......
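
For a security module other than SELinux, consuming the new hooks would look
roughly like the following hypothetical sketch (the module name "example" and
its CAP_NET_ADMIN rule are invented purely for illustration; the real SELinux
wiring appears later in this series):

#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/lsm_hooks.h>

/* Deny write access to bpf maps for tasks lacking CAP_NET_ADMIN. */
static int example_bpf_map(struct bpf_map *map, fmode_t fmode)
{
	if ((fmode & FMODE_WRITE) && !capable(CAP_NET_ADMIN))
		return -EACCES;
	return 0;
}

static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
#ifdef CONFIG_BPF_SYSCALL
	LSM_HOOK_INIT(bpf_map, example_bpf_map),
#endif
};

static int __init example_lsm_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks), "example");
	return 0;
}
security_initcall(example_lsm_init);
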
......@@ -31,6 +31,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/bpf.h>
struct linux_binprm;
struct cred;
......@@ -1730,6 +1731,50 @@ static inline void securityfs_remove(struct dentry *dentry)
#endif
#ifdef CONFIG_BPF_SYSCALL
#ifdef CONFIG_SECURITY
extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
extern int security_bpf_prog(struct bpf_prog *prog);
extern int security_bpf_map_alloc(struct bpf_map *map);
extern void security_bpf_map_free(struct bpf_map *map);
extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux);
extern void security_bpf_prog_free(struct bpf_prog_aux *aux);
#else
static inline int security_bpf(int cmd, union bpf_attr *attr,
unsigned int size)
{
return 0;
}
static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
return 0;
}
static inline int security_bpf_prog(struct bpf_prog *prog)
{
return 0;
}
static inline int security_bpf_map_alloc(struct bpf_map *map)
{
return 0;
}
static inline void security_bpf_map_free(struct bpf_map *map)
{ }
static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
{
return 0;
}
static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
{ }
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_BPF_SYSCALL */
#ifdef CONFIG_SECURITY
static inline char *alloc_secdata(void)
......
......@@ -218,6 +218,10 @@ enum bpf_attach_type {
#define BPF_OBJ_NAME_LEN 16U
/* Flags for accessing BPF object */
#define BPF_F_RDONLY (1U << 3)
#define BPF_F_WRONLY (1U << 4)
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
......@@ -260,6 +264,7 @@ union bpf_attr {
struct { /* anonymous struct used by BPF_OBJ_* commands */
__aligned_u64 pathname;
__u32 bpf_fd;
__u32 file_flags;
};
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
......@@ -287,6 +292,7 @@ union bpf_attr {
__u32 map_id;
};
__u32 next_id;
__u32 open_flags;
};
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
......
......@@ -19,6 +19,9 @@
#include "map_in_map.h"
#define ARRAY_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
static void bpf_array_free_percpu(struct bpf_array *array)
{
int i;
......@@ -56,7 +59,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size == 0 || attr->map_flags & ~BPF_F_NUMA_NODE ||
attr->value_size == 0 ||
attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
(percpu && numa_node != NUMA_NO_NODE))
return ERR_PTR(-EINVAL);
......
......@@ -50,6 +50,9 @@
#include <linux/bpf.h>
#include <linux/filter.h>
#define DEV_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
struct bpf_dtab_netdev {
struct net_device *dev;
struct bpf_dtab *dtab;
......@@ -80,7 +83,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
dtab = kzalloc(sizeof(*dtab), GFP_USER);
......
......@@ -18,8 +18,9 @@
#include "bpf_lru_list.h"
#include "map_in_map.h"
#define HTAB_CREATE_FLAG_MASK \
(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE)
#define HTAB_CREATE_FLAG_MASK \
(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
BPF_F_RDONLY | BPF_F_WRONLY)
struct bucket {
struct hlist_nulls_head head;
......
......@@ -295,7 +295,7 @@ int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
}
static void *bpf_obj_do_get(const struct filename *pathname,
enum bpf_type *type)
enum bpf_type *type, int flags)
{
struct inode *inode;
struct path path;
......@@ -307,7 +307,7 @@ static void *bpf_obj_do_get(const struct filename *pathname,
return ERR_PTR(ret);
inode = d_backing_inode(path.dentry);
ret = inode_permission(inode, MAY_WRITE);
ret = inode_permission(inode, ACC_MODE(flags));
if (ret)
goto out;
......@@ -326,18 +326,23 @@ static void *bpf_obj_do_get(const struct filename *pathname,
return ERR_PTR(ret);
}
int bpf_obj_get_user(const char __user *pathname)
int bpf_obj_get_user(const char __user *pathname, int flags)
{
enum bpf_type type = BPF_TYPE_UNSPEC;
struct filename *pname;
int ret = -ENOENT;
int f_flags;
void *raw;
f_flags = bpf_get_file_flag(flags);
if (f_flags < 0)
return f_flags;
pname = getname(pathname);
if (IS_ERR(pname))
return PTR_ERR(pname);
raw = bpf_obj_do_get(pname, &type);
raw = bpf_obj_do_get(pname, &type, f_flags);
if (IS_ERR(raw)) {
ret = PTR_ERR(raw);
goto out;
......@@ -346,7 +351,7 @@ int bpf_obj_get_user(const char __user *pathname)
if (type == BPF_TYPE_PROG)
ret = bpf_prog_new_fd(raw);
else if (type == BPF_TYPE_MAP)
ret = bpf_map_new_fd(raw);
ret = bpf_map_new_fd(raw, f_flags);
else
goto out;
......
......@@ -495,7 +495,8 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
#define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
#define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
#define LPM_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE)
#define LPM_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE | \
BPF_F_RDONLY | BPF_F_WRONLY)
static struct bpf_map *trie_alloc(union bpf_attr *attr)
{
......
......@@ -40,6 +40,9 @@
#include <linux/list.h>
#include <net/strparser.h>
#define SOCK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
struct bpf_stab {
struct bpf_map map;
struct sock **sock_map;
......@@ -489,7 +492,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
if (attr->value_size > KMALLOC_MAX_SIZE)
......
......@@ -11,6 +11,9 @@
#include <linux/perf_event.h>
#include "percpu_freelist.h"
#define STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
struct stack_map_bucket {
struct pcpu_freelist_node fnode;
u32 hash;
......@@ -60,7 +63,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
if (!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
if (attr->map_flags & ~BPF_F_NUMA_NODE)
if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
/* check sanity of attributes */
......
......@@ -34,6 +34,8 @@
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
......@@ -210,6 +212,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
struct bpf_map *map = container_of(work, struct bpf_map, work);
bpf_map_uncharge_memlock(map);
security_bpf_map_free(map);
/* implementation dependent freeing */
map->ops->map_free(map);
}
......@@ -294,17 +297,54 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
}
#endif
static const struct file_operations bpf_map_fops = {
static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
loff_t *ppos)
{
/* We need this handler such that alloc_file() enables
* f_mode with FMODE_CAN_READ.
*/
return -EINVAL;
}
static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
size_t siz, loff_t *ppos)
{
/* We need this handler such that alloc_file() enables
* f_mode with FMODE_CAN_WRITE.
*/
return -EINVAL;
}
const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = bpf_map_show_fdinfo,
#endif
.release = bpf_map_release,
.read = bpf_dummy_read,
.write = bpf_dummy_write,
};
int bpf_map_new_fd(struct bpf_map *map)
int bpf_map_new_fd(struct bpf_map *map, int flags)
{
int ret;
ret = security_bpf_map(map, OPEN_FMODE(flags));
if (ret < 0)
return ret;
return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
O_RDWR | O_CLOEXEC);
flags | O_CLOEXEC);
}
int bpf_get_file_flag(int flags)
{
if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
return -EINVAL;
if (flags & BPF_F_RDONLY)
return O_RDONLY;
if (flags & BPF_F_WRONLY)
return O_WRONLY;
return O_RDWR;
}
/* helper macro to check that unused fields 'union bpf_attr' are zero */
......@@ -344,12 +384,17 @@ static int map_create(union bpf_attr *attr)
{
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_map *map;
int f_flags;
int err;
err = CHECK_ATTR(BPF_MAP_CREATE);
if (err)
return -EINVAL;
f_flags = bpf_get_file_flag(attr->map_flags);
if (f_flags < 0)
return f_flags;
if (numa_node != NUMA_NO_NODE &&
((unsigned int)numa_node >= nr_node_ids ||
!node_online(numa_node)))
......@@ -367,15 +412,19 @@ static int map_create(union bpf_attr *attr)
atomic_set(&map->refcnt, 1);
atomic_set(&map->usercnt, 1);
err = bpf_map_charge_memlock(map);
err = security_bpf_map_alloc(map);
if (err)
goto free_map_nouncharge;
err = bpf_map_charge_memlock(map);
if (err)
goto free_map_sec;
err = bpf_map_alloc_id(map);
if (err)
goto free_map;
err = bpf_map_new_fd(map);
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
/* failed to allocate fd.
* bpf_map_put() is needed because the above
......@@ -392,6 +441,8 @@ static int map_create(union bpf_attr *attr)
free_map:
bpf_map_uncharge_memlock(map);
free_map_sec:
security_bpf_map_free(map);
free_map_nouncharge:
map->ops->map_free(map);
return err;
......@@ -490,6 +541,11 @@ static int map_lookup_elem(union bpf_attr *attr)
if (IS_ERR(map))
return PTR_ERR(map);
if (!(f.file->f_mode & FMODE_CAN_READ)) {
err = -EPERM;
goto err_put;
}
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
......@@ -570,6 +626,11 @@ static int map_update_elem(union bpf_attr *attr)
if (IS_ERR(map))
return PTR_ERR(map);
if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
err = -EPERM;
goto err_put;
}
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
......@@ -659,6 +720,11 @@ static int map_delete_elem(union bpf_attr *attr)
if (IS_ERR(map))
return PTR_ERR(map);
if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
err = -EPERM;
goto err_put;
}
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
......@@ -702,6 +768,11 @@ static int map_get_next_key(union bpf_attr *attr)
if (IS_ERR(map))
return PTR_ERR(map);
if (!(f.file->f_mode & FMODE_CAN_READ)) {
err = -EPERM;
goto err_put;
}
if (ukey) {
key = memdup_user(ukey, map->key_size);
if (IS_ERR(key)) {
......@@ -856,6 +927,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
free_used_maps(aux);
bpf_prog_uncharge_memlock(aux->prog);
security_bpf_prog_free(aux);
bpf_prog_free(aux->prog);
}
......@@ -903,15 +975,23 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
}
#endif
static const struct file_operations bpf_prog_fops = {
const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = bpf_prog_show_fdinfo,
#endif
.release = bpf_prog_release,
.read = bpf_dummy_read,
.write = bpf_dummy_write,
};
int bpf_prog_new_fd(struct bpf_prog *prog)
{
int ret;
ret = security_bpf_prog(prog);
if (ret < 0)
return ret;
return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
O_RDWR | O_CLOEXEC);
}
......@@ -1051,10 +1131,14 @@ static int bpf_prog_load(union bpf_attr *attr)
if (!prog)
return -ENOMEM;
err = bpf_prog_charge_memlock(prog);
err = security_bpf_prog_alloc(prog->aux);
if (err)
goto free_prog_nouncharge;
err = bpf_prog_charge_memlock(prog);
if (err)
goto free_prog_sec;
prog->len = attr->insn_cnt;
err = -EFAULT;
......@@ -1112,16 +1196,18 @@ static int bpf_prog_load(union bpf_attr *attr)
free_used_maps(prog->aux);
free_prog:
bpf_prog_uncharge_memlock(prog);
free_prog_sec:
security_bpf_prog_free(prog->aux);
free_prog_nouncharge:
bpf_prog_free(prog);
return err;
}
#define BPF_OBJ_LAST_FIELD bpf_fd
#define BPF_OBJ_LAST_FIELD file_flags
static int bpf_obj_pin(const union bpf_attr *attr)
{
if (CHECK_ATTR(BPF_OBJ))
if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
return -EINVAL;
return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
......@@ -1129,10 +1215,12 @@ static int bpf_obj_pin(const union bpf_attr *attr)
static int bpf_obj_get(const union bpf_attr *attr)
{
if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
attr->file_flags & ~BPF_OBJ_FLAG_MASK)
return -EINVAL;
return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
attr->file_flags);
}
#ifdef CONFIG_CGROUP_BPF
......@@ -1392,20 +1480,26 @@ static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
return fd;
}
#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id
#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
struct bpf_map *map;
u32 id = attr->map_id;
int f_flags;
int fd;
if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
attr->open_flags & ~BPF_OBJ_FLAG_MASK)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
f_flags = bpf_get_file_flag(attr->open_flags);
if (f_flags < 0)
return f_flags;
spin_lock_bh(&map_idr_lock);
map = idr_find(&map_idr, id);
if (map)
......@@ -1417,7 +1511,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
if (IS_ERR(map))
return PTR_ERR(map);
fd = bpf_map_new_fd(map);
fd = bpf_map_new_fd(map, f_flags);
if (fd < 0)
bpf_map_put(map);
......@@ -1572,6 +1666,10 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
if (copy_from_user(&attr, uattr, size) != 0)
return -EFAULT;
err = security_bpf(cmd, &attr, size);
if (err < 0)
return err;
switch (cmd) {
case BPF_MAP_CREATE:
err = map_create(&attr);
......
......@@ -56,7 +56,7 @@ static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
int retval, fd;
set_fs(KERNEL_DS);
fd = bpf_obj_get_user(path);
fd = bpf_obj_get_user(path, 0);
set_fs(oldfs);
if (fd < 0)
return fd;
......
......@@ -12,6 +12,7 @@
* (at your option) any later version.
*/
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/module.h>
......@@ -1703,3 +1704,34 @@ int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule,
actx);
}
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
return call_int_hook(bpf, 0, cmd, attr, size);
}
int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
return call_int_hook(bpf_map, 0, map, fmode);
}
int security_bpf_prog(struct bpf_prog *prog)
{
return call_int_hook(bpf_prog, 0, prog);
}
int security_bpf_map_alloc(struct bpf_map *map)
{
return call_int_hook(bpf_map_alloc_security, 0, map);
}
int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
{
return call_int_hook(bpf_prog_alloc_security, 0, aux);
}
void security_bpf_map_free(struct bpf_map *map)
{
call_void_hook(bpf_map_free_security, map);
}
void security_bpf_prog_free(struct bpf_prog_aux *aux)
{
call_void_hook(bpf_prog_free_security, aux);
}
#endif /* CONFIG_BPF_SYSCALL */
......@@ -85,6 +85,7 @@
#include <linux/export.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/bpf.h>
#include "avc.h"
#include "objsec.h"
......@@ -1814,6 +1815,10 @@ static inline int file_path_has_perm(const struct cred *cred,
return inode_has_perm(cred, file_inode(file), av, &ad);
}
#ifdef CONFIG_BPF_SYSCALL
static int bpf_fd_pass(struct file *file, u32 sid);
#endif
/* Check whether a task can use an open file descriptor to
access an inode in a given way. Check access to the
descriptor itself, and then use dentry_has_perm to
......@@ -1844,6 +1849,12 @@ static int file_has_perm(const struct cred *cred,
goto out;
}
#ifdef CONFIG_BPF_SYSCALL
rc = bpf_fd_pass(file, cred_sid(cred));
if (rc)
return rc;
#endif
/* av is zero if only checking access to the descriptor. */
rc = 0;
if (av)
......@@ -2164,6 +2175,12 @@ static int selinux_binder_transfer_file(struct task_struct *from,
return rc;
}
#ifdef CONFIG_BPF_SYSCALL
rc = bpf_fd_pass(file, sid);
if (rc)
return rc;
#endif
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
......@@ -6252,6 +6269,139 @@ static void selinux_ib_free_security(void *ib_sec)
}
#endif
#ifdef CONFIG_BPF_SYSCALL
static int selinux_bpf(int cmd, union bpf_attr *attr,
unsigned int size)
{
u32 sid = current_sid();
int ret;
switch (cmd) {
case BPF_MAP_CREATE:
ret = avc_has_perm(sid, sid, SECCLASS_BPF, BPF__MAP_CREATE,
NULL);
break;
case BPF_PROG_LOAD:
ret = avc_has_perm(sid, sid, SECCLASS_BPF, BPF__PROG_LOAD,
NULL);
break;
default:
ret = 0;
break;
}
return ret;
}
static u32 bpf_map_fmode_to_av(fmode_t fmode)
{
u32 av = 0;
if (fmode & FMODE_READ)
av |= BPF__MAP_READ;
if (fmode & FMODE_WRITE)
av |= BPF__MAP_WRITE;
return av;
}
/* This function checks a file passed through a unix socket or binder to see
* whether it is a bpf-related object, and applies the corresponding checks to
* the bpf object based on its type. Unlike other files and sockets, bpf maps
* and programs all share one anonymous inode inside the kernel, so checking
* that inode cannot tell whether the process is privileged to access the bpf
* object. That is why this additional check is needed in selinux_file_receive
* and selinux_binder_transfer_files.
*/
static int bpf_fd_pass(struct file *file, u32 sid)
{
struct bpf_security_struct *bpfsec;
struct bpf_prog *prog;
struct bpf_map *map;
int ret;
if (file->f_op == &bpf_map_fops) {
map = file->private_data;
bpfsec = map->security;
ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
bpf_map_fmode_to_av(file->f_mode), NULL);
if (ret)
return ret;
} else if (file->f_op == &bpf_prog_fops) {
prog = file->private_data;
bpfsec = prog->aux->security;
ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
BPF__PROG_RUN, NULL);
if (ret)
return ret;
}
return 0;
}
static int selinux_bpf_map(struct bpf_map *map, fmode_t fmode)
{
u32 sid = current_sid();
struct bpf_security_struct *bpfsec;
bpfsec = map->security;
return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
bpf_map_fmode_to_av(fmode), NULL);
}
static int selinux_bpf_prog(struct bpf_prog *prog)
{
u32 sid = current_sid();
struct bpf_security_struct *bpfsec;
bpfsec = prog->aux->security;
return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
BPF__PROG_RUN, NULL);
}
static int selinux_bpf_map_alloc(struct bpf_map *map)
{
struct bpf_security_struct *bpfsec;
bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
if (!bpfsec)
return -ENOMEM;
bpfsec->sid = current_sid();
map->security = bpfsec;
return 0;
}
static void selinux_bpf_map_free(struct bpf_map *map)
{
struct bpf_security_struct *bpfsec = map->security;
map->security = NULL;
kfree(bpfsec);
}
static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux)
{
struct bpf_security_struct *bpfsec;
bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
if (!bpfsec)
return -ENOMEM;
bpfsec->sid = current_sid();
aux->security = bpfsec;
return 0;
}
static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
{
struct bpf_security_struct *bpfsec = aux->security;
aux->security = NULL;
kfree(bpfsec);
}
#endif
static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
......@@ -6471,6 +6621,16 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(audit_rule_match, selinux_audit_rule_match),
LSM_HOOK_INIT(audit_rule_free, selinux_audit_rule_free),
#endif
#ifdef CONFIG_BPF_SYSCALL
LSM_HOOK_INIT(bpf, selinux_bpf),
LSM_HOOK_INIT(bpf_map, selinux_bpf_map),
LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog),
LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
#endif
};
static __init int selinux_init(void)
......
......@@ -237,6 +237,8 @@ struct security_class_mapping secclass_map[] = {
{ "access", NULL } },
{ "infiniband_endport",
{ "manage_subnet", NULL } },
{ "bpf",
{"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
{ NULL }
};
......
......@@ -150,6 +150,10 @@ struct pkey_security_struct {
u32 sid; /* SID of pkey */
};
struct bpf_security_struct {
u32 sid; /* SID of the bpf object creator */
};
extern unsigned int selinux_checkreqprot;
#endif /* _SELINUX_OBJSEC_H_ */
......@@ -1033,6 +1033,51 @@ static void test_map_parallel(void)
assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
}
static void test_map_rdonly(void)
{
int i, fd, key = 0, value = 0;
fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
MAP_SIZE, map_flags | BPF_F_RDONLY);
if (fd < 0) {
printf("Failed to create map for read only test '%s'!\n",
strerror(errno));
exit(1);
}
key = 1;
value = 1234;
/* Insert key=1 element. */
assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == -1 &&
errno == EPERM);
/* Check that key=1 was never inserted, since the update failed. */
assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == ENOENT);
}
static void test_map_wronly(void)
{
int i, fd, key = 0, value = 0;
fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
MAP_SIZE, map_flags | BPF_F_WRONLY);
if (fd < 0) {
printf("Failed to create map for read only test '%s'!\n",
strerror(errno));
exit(1);
}
key = 1;
value = 1234;
/* Insert key=1 element. */
assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
/* Check that reads from a write-only map are rejected. */
assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == EPERM);
assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == EPERM);
}
static void run_all_tests(void)
{
test_hashmap(0, NULL);
......@@ -1050,6 +1095,9 @@ static void run_all_tests(void)
test_map_large();
test_map_parallel();
test_map_stress();
test_map_rdonly();
test_map_wronly();
}
int main(void)
......