Commit 79e178a5 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'apparmor-pr-2019-12-03' of...

Merge tag 'apparmor-pr-2019-12-03' of git://git.kernel.org/pub/scm/linux/kernel/git/jj/linux-apparmor

Pull apparmor updates from John Johansen:
 "Features:

   - increase left match history buffer size to provide improved
     conflict resolution in overlapping execution rules.

   - switch buffer allocation to use a memory pool and GFP_KERNEL where
     possible.

   - add compression of policy blobs to reduce memory usage.

  Cleanups:

   - fix spelling mistake "immutible" -> "immutable"

  Bug fixes:

   - fix unsigned len comparison in update_for_len macro

   - fix sparse warning for type-casting of current->real_cred"

* tag 'apparmor-pr-2019-12-03' of git://git.kernel.org/pub/scm/linux/kernel/git/jj/linux-apparmor:
  apparmor: make it so work buffers can be allocated from atomic context
  apparmor: reduce rcu_read_lock scope for aa_file_perm mediation
  apparmor: fix wrong buffer allocation in aa_new_mount
  apparmor: fix unsigned len comparison with less than zero
  apparmor: increase left match history buffer size
  apparmor: Switch to GFP_KERNEL where possible
  apparmor: Use a memory pool instead per-CPU caches
  apparmor: Force type-casting of current->real_cred
  apparmor: fix spelling mistake "immutible" -> "immutable"
  apparmor: fix blob compression when ns is forced on a policy load
  apparmor: fix missing ZLIB defines
  apparmor: fix blob compression build failure on ppc
  apparmor: Initial implementation of raw policy blob compression
parents 01d1dff6 341c1fda
......@@ -6,6 +6,8 @@ config SECURITY_APPARMOR
select SECURITY_PATH
select SECURITYFS
select SECURITY_NETWORK
select ZLIB_INFLATE
select ZLIB_DEFLATE
default n
help
This enables the AppArmor security module.
......
......@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/poll.h>
#include <linux/zlib.h>
#include <uapi/linux/major.h>
#include <uapi/linux/magic.h>
......@@ -65,6 +66,35 @@
* support fns
*/
/*
 * Per-open state for a policy rawdata file: holds a reference on the
 * loaddata being exported, with the decompressed payload stored in a
 * buffer allocated immediately after this struct.
 */
struct rawdata_f_data {
struct aa_loaddata *loaddata;
};
/* address of the decompressed payload buffer trailing struct rawdata_f_data */
#define RAWDATA_F_DATA_BUF(p) (char *)(p + 1)
/*
 * rawdata_f_data_free - release per-open rawdata state
 * @private: state to free (may be NULL)
 *
 * Drops the held loaddata reference and frees the struct together with
 * its trailing payload buffer.
 */
static void rawdata_f_data_free(struct rawdata_f_data *private)
{
	if (private) {
		aa_put_loaddata(private->loaddata);
		kvfree(private);
	}
}
/*
 * rawdata_f_data_alloc - allocate per-open state plus @size payload bytes
 * @size: size of the trailing payload buffer
 *
 * Returns: zeroed allocation on success, ERR_PTR(-EINVAL) if the total
 * size would overflow, ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct rawdata_f_data *rawdata_f_data_alloc(size_t size)
{
	struct rawdata_f_data *private;

	/* reject sizes where sizeof(*private) + size would wrap size_t */
	if (size > SIZE_MAX - sizeof(*private))
		return ERR_PTR(-EINVAL);

	private = kvzalloc(sizeof(*private) + size, GFP_KERNEL);
	return private ? private : ERR_PTR(-ENOMEM);
}
/**
* aa_mangle_name - mangle a profile name to std profile layout form
* @name: profile name to mangle (NOT NULL)
......@@ -1280,36 +1310,117 @@ static int seq_rawdata_hash_show(struct seq_file *seq, void *v)
return 0;
}
/* seq_file show fn: emit the stored (compressed) size of the policy blob */
static int seq_rawdata_compressed_size_show(struct seq_file *seq, void *v)
{
struct aa_loaddata *data = seq->private;
seq_printf(seq, "%zu\n", data->compressed_size);
return 0;
}
SEQ_RAWDATA_FOPS(abi);
SEQ_RAWDATA_FOPS(revision);
SEQ_RAWDATA_FOPS(hash);
SEQ_RAWDATA_FOPS(compressed_size);
/*
 * deflate_decompress - inflate a compressed policy blob
 * @src: compressed input, @slen bytes
 * @dst: destination buffer, @dlen bytes (the expected uncompressed size)
 *
 * When compression is disabled (level 0) the blob was stored raw, so it
 * is copied verbatim.  Returns 0 on success or a negative errno.
 */
static int deflate_decompress(char *src, size_t slen, char *dst, size_t dlen)
{
int error;
struct z_stream_s strm;
/* level 0: payload was never compressed, just bounds-check and copy */
if (aa_g_rawdata_compression_level == 0) {
if (dlen < slen)
return -EINVAL;
memcpy(dst, src, slen);
return 0;
}
memset(&strm, 0, sizeof(strm));
strm.workspace = kvzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
if (!strm.workspace)
return -ENOMEM;
strm.next_in = src;
strm.avail_in = slen;
error = zlib_inflateInit(&strm);
if (error != Z_OK) {
error = -ENOMEM;
goto fail_inflate_init;
}
strm.next_out = dst;
strm.avail_out = dlen;
/* single-shot inflate: anything short of stream end means bad input */
error = zlib_inflate(&strm, Z_FINISH);
if (error != Z_STREAM_END)
error = -EINVAL;
else
error = 0;
zlib_inflateEnd(&strm);
fail_inflate_init:
kvfree(strm.workspace);
return error;
}
/*
 * rawdata_read - copy the (decompressed) policy blob to userspace
 *
 * file->private_data is the rawdata_f_data set up in rawdata_open();
 * its trailing buffer already holds the uncompressed payload of
 * ->loaddata->size bytes.
 *
 * The span previously contained diff residue: a duplicate declaration
 * reading private_data as struct aa_loaddata * and a second, stale
 * return statement.  Only the rawdata_f_data-based version is kept.
 */
static ssize_t rawdata_read(struct file *file, char __user *buf, size_t size,
			    loff_t *ppos)
{
	struct rawdata_f_data *private = file->private_data;

	return simple_read_from_buffer(buf, size, ppos,
				       RAWDATA_F_DATA_BUF(private),
				       private->loaddata->size);
}
/*
 * rawdata_release - free the per-open decompression state
 *
 * rawdata_f_data_free() drops the loaddata reference itself; the stale
 * residual direct aa_put_loaddata(file->private_data) call would have
 * treated the wrong pointer type and double-dropped the reference, so
 * it is removed.
 */
static int rawdata_release(struct inode *inode, struct file *file)
{
	rawdata_f_data_free(file->private_data);

	return 0;
}
/*
 * rawdata_open - set up a decompressed view of a policy blob
 *
 * Takes a reference on the inode's loaddata, allocates a buffer big
 * enough for the uncompressed payload and inflates into it.  On any
 * failure the reference and the allocation are unwound.  The stale
 * residue lines assigning __aa_get_loaddata() straight into
 * file->private_data are dropped; private_data must only ever hold the
 * rawdata_f_data wrapper expected by rawdata_read()/rawdata_release().
 */
static int rawdata_open(struct inode *inode, struct file *file)
{
	int error;
	struct aa_loaddata *loaddata;
	struct rawdata_f_data *private;

	if (!policy_view_capable(NULL))
		return -EACCES;

	loaddata = __aa_get_loaddata(inode->i_private);
	if (!loaddata)
		/* lost race: this entry is being reaped */
		return -ENOENT;

	private = rawdata_f_data_alloc(loaddata->size);
	if (IS_ERR(private)) {
		error = PTR_ERR(private);
		goto fail_private_alloc;
	}

	/* ownership of the loaddata ref moves into private */
	private->loaddata = loaddata;

	error = deflate_decompress(loaddata->data, loaddata->compressed_size,
				   RAWDATA_F_DATA_BUF(private),
				   loaddata->size);
	if (error)
		goto fail_decompress;

	file->private_data = private;
	return 0;

fail_decompress:
	/* also drops the loaddata reference via private->loaddata */
	rawdata_f_data_free(private);
	return error;

fail_private_alloc:
	aa_put_loaddata(loaddata);
	return error;
}
static const struct file_operations rawdata_fops = {
......@@ -1388,6 +1499,13 @@ int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata)
rawdata->dents[AAFS_LOADDATA_HASH] = dent;
}
dent = aafs_create_file("compressed_size", S_IFREG | 0444, dir,
rawdata,
&seq_rawdata_compressed_size_fops);
if (IS_ERR(dent))
goto fail;
rawdata->dents[AAFS_LOADDATA_COMPRESSED_SIZE] = dent;
dent = aafs_create_file("raw_data", S_IFREG | 0444,
dir, rawdata, &rawdata_fops);
if (IS_ERR(dent))
......
......@@ -520,7 +520,7 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
label = &new_profile->label;
continue;
}
label = aa_label_parse(&profile->label, *name, GFP_ATOMIC,
label = aa_label_parse(&profile->label, *name, GFP_KERNEL,
true, false);
if (IS_ERR(label))
label = NULL;
......@@ -600,7 +600,7 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
/* base the stack on post domain transition */
struct aa_label *base = new;
new = aa_label_parse(base, stack, GFP_ATOMIC, true, false);
new = aa_label_parse(base, stack, GFP_KERNEL, true, false);
if (IS_ERR(new))
new = NULL;
aa_put_label(base);
......@@ -685,20 +685,9 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - learning mode */
struct aa_profile *new_profile = NULL;
char *n = kstrdup(name, GFP_ATOMIC);
if (n) {
/* name is ptr into buffer */
long pos = name - buffer;
/* break per cpu buffer hold */
put_buffers(buffer);
new_profile = aa_new_null_profile(profile, false, n,
new_profile = aa_new_null_profile(profile, false, name,
GFP_KERNEL);
get_buffers(buffer);
name = buffer + pos;
strcpy((char *)name, n);
kfree(n);
}
if (!new_profile) {
error = -ENOMEM;
info = "could not create null profile";
......@@ -719,7 +708,7 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
if (DEBUG_ON) {
dbg_printk("apparmor: scrubbing environment variables"
" for %s profile=", name);
aa_label_printk(new, GFP_ATOMIC);
aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
*secure_exec = true;
......@@ -795,7 +784,7 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
if (DEBUG_ON) {
dbg_printk("apparmor: scrubbing environment "
"variables for %s label=", xname);
aa_label_printk(onexec, GFP_ATOMIC);
aa_label_printk(onexec, GFP_KERNEL);
dbg_printk("\n");
}
*secure_exec = true;
......@@ -829,7 +818,7 @@ static struct aa_label *handle_onexec(struct aa_label *label,
bprm, buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
new = fn_label_build_in_ns(label, profile, GFP_ATOMIC,
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_get_newest_label(onexec),
profile_transition(profile, bprm, buffer,
cond, unsafe));
......@@ -841,9 +830,9 @@ static struct aa_label *handle_onexec(struct aa_label *label,
buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
new = fn_label_build_in_ns(label, profile, GFP_ATOMIC,
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_label_merge(&profile->label, onexec,
GFP_ATOMIC),
GFP_KERNEL),
profile_transition(profile, bprm, buffer,
cond, unsafe));
}
......@@ -903,13 +892,18 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
ctx->nnp = aa_get_label(label);
/* buffer freed below, name is pointer into buffer */
get_buffers(buffer);
buffer = aa_get_buffer(false);
if (!buffer) {
error = -ENOMEM;
goto done;
}
/* Test for onexec first as onexec override other x transitions. */
if (ctx->onexec)
new = handle_onexec(label, ctx->onexec, ctx->token,
bprm, buffer, &cond, &unsafe);
else
new = fn_label_build(label, profile, GFP_ATOMIC,
new = fn_label_build(label, profile, GFP_KERNEL,
profile_transition(profile, bprm, buffer,
&cond, &unsafe));
......@@ -953,7 +947,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
if (DEBUG_ON) {
dbg_printk("scrubbing environment variables for %s "
"label=", bprm->filename);
aa_label_printk(new, GFP_ATOMIC);
aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
bprm->secureexec = 1;
......@@ -964,7 +958,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
if (DEBUG_ON) {
dbg_printk("apparmor: clearing unsafe personality "
"bits. %s label=", bprm->filename);
aa_label_printk(new, GFP_ATOMIC);
aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
bprm->per_clear |= PER_CLEAR_ON_SETID;
......@@ -975,7 +969,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
done:
aa_put_label(label);
put_buffers(buffer);
aa_put_buffer(buffer);
return error;
......
......@@ -76,7 +76,7 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
if (aad(sa)->peer) {
audit_log_format(ab, " target=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
FLAG_VIEW_SUBNS, GFP_ATOMIC);
FLAG_VIEW_SUBNS, GFP_KERNEL);
} else if (aad(sa)->fs.target) {
audit_log_format(ab, " target=");
audit_log_untrustedstring(ab, aad(sa)->fs.target);
......@@ -332,12 +332,14 @@ int aa_path_perm(const char *op, struct aa_label *label,
flags |= PATH_DELEGATE_DELETED | (S_ISDIR(cond->mode) ? PATH_IS_DIR :
0);
get_buffers(buffer);
buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
profile_path_perm(op, profile, path, buffer, request,
cond, flags, &perms));
put_buffers(buffer);
aa_put_buffer(buffer);
return error;
}
......@@ -475,12 +477,18 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
int error;
/* buffer freed below, lname is pointer in buffer */
get_buffers(buffer, buffer2);
buffer = aa_get_buffer(false);
buffer2 = aa_get_buffer(false);
error = -ENOMEM;
if (!buffer || !buffer2)
goto out;
error = fn_for_each_confined(label, profile,
profile_path_link(profile, &link, buffer, &target,
buffer2, &cond));
put_buffers(buffer, buffer2);
out:
aa_put_buffer(buffer);
aa_put_buffer(buffer2);
return error;
}
......@@ -507,7 +515,7 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
static int __file_path_perm(const char *op, struct aa_label *label,
struct aa_label *flabel, struct file *file,
u32 request, u32 denied)
u32 request, u32 denied, bool in_atomic)
{
struct aa_profile *profile;
struct aa_perms perms = {};
......@@ -524,7 +532,9 @@ static int __file_path_perm(const char *op, struct aa_label *label,
return 0;
flags = PATH_DELEGATE_DELETED | (S_ISDIR(cond.mode) ? PATH_IS_DIR : 0);
get_buffers(buffer);
buffer = aa_get_buffer(in_atomic);
if (!buffer)
return -ENOMEM;
/* check every profile in task label not in current cache */
error = fn_for_each_not_in_set(flabel, label, profile,
......@@ -553,7 +563,7 @@ static int __file_path_perm(const char *op, struct aa_label *label,
if (!error)
update_file_ctx(file_ctx(file), label, request);
put_buffers(buffer);
aa_put_buffer(buffer);
return error;
}
......@@ -590,11 +600,12 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
* @label: label being enforced (NOT NULL)
* @file: file to revalidate access permissions on (NOT NULL)
* @request: requested permissions
* @in_atomic: whether allocations need to be done in atomic context
*
* Returns: %0 if access allowed else error
*/
int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
u32 request)
u32 request, bool in_atomic)
{
struct aa_file_ctx *fctx;
struct aa_label *flabel;
......@@ -607,7 +618,8 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
fctx = file_ctx(file);
rcu_read_lock();
flabel = rcu_dereference(fctx->label);
flabel = aa_get_newest_label(rcu_dereference(fctx->label));
rcu_read_unlock();
AA_BUG(!flabel);
/* revalidate access, if task is unconfined, or the cached cred
......@@ -626,14 +638,13 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
error = __file_path_perm(op, label, flabel, file, request,
denied);
denied, in_atomic);
else if (S_ISSOCK(file_inode(file)->i_mode))
error = __file_sock_perm(op, label, flabel, file, request,
denied);
done:
rcu_read_unlock();
aa_put_label(flabel);
return error;
}
......@@ -655,7 +666,8 @@ static void revalidate_tty(struct aa_label *label)
struct tty_file_private, list);
file = file_priv->file;
if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE))
if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
IN_ATOMIC))
drop_tty = 1;
}
spin_unlock(&tty->files_lock);
......@@ -669,7 +681,8 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
{
struct aa_label *label = (struct aa_label *)p;
if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file)))
if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
IN_ATOMIC))
return fd + 1;
return 0;
}
......
......@@ -36,6 +36,7 @@ extern enum audit_mode aa_g_audit;
extern bool aa_g_audit_header;
extern bool aa_g_debug;
extern bool aa_g_hash_policy;
extern int aa_g_rawdata_compression_level;
extern bool aa_g_lock_policy;
extern bool aa_g_logsyscall;
extern bool aa_g_paranoid_load;
......
......@@ -197,7 +197,7 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry);
int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
u32 request);
u32 request, bool in_atomic);
void aa_inherit_files(const struct cred *cred, struct files_struct *files);
......
......@@ -134,7 +134,7 @@ unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
void aa_dfa_free_kref(struct kref *kref);
#define WB_HISTORY_SIZE 8
#define WB_HISTORY_SIZE 24
struct match_workbuf {
unsigned int count;
unsigned int pos;
......@@ -147,7 +147,6 @@ struct match_workbuf N = { \
.count = 0, \
.pos = 0, \
.len = 0, \
.size = WB_HISTORY_SIZE, \
}
unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start,
......
......@@ -11,7 +11,6 @@
#ifndef __AA_PATH_H
#define __AA_PATH_H
enum path_flags {
PATH_IS_DIR = 0x1, /* path is a directory */
PATH_CONNECT_PATH = 0x4, /* connect disconnected paths to / */
......@@ -26,51 +25,8 @@ int aa_path_name(const struct path *path, int flags, char *buffer,
const char **name, const char **info,
const char *disconnected);
#define MAX_PATH_BUFFERS 2
/* Per cpu buffers used during mediation */
/* preallocated buffers to use during path lookups */
struct aa_buffers {
char *buf[MAX_PATH_BUFFERS];
};
#include <linux/percpu.h>
#include <linux/preempt.h>
DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
#define ASSIGN(FN, A, X, N) ((X) = FN(A, N))
#define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/
#define EVAL2(FN, A, X, Y...) \
do { ASSIGN(FN, A, X, 1); EVAL1(FN, A, Y); } while (0)
#define EVAL(FN, A, X...) CONCATENATE(EVAL, COUNT_ARGS(X))(FN, A, X)
#define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)
#ifdef CONFIG_DEBUG_PREEMPT
#define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X)
#else
#define AA_BUG_PREEMPT_ENABLED(X) /* nop */
#endif
#define __get_buffer(C, N) ({ \
AA_BUG_PREEMPT_ENABLED("__get_buffer without preempt disabled"); \
(C)->buf[(N)]; })
#define __get_buffers(C, X...) EVAL(__get_buffer, C, X)
#define __put_buffers(X, Y...) ((void)&(X))
#define get_buffers(X...) \
do { \
struct aa_buffers *__cpu_var = get_cpu_ptr(&aa_buffers); \
__get_buffers(__cpu_var, X); \
} while (0)
#define put_buffers(X, Y...) \
do { \
__put_buffers(X, Y); \
put_cpu_ptr(&aa_buffers); \
} while (0)
#define IN_ATOMIC true
char *aa_get_buffer(bool in_atomic);
void aa_put_buffer(char *buf);
#endif /* __AA_PATH_H */
......@@ -41,6 +41,7 @@ enum {
AAFS_LOADDATA_REVISION,
AAFS_LOADDATA_HASH,
AAFS_LOADDATA_DATA,
AAFS_LOADDATA_COMPRESSED_SIZE,
AAFS_LOADDATA_DIR, /* must be last actual entry */
AAFS_LOADDATA_NDENTS /* count of entries */
};
......@@ -61,11 +62,16 @@ struct aa_loaddata {
struct dentry *dents[AAFS_LOADDATA_NDENTS];
struct aa_ns *ns;
char *name;
size_t size;
size_t size; /* the original size of the payload */
size_t compressed_size; /* the compressed size of the payload */
long revision; /* the ns policy revision this caused */
int abi;
unsigned char *hash;
/* Pointer to payload. If @compressed_size > 0, then this is the
* compressed version of the payload, else it is the uncompressed
* version (with the size indicated by @size).
*/
char *data;
};
......
......@@ -1458,11 +1458,13 @@ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
/* helper macro for snprint routines */
/*
 * update_for_len - advance snprint bookkeeping after emitting @len bytes
 *
 * @len may be the (signed) return of an snprintf-style call; it is
 * checked for negativity, then all arithmetic against the unsigned
 * @size is done through the size_t copy @ulen so min()/subtraction
 * never mix signedness.  The stale pre-fix statements that operated on
 * the signed @len directly are removed.
 */
#define update_for_len(total, len, size, str)				\
do {									\
	size_t ulen = len;						\
									\
	AA_BUG(len < 0);						\
	total += ulen;							\
	ulen = min(ulen, size);						\
	size -= ulen;							\
	str += ulen;							\
} while (0)
/**
......@@ -1597,7 +1599,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
struct aa_ns *prev_ns = NULL;
struct label_it i;
int count = 0, total = 0;
size_t len;
ssize_t len;
AA_BUG(!str && size != 0);
AA_BUG(!label);
......
......@@ -21,6 +21,7 @@
#include <linux/user_namespace.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/zlib.h>
#include <net/sock.h>
#include <uapi/linux/mount.h>
......@@ -43,8 +44,17 @@
/* Flag indicating whether initialization completed */
int apparmor_initialized;
DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
/*
 * Work buffers are handed out as raw char arrays; while a buffer sits
 * on the free list its leading bytes are reused as the list linkage.
 */
union aa_buffer {
struct list_head list;
char buffer[1];
};
/* buffers kept back for atomic-context callers that cannot allocate */
#define RESERVE_COUNT 2
static int reserve_count = RESERVE_COUNT;
/* current number of buffers on aa_global_buffers; guarded by aa_buffers_lock */
static int buffer_count;
static LIST_HEAD(aa_global_buffers);
static DEFINE_SPINLOCK(aa_buffers_lock);
/*
* LSM hook functions
......@@ -442,7 +452,8 @@ static void apparmor_file_free_security(struct file *file)
aa_put_label(rcu_access_pointer(ctx->label));
}
static int common_file_perm(const char *op, struct file *file, u32 mask)
static int common_file_perm(const char *op, struct file *file, u32 mask,
bool in_atomic)
{
struct aa_label *label;
int error = 0;
......@@ -452,7 +463,7 @@ static int common_file_perm(const char *op, struct file *file, u32 mask)
return -EACCES;
label = __begin_current_label_crit_section();
error = aa_file_perm(op, label, file, mask);
error = aa_file_perm(op, label, file, mask, in_atomic);
__end_current_label_crit_section(label);
return error;
......@@ -460,12 +471,13 @@ static int common_file_perm(const char *op, struct file *file, u32 mask)
/*
 * apparmor_file_receive - revalidate perms on an fd received over IPC
 *
 * Runs in process context, so blocking buffer allocation is fine
 * (in_atomic = false).  The stale four-argument residue call is removed.
 */
static int apparmor_file_receive(struct file *file)
{
	return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file),
				false);
}
/*
 * apparmor_file_permission - check access on every read/write
 *
 * Process context: pass in_atomic = false so the buffer pool may sleep.
 * The stale three-argument residue call is removed.
 */
static int apparmor_file_permission(struct file *file, int mask)
{
	return common_file_perm(OP_FPERM, file, mask, false);
}
static int apparmor_file_lock(struct file *file, unsigned int cmd)
......@@ -475,11 +487,11 @@ static int apparmor_file_lock(struct file *file, unsigned int cmd)
if (cmd == F_WRLCK)
mask |= MAY_WRITE;
return common_file_perm(OP_FLOCK, file, mask);
return common_file_perm(OP_FLOCK, file, mask, false);
}
static int common_mmap(const char *op, struct file *file, unsigned long prot,
unsigned long flags)
unsigned long flags, bool in_atomic)
{
int mask = 0;
......@@ -497,20 +509,21 @@ static int common_mmap(const char *op, struct file *file, unsigned long prot,
if (prot & PROT_EXEC)
mask |= AA_EXEC_MMAP;
return common_file_perm(op, file, mask);
return common_file_perm(op, file, mask, in_atomic);
}
static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
{
return common_mmap(OP_FMMAP, file, prot, flags);
return common_mmap(OP_FMMAP, file, prot, flags, GFP_ATOMIC);
}
/*
 * apparmor_file_mprotect - mediate changing a mapping's protection
 *
 * Process context, so in_atomic = false.  Private mappings are checked
 * as MAP_PRIVATE.  The stale residue call lacking the in_atomic
 * argument is removed.
 */
static int apparmor_file_mprotect(struct vm_area_struct *vma,
				  unsigned long reqprot, unsigned long prot)
{
	return common_mmap(OP_FMPROT, vma->vm_file, prot,
			   !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0,
			   false);
}
static int apparmor_sb_mount(const char *dev_name, const struct path *path,
......@@ -1262,6 +1275,16 @@ static const struct kernel_param_ops param_ops_aauint = {
.get = param_get_aauint
};
/* module parameter ops for rawdata_compression_level (backed by an int) */
static int param_set_aacompressionlevel(const char *val,
const struct kernel_param *kp);
static int param_get_aacompressionlevel(char *buffer,
const struct kernel_param *kp);
#define param_check_aacompressionlevel param_check_int
static const struct kernel_param_ops param_ops_aacompressionlevel = {
.set = param_set_aacompressionlevel,
.get = param_get_aacompressionlevel
};
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy param_check_bool
......@@ -1292,6 +1315,11 @@ bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
#endif
/* policy loaddata compression level */
int aa_g_rawdata_compression_level = Z_DEFAULT_COMPRESSION;
module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
aacompressionlevel, 0400);
/* Debug mode */
bool aa_g_debug = IS_ENABLED(CONFIG_SECURITY_APPARMOR_DEBUG_MESSAGES);
module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
......@@ -1402,6 +1430,7 @@ static int param_set_aauint(const char *val, const struct kernel_param *kp)
return -EPERM;
error = param_set_uint(val, kp);
aa_g_path_max = max_t(uint32_t, aa_g_path_max, sizeof(union aa_buffer));
pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);
return error;
......@@ -1456,6 +1485,37 @@ static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
return param_get_bool(buffer, &kp_local);
}
/*
 * param_set_aacompressionlevel - set handler for rawdata_compression_level
 *
 * Only valid while AppArmor is enabled and before initialization has
 * completed; the parsed value is clamped to zlib's valid range
 * [Z_NO_COMPRESSION, Z_BEST_COMPRESSION].  Returns param_set_int()'s
 * result.
 */
static int param_set_aacompressionlevel(const char *val,
					const struct kernel_param *kp)
{
	int error;

	if (!apparmor_enabled)
		return -EINVAL;
	if (apparmor_initialized)
		return -EPERM;

	error = param_set_int(val, kp);

	aa_g_rawdata_compression_level = clamp(aa_g_rawdata_compression_level,
					       Z_NO_COMPRESSION,
					       Z_BEST_COMPRESSION);
	/* the level is a signed int, so print with %d rather than %u */
	pr_info("AppArmor: policy rawdata compression level set to %d\n",
		aa_g_rawdata_compression_level);

	return error;
}
/*
 * param_get_aacompressionlevel - get handler for rawdata_compression_level
 *
 * Readable only while AppArmor is enabled; once initialized the caller
 * additionally needs policy view capability.
 */
static int param_get_aacompressionlevel(char *buffer,
					const struct kernel_param *kp)
{
	if (apparmor_enabled) {
		if (apparmor_initialized && !policy_view_capable(NULL))
			return -EPERM;
		return param_get_int(buffer, kp);
	}
	return -EINVAL;
}
static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
......@@ -1514,6 +1574,61 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
return 0;
}
/*
 * aa_get_buffer - get a work buffer from the pool, allocating if needed
 * @in_atomic: true if the caller cannot sleep
 *
 * Non-atomic callers may only take a pooled buffer while the pool is
 * above the reserve mark, keeping reserve_count buffers available for
 * atomic callers.  If allocation fails the pool is retried once, in
 * case another CPU returned a buffer in the meantime.
 *
 * Returns: buffer of aa_g_path_max bytes, or NULL on failure.
 */
char *aa_get_buffer(bool in_atomic)
{
union aa_buffer *aa_buf;
bool try_again = true;
gfp_t flags = (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
retry:
spin_lock(&aa_buffers_lock);
if (buffer_count > reserve_count ||
(in_atomic && !list_empty(&aa_global_buffers))) {
aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
list);
list_del(&aa_buf->list);
buffer_count--;
spin_unlock(&aa_buffers_lock);
return &aa_buf->buffer[0];
}
if (in_atomic) {
/*
* out of reserve buffers and in atomic context so increase
* how many buffers to keep in reserve
*/
reserve_count++;
flags = GFP_ATOMIC;
}
spin_unlock(&aa_buffers_lock);
if (!in_atomic)
might_sleep();
/* fall back to allocating a fresh buffer outside the lock */
aa_buf = kmalloc(aa_g_path_max, flags);
if (!aa_buf) {
if (try_again) {
try_again = false;
goto retry;
}
pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
return NULL;
}
return &aa_buf->buffer[0];
}
/*
 * aa_put_buffer - return a work buffer (may be NULL) to the global pool
 * @buf: buffer previously obtained from aa_get_buffer()
 */
void aa_put_buffer(char *buf)
{
union aa_buffer *aa_buf;
if (!buf)
return;
/* recover the union so its leading bytes can serve as list linkage */
aa_buf = container_of(buf, union aa_buffer, buffer[0]);
spin_lock(&aa_buffers_lock);
list_add(&aa_buf->list, &aa_global_buffers);
buffer_count++;
spin_unlock(&aa_buffers_lock);
}
/*
* AppArmor init functions
*/
......@@ -1525,7 +1640,7 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
*/
static int __init set_init_ctx(void)
{
struct cred *cred = (struct cred *)current->real_cred;
struct cred *cred = (__force struct cred *)current->real_cred;
set_cred_label(cred, aa_get_label(ns_unconfined(root_ns)));
......@@ -1534,38 +1649,48 @@ static int __init set_init_ctx(void)
static void destroy_buffers(void)
{
u32 i, j;
union aa_buffer *aa_buf;
for_each_possible_cpu(i) {
for_each_cpu_buffer(j) {
kfree(per_cpu(aa_buffers, i).buf[j]);
per_cpu(aa_buffers, i).buf[j] = NULL;
}
spin_lock(&aa_buffers_lock);
while (!list_empty(&aa_global_buffers)) {
aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
list);
list_del(&aa_buf->list);
spin_unlock(&aa_buffers_lock);
kfree(aa_buf);
spin_lock(&aa_buffers_lock);
}
spin_unlock(&aa_buffers_lock);
}
/*
 * alloc_buffers - preallocate the initial work-buffer pool at boot
 *
 * Returns 0 on success or -ENOMEM (after freeing any partial pool).
 * The stale per-CPU-cache residue (u32 i, j; kmalloc_node per cpu
 * buffer, per_cpu assignment) is removed; only the pool-based
 * allocation remains.  Also fixes the "extremly" typo in the comment.
 */
static int __init alloc_buffers(void)
{
	union aa_buffer *aa_buf;
	int i, num;

	/*
	 * A function may require two buffers at once. Usually the buffers are
	 * used for a short period of time and are shared. On UP kernel buffers
	 * two should be enough, with more CPUs it is possible that more
	 * buffers will be used simultaneously. The preallocated pool may grow.
	 * This preallocation has also the side-effect that AppArmor will be
	 * disabled early at boot if aa_g_path_max is extremely high.
	 */
	if (num_online_cpus() > 1)
		num = 4 + RESERVE_COUNT;
	else
		num = 2 + RESERVE_COUNT;

	for (i = 0; i < num; i++) {
		aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL |
				 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
		if (!aa_buf) {
			destroy_buffers();
			return -ENOMEM;
		}
		/* seed the pool by "returning" the fresh buffer */
		aa_put_buffer(&aa_buf->buffer[0]);
	}
	return 0;
}
......@@ -1730,7 +1855,7 @@ static int __init apparmor_init(void)
error = alloc_buffers();
if (error) {
AA_ERROR("Unable to allocate work buffers\n");
goto buffers_out;
goto alloc_out;
}
error = set_init_ctx();
......@@ -1755,7 +1880,6 @@ static int __init apparmor_init(void)
buffers_out:
destroy_buffers();
alloc_out:
aa_destroy_aafs();
aa_teardown_dfa_engine();
......
......@@ -616,8 +616,8 @@ unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
/*
 * Advance the match-history ring positions.
 *
 * WB_HISTORY_SIZE was raised to 24, which is not a power of two, so
 * wrapping with the mask (WB_HISTORY_SIZE - 1) no longer computes a
 * correct modulo (e.g. 24 & 23 == 8, not 0) and would corrupt the
 * history ring.  Use the explicit % operator instead; the stale
 * wb->size-based residue lines are removed (size was dropped from
 * struct match_workbuf).
 */
#define inc_wb_pos(wb)						\
do {								\
	wb->pos = (wb->pos + 1) % WB_HISTORY_SIZE;		\
	wb->len = (wb->len + 1) % WB_HISTORY_SIZE;		\
} while (0)
/* For DFAs that don't support extended tagging of states */
......@@ -636,7 +636,7 @@ static bool is_loop(struct match_workbuf *wb, unsigned int state,
return true;
}
if (pos == 0)
pos = wb->size;
pos = WB_HISTORY_SIZE;
pos--;
}
......
......@@ -408,11 +408,13 @@ int aa_remount(struct aa_label *label, const struct path *path,
binary = path->dentry->d_sb->s_type->fs_flags & FS_BINARY_MOUNTDATA;
get_buffers(buffer);
buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, NULL, NULL, NULL,
flags, data, binary));
put_buffers(buffer);
aa_put_buffer(buffer);
return error;
}
......@@ -437,11 +439,18 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
if (error)
return error;
get_buffers(buffer, old_buffer);
buffer = aa_get_buffer(false);
old_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!buffer || old_buffer)
goto out;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, &old_path, old_buffer,
NULL, flags, NULL, false));
put_buffers(buffer, old_buffer);
out:
aa_put_buffer(buffer);
aa_put_buffer(old_buffer);
path_put(&old_path);
return error;
......@@ -461,11 +470,13 @@ int aa_mount_change_type(struct aa_label *label, const struct path *path,
flags &= (MS_REC | MS_SILENT | MS_SHARED | MS_PRIVATE | MS_SLAVE |
MS_UNBINDABLE);
get_buffers(buffer);
buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, NULL, NULL, NULL,
flags, NULL, false));
put_buffers(buffer);
aa_put_buffer(buffer);
return error;
}
......@@ -488,11 +499,17 @@ int aa_move_mount(struct aa_label *label, const struct path *path,
if (error)
return error;
get_buffers(buffer, old_buffer);
buffer = aa_get_buffer(false);
old_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!buffer || !old_buffer)
goto out;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, &old_path, old_buffer,
NULL, MS_MOVE, NULL, false));
put_buffers(buffer, old_buffer);
out:
aa_put_buffer(buffer);
aa_put_buffer(old_buffer);
path_put(&old_path);
return error;
......@@ -533,8 +550,17 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
}
}
get_buffers(buffer, dev_buffer);
buffer = aa_get_buffer(false);
if (!buffer) {
error = -ENOMEM;
goto out;
}
if (dev_path) {
dev_buffer = aa_get_buffer(false);
if (!dev_buffer) {
error = -ENOMEM;
goto out;
}
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, dev_path, dev_buffer,
type, flags, data, binary));
......@@ -543,7 +569,10 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
match_mnt_path_str(profile, path, buffer, dev_name,
type, flags, data, binary, NULL));
}
put_buffers(buffer, dev_buffer);
out:
aa_put_buffer(buffer);
aa_put_buffer(dev_buffer);
if (dev_path)
path_put(dev_path);
......@@ -591,10 +620,13 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
AA_BUG(!label);
AA_BUG(!mnt);
get_buffers(buffer);
buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
profile_umount(profile, &path, buffer));
put_buffers(buffer);
aa_put_buffer(buffer);
return error;
}
......@@ -667,8 +699,12 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
AA_BUG(!old_path);
AA_BUG(!new_path);
get_buffers(old_buffer, new_buffer);
target = fn_label_build(label, profile, GFP_ATOMIC,
old_buffer = aa_get_buffer(false);
new_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!old_buffer || !new_buffer)
goto out;
target = fn_label_build(label, profile, GFP_KERNEL,
build_pivotroot(profile, new_path, new_buffer,
old_path, old_buffer));
if (!target) {
......@@ -686,7 +722,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
/* already audited error */
error = PTR_ERR(target);
out:
put_buffers(old_buffer, new_buffer);
aa_put_buffer(old_buffer);
aa_put_buffer(new_buffer);
return error;
......
......@@ -582,7 +582,7 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
{
if (profile) {
if (profile->label.flags & FLAG_IMMUTIBLE) {
*info = "cannot replace immutible profile";
*info = "cannot replace immutable profile";
return -EPERM;
} else if (noreplace) {
*info = "profile already exists";
......@@ -856,7 +856,7 @@ static struct aa_profile *update_to_newest_parent(struct aa_profile *new)
ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
u32 mask, struct aa_loaddata *udata)
{
const char *ns_name, *info = NULL;
const char *ns_name = NULL, *info = NULL;
struct aa_ns *ns = NULL;
struct aa_load_ent *ent, *tmp;
struct aa_loaddata *rawdata_ent;
......@@ -1043,6 +1043,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
out:
aa_put_ns(ns);
aa_put_loaddata(udata);
kfree(ns_name);
if (error)
return error;
......
......@@ -16,6 +16,7 @@
#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zlib.h>
#include "include/apparmor.h"
#include "include/audit.h"
......@@ -139,9 +140,11 @@ bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
if (l->size != r->size)
return false;
if (l->compressed_size != r->compressed_size)
return false;
if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
return false;
return memcmp(l->data, r->data, r->size) == 0;
return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}
/*
......@@ -968,11 +971,14 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
e, error);
return error;
}
if (*ns && strcmp(*ns, name))
if (*ns && strcmp(*ns, name)) {
audit_iface(NULL, NULL, NULL, "invalid ns change", e,
error);
else if (!*ns)
*ns = name;
} else if (!*ns) {
*ns = kstrdup(name, GFP_KERNEL);
if (!*ns)
return -ENOMEM;
}
}
return 0;
......@@ -1039,6 +1045,105 @@ struct aa_load_ent *aa_load_ent_alloc(void)
return ent;
}
/*
 * deflate_compress - deflate a policy blob into a right-sized buffer
 * @src: uncompressed input of @slen bytes
 * @dst: on success, set to a newly allocated buffer holding the
 *       compressed data (caller frees with kvfree)
 * @dlen: on success, set to the compressed length
 *
 * Compresses into a worst-case-sized staging buffer, then shrinks the
 * result: krealloc when the staging buffer was kmalloc'd, otherwise a
 * fresh allocation plus memcpy.  Returns 0 or a negative errno.
 *
 * Note the label layout: fail_deflate sits after the function's normal
 * return and jumps back up to fail_stg_alloc so the shared
 * zlib_deflateEnd/workspace cleanup runs exactly once.
 */
static int deflate_compress(const char *src, size_t slen, char **dst,
size_t *dlen)
{
int error;
struct z_stream_s strm;
void *stgbuf, *dstbuf;
size_t stglen = deflateBound(slen);
memset(&strm, 0, sizeof(strm));
/* deflateBound overflowed - input too large to stage */
if (stglen < slen)
return -EFBIG;
strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
MAX_MEM_LEVEL),
GFP_KERNEL);
if (!strm.workspace)
return -ENOMEM;
error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
if (error != Z_OK) {
error = -ENOMEM;
goto fail_deflate_init;
}
stgbuf = kvzalloc(stglen, GFP_KERNEL);
if (!stgbuf) {
error = -ENOMEM;
goto fail_stg_alloc;
}
strm.next_in = src;
strm.avail_in = slen;
strm.next_out = stgbuf;
strm.avail_out = stglen;
/* single-shot deflate; anything short of stream end is a failure */
error = zlib_deflate(&strm, Z_FINISH);
if (error != Z_STREAM_END) {
error = -EINVAL;
goto fail_deflate;
}
error = 0;
if (is_vmalloc_addr(stgbuf)) {
/* krealloc cannot shrink vmalloc memory: copy to a new buffer */
dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
if (dstbuf) {
memcpy(dstbuf, stgbuf, strm.total_out);
kvfree(stgbuf);
}
} else
/*
* If the staging buffer was kmalloc'd, then using krealloc is
* probably going to be faster. The destination buffer will
* always be smaller, so it's just shrunk, avoiding a memcpy
*/
dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
if (!dstbuf) {
error = -ENOMEM;
goto fail_deflate;
}
*dst = dstbuf;
*dlen = strm.total_out;
fail_stg_alloc:
zlib_deflateEnd(&strm);
fail_deflate_init:
kvfree(strm.workspace);
return error;
fail_deflate:
kvfree(stgbuf);
goto fail_stg_alloc;
}
/*
 * compress_loaddata - compress a loaddata's payload in place
 * @data: loaddata whose ->data is replaced by its compressed form
 *
 * On success ->compressed_size reflects the stored size (equal to
 * ->size when compression is disabled) and the old uncompressed
 * buffer is freed.  Returns 0 or a negative errno.
 */
static int compress_loaddata(struct aa_loaddata *data)
{
AA_BUG(data->compressed_size > 0);
/*
* Shortcut the no compression case, else we increase the amount of
* storage required by a small amount
*/
if (aa_g_rawdata_compression_level != 0) {
void *udata = data->data;
int error = deflate_compress(udata, data->size, &data->data,
&data->compressed_size);
if (error)
return error;
/* old uncompressed payload is no longer referenced */
kvfree(udata);
} else
data->compressed_size = data->size;
return 0;
}
/**
* aa_unpack - unpack packed binary profile(s) data loaded from user space
* @udata: user data copied to kmem (NOT NULL)
......@@ -1107,6 +1212,9 @@ int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
goto fail;
}
}
error = compress_loaddata(udata);
if (error)
goto fail;
return 0;
fail_profile:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment