Commit 47a92dfb authored by Suren Baghdasaryan, committed by Andrew Morton

lib: prevent module unloading if memory is not freed

Skip freeing the module's data section if there are non-zero allocation tags,
because otherwise, once those allocations are freed, any access to their code
tags would be a use-after-free. (A minimal sketch of the resulting
module_unload contract follows the trailers below.)

Link: https://lkml.kernel.org/r/20240321163705.3067592-13-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a4735739
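
The sketch below illustrates the contract this patch introduces for
codetag_type_desc::module_unload: return true only when no tag in the module's
section still has live allocations, since those tags would otherwise be freed
while outstanding allocations still point at them. This is a minimal,
self-contained illustration, not the in-tree alloc_tag code; the
my_tag/my_counters types and my_module_unload() are hypothetical names invented
for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-call-site counters; not the kernel's layout. */
struct my_counters {
	size_t bytes;	/* bytes currently charged to this call site */
	size_t calls;	/* live allocations charged to this call site */
};

/* Hypothetical code tag as it would sit in the module's data section. */
struct my_tag {
	const char *function;		/* allocating function in the module */
	struct my_counters counters;
};

/*
 * Walk the module's tag range [start, stop).  Any live allocation makes a
 * clean unload impossible, because freeing the section would leave those
 * allocations pointing at freed tags.
 */
static bool my_module_unload(struct my_tag *start, struct my_tag *stop)
{
	bool unload_ok = true;

	for (struct my_tag *tag = start; tag != stop; tag++) {
		if (tag->counters.bytes || tag->counters.calls) {
			fprintf(stderr, "leaked %zu bytes from %s\n",
				tag->counters.bytes, tag->function);
			unload_ok = false;
		}
	}
	return unload_ok;
}

int main(void)
{
	struct my_tag tags[] = {
		{ "mod_init",  { 0, 0 } },	/* everything freed */
		{ "mod_ioctl", { 64, 1 } },	/* one allocation still alive */
	};

	/* Mirrors free_module(): false means "cannot unload cleanly". */
	return my_module_unload(tags, tags + 2) ? 0 : 1;
}
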
@@ -35,7 +35,7 @@ struct codetag_type_desc {
 	size_t tag_size;
 	void (*module_load)(struct codetag_type *cttype,
 			    struct codetag_module *cmod);
-	void (*module_unload)(struct codetag_type *cttype,
+	bool (*module_unload)(struct codetag_type *cttype,
 			      struct codetag_module *cmod);
 };
@@ -71,10 +71,10 @@ codetag_register_type(const struct codetag_type_desc *desc);
 #if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES)
 void codetag_load_module(struct module *mod);
-void codetag_unload_module(struct module *mod);
+bool codetag_unload_module(struct module *mod);
 #else
 static inline void codetag_load_module(struct module *mod) {}
-static inline void codetag_unload_module(struct module *mod) {}
+static inline bool codetag_unload_module(struct module *mod) { return true; }
 #endif
 
 #endif /* _LINUX_CODETAG_H */
@@ -1211,15 +1211,19 @@ static void *module_memory_alloc(unsigned int size, enum mod_mem_type type)
 	return module_alloc(size);
 }
 
-static void module_memory_free(void *ptr, enum mod_mem_type type)
+static void module_memory_free(void *ptr, enum mod_mem_type type,
+			       bool unload_codetags)
 {
+	if (!unload_codetags && mod_mem_type_is_core_data(type))
+		return;
+
 	if (mod_mem_use_vmalloc(type))
 		vfree(ptr);
 	else
 		module_memfree(ptr);
 }
 
-static void free_mod_mem(struct module *mod)
+static void free_mod_mem(struct module *mod, bool unload_codetags)
 {
 	for_each_mod_mem_type(type) {
 		struct module_memory *mod_mem = &mod->mem[type];
@@ -1230,20 +1234,27 @@ static void free_mod_mem(struct module *mod)
 		/* Free lock-classes; relies on the preceding sync_rcu(). */
 		lockdep_free_key_range(mod_mem->base, mod_mem->size);
 		if (mod_mem->size)
-			module_memory_free(mod_mem->base, type);
+			module_memory_free(mod_mem->base, type,
+					   unload_codetags);
 	}
 
 	/* MOD_DATA hosts mod, so free it at last */
 	lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
-	module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA);
+	module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA, unload_codetags);
 }
 
 /* Free a module, remove from lists, etc. */
 static void free_module(struct module *mod)
 {
+	bool unload_codetags;
+
 	trace_module_free(mod);
-	codetag_unload_module(mod);
+	unload_codetags = codetag_unload_module(mod);
+	if (!unload_codetags)
+		pr_warn("%s: memory allocation(s) from the module still alive, cannot unload cleanly\n",
+			mod->name);
 
 	mod_sysfs_teardown(mod);
 
 	/*
@@ -1285,7 +1296,7 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	percpu_modfree(mod);
 
-	free_mod_mem(mod);
+	free_mod_mem(mod, unload_codetags);
 }
 
 void *__symbol_get(const char *symbol)
@@ -2298,7 +2309,7 @@ static int move_module(struct module *mod, struct load_info *info)
 	return 0;
 out_enomem:
 	for (t--; t >= 0; t--)
-		module_memory_free(mod->mem[t].base, t);
+		module_memory_free(mod->mem[t].base, t, true);
 	return ret;
 }
@@ -2428,7 +2439,7 @@ static void module_deallocate(struct module *mod, struct load_info *info)
 	percpu_modfree(mod);
 	module_arch_freeing_init(mod);
-	free_mod_mem(mod);
+	free_mod_mem(mod, true);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/seq_buf.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 struct codetag_type {
 	struct list_head link;
@@ -206,12 +207,13 @@ void codetag_load_module(struct module *mod)
 	mutex_unlock(&codetag_lock);
 }
 
-void codetag_unload_module(struct module *mod)
+bool codetag_unload_module(struct module *mod)
 {
 	struct codetag_type *cttype;
+	bool unload_ok = true;
 
 	if (!mod)
-		return;
+		return true;
 
 	mutex_lock(&codetag_lock);
 	list_for_each_entry(cttype, &codetag_types, link) {
@@ -228,7 +230,8 @@ void codetag_unload_module(struct module *mod)
 		}
 		if (found) {
 			if (cttype->desc.module_unload)
-				cttype->desc.module_unload(cttype, cmod);
+				if (!cttype->desc.module_unload(cttype, cmod))
+					unload_ok = false;
 
 			cttype->count -= range_size(cttype, &cmod->range);
 			idr_remove(&cttype->mod_idr, mod_id);
@@ -237,6 +240,8 @@ void codetag_unload_module(struct module *mod)
 		up_write(&cttype->mod_lock);
 	}
 	mutex_unlock(&codetag_lock);
+
+	return unload_ok;
 }
 
 #else /* CONFIG_MODULES */
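
For completeness, a small stand-alone sketch of the decision the patched
module_memory_free() makes: which memory types are intentionally leaked when
codetag_unload_module() reports live tags. The enum values and helpers below
are simplified stand-ins invented for the example, not the kernel's
mod_mem_type or mod_mem_type_is_core_data().

#include <stdbool.h>

/* Simplified stand-in for the kernel's module memory types. */
enum mem_type { MEM_TEXT, MEM_DATA, MEM_RODATA, MEM_INIT_TEXT, MEM_INIT_DATA };

/* In this sketch, core (non-init) data sections host the code tags. */
static bool is_core_data(enum mem_type type)
{
	return type == MEM_DATA || type == MEM_RODATA;
}

/*
 * Mirrors the patched decision: when tags are still live
 * (unload_codetags == false), core data is deliberately kept mapped so the
 * counters referenced by outstanding allocations stay valid; all other
 * section types are still freed as before.
 */
static bool should_free(enum mem_type type, bool unload_codetags)
{
	return unload_codetags || !is_core_data(type);
}

int main(void)
{
	/* Data is skipped only while tags are live; text is always freed. */
	return (should_free(MEM_DATA, false) == false &&
		should_free(MEM_TEXT, false) == true) ? 0 : 1;
}
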