Commit af96397d authored by Heiko Carstens's avatar Heiko Carstens Committed by Linus Torvalds

kprobes: allow specifying a custom allocator for insn caches

The current two insn slot caches both use module_alloc/module_free to
allocate and free insn slot cache pages.

For s390 this is not sufficient since there is the need to allocate insn
slots that are either within the vmalloc module area or within dma memory.

Therefore add a mechanism which allows specifying a custom allocator for an
insn slot cache.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c802d64a
...@@ -268,6 +268,8 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p); ...@@ -268,6 +268,8 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
struct kprobe_insn_cache { struct kprobe_insn_cache {
struct mutex mutex; struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */
void (*free)(void *); /* free insn page */
struct list_head pages; /* list of kprobe_insn_page */ struct list_head pages; /* list of kprobe_insn_page */
size_t insn_size; /* size of instruction slot */ size_t insn_size; /* size of instruction slot */
int nr_garbage; int nr_garbage;
......
...@@ -112,6 +112,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = { ...@@ -112,6 +112,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
struct kprobe_insn_page { struct kprobe_insn_page {
struct list_head list; struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */ kprobe_opcode_t *insns; /* Page of instruction slots */
struct kprobe_insn_cache *cache;
int nused; int nused;
int ngarbage; int ngarbage;
char slot_used[]; char slot_used[];
...@@ -132,8 +133,20 @@ enum kprobe_slot_state { ...@@ -132,8 +133,20 @@ enum kprobe_slot_state {
SLOT_USED = 2, SLOT_USED = 2,
}; };
/*
 * Default ->alloc callback for an insn slot cache: hand back one page
 * from the module allocation area (see the nearby comment about
 * x86_64 %rip-relative fixups for why the module area is used).
 * Returns NULL on allocation failure.
 */
static void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	return page;
}
/*
 * Default ->free callback for an insn slot cache; releases a page
 * obtained from alloc_insn_page().  The NULL first argument means no
 * owning module is associated with the allocation.
 */
static void free_insn_page(void *page)
{
	module_free(NULL, page);
}
struct kprobe_insn_cache kprobe_insn_slots = { struct kprobe_insn_cache kprobe_insn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
.alloc = alloc_insn_page,
.free = free_insn_page,
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
.insn_size = MAX_INSN_SIZE, .insn_size = MAX_INSN_SIZE,
.nr_garbage = 0, .nr_garbage = 0,
...@@ -182,7 +195,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) ...@@ -182,7 +195,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
* kernel image and loaded module images reside. This is required * kernel image and loaded module images reside. This is required
* so x86_64 can correctly handle the %rip-relative fixups. * so x86_64 can correctly handle the %rip-relative fixups.
*/ */
kip->insns = module_alloc(PAGE_SIZE); kip->insns = c->alloc();
if (!kip->insns) { if (!kip->insns) {
kfree(kip); kfree(kip);
goto out; goto out;
...@@ -192,6 +205,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) ...@@ -192,6 +205,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
kip->slot_used[0] = SLOT_USED; kip->slot_used[0] = SLOT_USED;
kip->nused = 1; kip->nused = 1;
kip->ngarbage = 0; kip->ngarbage = 0;
kip->cache = c;
list_add(&kip->list, &c->pages); list_add(&kip->list, &c->pages);
slot = kip->insns; slot = kip->insns;
out: out:
...@@ -213,7 +227,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) ...@@ -213,7 +227,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
*/ */
if (!list_is_singular(&kip->list)) { if (!list_is_singular(&kip->list)) {
list_del(&kip->list); list_del(&kip->list);
module_free(NULL, kip->insns); kip->cache->free(kip->insns);
kfree(kip); kfree(kip);
} }
return 1; return 1;
...@@ -274,6 +288,8 @@ void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, ...@@ -274,6 +288,8 @@ void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
/* For optimized_kprobe buffer */ /* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = { struct kprobe_insn_cache kprobe_optinsn_slots = {
.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
.alloc = alloc_insn_page,
.free = free_insn_page,
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
/* .insn_size is initialized later */ /* .insn_size is initialized later */
.nr_garbage = 0, .nr_garbage = 0,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment