Commit 4675ff05 authored by Sasha Levin, committed by Linus Torvalds

kmemcheck: rip it out

Fix up makefiles, remove references, and git rm kmemcheck.

Link: http://lkml.kernel.org/r/20171007030159.22241-4-alexander.levin@verizon.com
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Vegard Nossum <vegardno@ifi.uio.no>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Tim Hansen <devtimhansen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d8be7566

Documentation/admin-guide/kernel-parameters.txt
@@ -1864,13 +1864,6 @@
 			Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
 			the default is off.
 
-	kmemcheck=	[X86] Boot-time kmemcheck enable/disable/one-shot mode
-			Valid arguments: 0, 1, 2
-			kmemcheck=0 (disabled)
-			kmemcheck=1 (enabled)
-			kmemcheck=2 (one-shot mode)
-			Default: 2 (one-shot mode)
-
 	kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
 			Default is 0 (don't ignore, but inject #GP)

Documentation/dev-tools/index.rst
@@ -21,7 +21,6 @@ whole; patches welcome!
    kasan
    ubsan
    kmemleak
-   kmemcheck
    gdb-kernel-debugging
    kgdb
    kselftest

Documentation/dev-tools/kmemcheck.rst (deleted; diff collapsed in the original view)

MAINTAINERS
@@ -7688,16 +7688,6 @@ F:	include/linux/kdb.h
 F:	include/linux/kgdb.h
 F:	kernel/debug/
 
-KMEMCHECK
-M:	Vegard Nossum <vegardno@ifi.uio.no>
-M:	Pekka Enberg <penberg@kernel.org>
-S:	Maintained
-F:	Documentation/dev-tools/kmemcheck.rst
-F:	arch/x86/include/asm/kmemcheck.h
-F:	arch/x86/mm/kmemcheck/
-F:	include/linux/kmemcheck.h
-F:	mm/kmemcheck.c
-
 KMEMLEAK
 M:	Catalin Marinas <catalin.marinas@arm.com>
 S:	Maintained

arch/x86/Kconfig
@@ -112,7 +112,6 @@ config X86
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN			if X86_64 && SPARSEMEM_VMEMMAP
 	select HAVE_ARCH_KGDB
-	select HAVE_ARCH_KMEMCHECK
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
 	select HAVE_ARCH_COMPAT_MMAP_BASES	if MMU && COMPAT
@@ -1430,7 +1429,7 @@ config ARCH_DMA_ADDR_T_64BIT
 config X86_DIRECT_GBPAGES
 	def_bool y
-	depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK
+	depends on X86_64 && !DEBUG_PAGEALLOC
 	---help---
 	  Certain kernel features effectively disable kernel
 	  linear 1 GB mappings (even if the CPU otherwise

arch/x86/include/asm/kmemcheck.h (deleted)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_X86_KMEMCHECK_H
#define ASM_X86_KMEMCHECK_H

#include <linux/types.h>
#include <asm/ptrace.h>

#ifdef CONFIG_KMEMCHECK
bool kmemcheck_active(struct pt_regs *regs);

void kmemcheck_show(struct pt_regs *regs);
void kmemcheck_hide(struct pt_regs *regs);

bool kmemcheck_fault(struct pt_regs *regs,
	unsigned long address, unsigned long error_code);
bool kmemcheck_trap(struct pt_regs *regs);
#else
static inline bool kmemcheck_active(struct pt_regs *regs)
{
	return false;
}

static inline void kmemcheck_show(struct pt_regs *regs)
{
}

static inline void kmemcheck_hide(struct pt_regs *regs)
{
}

static inline bool kmemcheck_fault(struct pt_regs *regs,
	unsigned long address, unsigned long error_code)
{
	return false;
}

static inline bool kmemcheck_trap(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_KMEMCHECK */

#endif

arch/x86/include/asm/string_32.h
@@ -179,8 +179,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
  * No 3D Now!
  */
 
-#ifndef CONFIG_KMEMCHECK
-
 #if (__GNUC__ >= 4)
 #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
 #else
@@ -189,13 +187,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
 		? __constant_memcpy((t), (f), (n))	\
 		: __memcpy((t), (f), (n)))
 #endif
-#else
-/*
- * kmemcheck becomes very happy if we use the REP instructions unconditionally,
- * because it means that we know both memory operands in advance.
- */
-#define memcpy(t, f, n) __memcpy((t), (f), (n))
-#endif
 #endif
 #endif /* !CONFIG_FORTIFY_SOURCE */

arch/x86/include/asm/string_64.h
@@ -33,7 +33,6 @@ extern void *memcpy(void *to, const void *from, size_t len);
 extern void *__memcpy(void *to, const void *from, size_t len);
 
 #ifndef CONFIG_FORTIFY_SOURCE
-#ifndef CONFIG_KMEMCHECK
 #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
 #define memcpy(dst, src, len)					\
 ({								\
@@ -46,13 +45,6 @@ extern void *__memcpy(void *to, const void *from, size_t len);
 	__ret;							\
 })
 #endif
-#else
-/*
- * kmemcheck becomes very happy if we use the REP instructions unconditionally,
- * because it means that we know both memory operands in advance.
- */
-#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
-#endif
 #endif /* !CONFIG_FORTIFY_SOURCE */
 
 #define __HAVE_ARCH_MEMSET

arch/x86/kernel/cpu/intel.c
@@ -187,21 +187,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86 == 6 && c->x86_model < 15)
 		clear_cpu_cap(c, X86_FEATURE_PAT);
 
-#ifdef CONFIG_KMEMCHECK
-	/*
-	 * P4s have a "fast strings" feature which causes single-
-	 * stepping REP instructions to only generate a #DB on
-	 * cache-line boundaries.
-	 *
-	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
-	 * (model 2) with the same problem.
-	 */
-	if (c->x86 == 15)
-		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
-				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
-			pr_info("kmemcheck: Disabling fast string operations\n");
-#endif
-
 	/*
 	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
 	 * clear the fast string and enhanced fast string CPU capabilities.

arch/x86/mm/Makefile
@@ -29,8 +29,6 @@ obj-$(CONFIG_X86_PTDUMP)	+= debug_pagetables.o
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
 
-obj-$(CONFIG_KMEMCHECK)		+= kmemcheck/
-
 KASAN_SANITIZE_kasan_init_$(BITS).o := n
 obj-$(CONFIG_KASAN)		+= kasan_init_$(BITS).o

arch/x86/mm/init.c
@@ -163,12 +163,11 @@ static int page_size_mask;
 static void __init probe_page_size_mask(void)
 {
 	/*
-	 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
-	 * use small pages.
+	 * For pagealloc debugging, identity mapping will use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
+	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
 		page_size_mask |= 1 << PG_LEVEL_2M;
 	else
 		direct_gbpages = 0;

arch/x86/mm/kmemcheck/Makefile (deleted)

obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o

arch/x86/mm/kmemcheck/error.c (deleted)

// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/kmemcheck.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/stacktrace.h>
#include <linux/string.h>

#include "error.h"
#include "shadow.h"

enum kmemcheck_error_type {
	KMEMCHECK_ERROR_INVALID_ACCESS,
	KMEMCHECK_ERROR_BUG,
};
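
/* Number of surrounding memory/shadow bytes saved with each error report. */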
#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)

struct kmemcheck_error {
	enum kmemcheck_error_type type;

	union {
		/* KMEMCHECK_ERROR_INVALID_ACCESS */
		struct {
			/* Kind of access that caused the error */
			enum kmemcheck_shadow state;
			/* Address and size of the erroneous read */
			unsigned long address;
			unsigned int size;
		};
	};

	struct pt_regs regs;
	struct stack_trace trace;
	unsigned long trace_entries[32];

	/* Each shadow state is compressed to one char. */
	unsigned char shadow_copy[SHADOW_COPY_SIZE];
	unsigned char memory_copy[SHADOW_COPY_SIZE];
};

/*
 * Create a ring queue of errors to output. We can't call printk() directly
 * from the kmemcheck traps, since this may call the console drivers and
 * result in a recursive fault.
 */
static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
static unsigned int error_count;
static unsigned int error_rd;
static unsigned int error_wr;
static unsigned int error_missed_count;
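
/*
 * error_next_wr()/error_next_rd() are the producer and consumer halves of
 * the FIFO above: the trap path reserves a slot (or bumps error_missed_count
 * when the queue is full) and the tasklet drains the slots later.
 */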
static struct kmemcheck_error *error_next_wr(void)
{
	struct kmemcheck_error *e;

	if (error_count == ARRAY_SIZE(error_fifo)) {
		++error_missed_count;
		return NULL;
	}

	e = &error_fifo[error_wr];
	if (++error_wr == ARRAY_SIZE(error_fifo))
		error_wr = 0;
	++error_count;
	return e;
}

static struct kmemcheck_error *error_next_rd(void)
{
	struct kmemcheck_error *e;

	if (error_count == 0)
		return NULL;

	e = &error_fifo[error_rd];
	if (++error_rd == ARRAY_SIZE(error_fifo))
		error_rd = 0;
	--error_count;
	return e;
}

void kmemcheck_error_recall(void)
{
	static const char *desc[] = {
		[KMEMCHECK_SHADOW_UNALLOCATED]		= "unallocated",
		[KMEMCHECK_SHADOW_UNINITIALIZED]	= "uninitialized",
		[KMEMCHECK_SHADOW_INITIALIZED]		= "initialized",
		[KMEMCHECK_SHADOW_FREED]		= "freed",
	};

	static const char short_desc[] = {
		[KMEMCHECK_SHADOW_UNALLOCATED]		= 'a',
		[KMEMCHECK_SHADOW_UNINITIALIZED]	= 'u',
		[KMEMCHECK_SHADOW_INITIALIZED]		= 'i',
		[KMEMCHECK_SHADOW_FREED]		= 'f',
	};

	struct kmemcheck_error *e;
	unsigned int i;

	e = error_next_rd();
	if (!e)
		return;

	switch (e->type) {
	case KMEMCHECK_ERROR_INVALID_ACCESS:
		printk(KERN_WARNING "WARNING: kmemcheck: Caught %d-bit read from %s memory (%p)\n",
			8 * e->size, e->state < ARRAY_SIZE(desc) ?
				desc[e->state] : "(invalid shadow state)",
			(void *) e->address);
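
		/*
		 * Dump the bytes around the faulting address, then one
		 * shadow-state character per byte (a = unallocated,
		 * u = uninitialized, i = initialized, f = freed), then a
		 * caret pointing at the offending byte.
		 */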
		printk(KERN_WARNING);
		for (i = 0; i < SHADOW_COPY_SIZE; ++i)
			printk(KERN_CONT "%02x", e->memory_copy[i]);
		printk(KERN_CONT "\n");

		printk(KERN_WARNING);
		for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
			if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
				printk(KERN_CONT " %c", short_desc[e->shadow_copy[i]]);
			else
				printk(KERN_CONT " ?");
		}
		printk(KERN_CONT "\n");
		printk(KERN_WARNING "%*c\n", 2 + 2
			* (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
		break;
	case KMEMCHECK_ERROR_BUG:
		printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
		break;
	}

	__show_regs(&e->regs, 1);
	print_stack_trace(&e->trace, 0);
}

static void do_wakeup(unsigned long data)
{
	while (error_count > 0)
		kmemcheck_error_recall();

	if (error_missed_count > 0) {
		printk(KERN_WARNING "kmemcheck: Lost %d error reports because "
			"the queue was too small\n", error_missed_count);
		error_missed_count = 0;
	}
}

static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0);

/*
 * Save the context of an error report.
 */
void kmemcheck_error_save(enum kmemcheck_shadow state,
	unsigned long address, unsigned int size, struct pt_regs *regs)
{
	static unsigned long prev_ip;

	struct kmemcheck_error *e;
	void *shadow_copy;
	void *memory_copy;

	/* Don't report several adjacent errors from the same EIP. */
	if (regs->ip == prev_ip)
		return;
	prev_ip = regs->ip;

	e = error_next_wr();
	if (!e)
		return;

	e->type = KMEMCHECK_ERROR_INVALID_ACCESS;

	e->state = state;
	e->address = address;
	e->size = size;

	/* Save regs */
	memcpy(&e->regs, regs, sizeof(*regs));

	/* Save stack trace */
	e->trace.nr_entries = 0;
	e->trace.entries = e->trace_entries;
	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
	e->trace.skip = 0;
	save_stack_trace_regs(regs, &e->trace);

	/* Round address down to the nearest SHADOW_COPY_SIZE boundary */
	shadow_copy = kmemcheck_shadow_lookup(address
		& ~(SHADOW_COPY_SIZE - 1));
	BUG_ON(!shadow_copy);

	memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE);
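
	/*
	 * Temporarily make the page accessible (show) so the real memory
	 * contents can be copied out, then hide it again so that further
	 * accesses keep trapping.
	 */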
	kmemcheck_show_addr(address);
	memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1));
	memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE);
	kmemcheck_hide_addr(address);

	tasklet_hi_schedule_first(&kmemcheck_tasklet);
}

/*
 * Save the context of a kmemcheck bug.
 */
void kmemcheck_error_save_bug(struct pt_regs *regs)
{
	struct kmemcheck_error *e;

	e = error_next_wr();
	if (!e)
		return;

	e->type = KMEMCHECK_ERROR_BUG;

	memcpy(&e->regs, regs, sizeof(*regs));

	e->trace.nr_entries = 0;
	e->trace.entries = e->trace_entries;
	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
	e->trace.skip = 1;
	save_stack_trace(&e->trace);

	tasklet_hi_schedule_first(&kmemcheck_tasklet);
}

arch/x86/mm/kmemcheck/error.h (deleted)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H
#define ARCH__X86__MM__KMEMCHECK__ERROR_H

#include <linux/ptrace.h>

#include "shadow.h"

void kmemcheck_error_save(enum kmemcheck_shadow state,
	unsigned long address, unsigned int size, struct pt_regs *regs);

void kmemcheck_error_save_bug(struct pt_regs *regs);

void kmemcheck_error_recall(void);

#endif

arch/x86/mm/kmemcheck/kmemcheck.c (deleted; diff collapsed in the original view)

arch/x86/mm/kmemcheck/opcode.c (deleted)

// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>

#include "opcode.h"

static bool opcode_is_prefix(uint8_t b)
{
	return
		/* Group 1 */
		b == 0xf0 || b == 0xf2 || b == 0xf3
		/* Group 2 */
		|| b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
		|| b == 0x64 || b == 0x65
		/* Group 3 */
		|| b == 0x66
		/* Group 4 */
		|| b == 0x67;
}

#ifdef CONFIG_X86_64
static bool opcode_is_rex_prefix(uint8_t b)
{
	return (b & 0xf0) == 0x40;
}
#else
static bool opcode_is_rex_prefix(uint8_t b)
{
	return false;
}
#endif
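
/* REX.W (bit 3 of a REX prefix) selects a 64-bit operand size. */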
#define REX_W (1 << 3)

/*
 * This is a VERY crude opcode decoder. We only need to find the size of the
 * load/store that caused our #PF and this should work for all the opcodes
 * that we care about. Moreover, the ones who invented this instruction set
 * should be shot.
 */
void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
{
	/* Default operand size */
	int operand_size_override = 4;

	/* prefixes */
	for (; opcode_is_prefix(*op); ++op) {
		if (*op == 0x66)
			operand_size_override = 2;
	}

	/* REX prefix */
	if (opcode_is_rex_prefix(*op)) {
		uint8_t rex = *op;

		++op;
		if (rex & REX_W) {
			switch (*op) {
			case 0x63:
				*size = 4;
				return;
			case 0x0f:
				++op;

				switch (*op) {
				case 0xb6:
				case 0xbe:
					*size = 1;
					return;
				case 0xb7:
				case 0xbf:
					*size = 2;
					return;
				}

				break;
			}

			*size = 8;
			return;
		}
	}

	/* escape opcode */
	if (*op == 0x0f) {
		++op;

		/*
		 * These are move with zero-extend and sign-extend,
		 * respectively; we don't have to think about 0xb6/0xbe,
		 * because that case is already handled by the conditional
		 * below.
		 */
		if (*op == 0xb7 || *op == 0xbf)
			operand_size_override = 2;
	}
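
	/*
	 * For the remaining opcodes, bit 0 of the primary opcode selects a
	 * byte operand (0) versus a full-size operand (1).
	 */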
	*size = (*op & 1) ? operand_size_override : 1;
}

const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
{
	/* skip prefixes */
	while (opcode_is_prefix(*op))
		++op;
	if (opcode_is_rex_prefix(*op))
		++op;
	return op;
}

arch/x86/mm/kmemcheck/opcode.h (deleted)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
#define ARCH__X86__MM__KMEMCHECK__OPCODE_H

#include <linux/types.h>

void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);

#endif

arch/x86/mm/kmemcheck/pte.c (deleted)

// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>

#include <asm/pgtable.h>

#include "pte.h"
pte_t *kmemcheck_pte_lookup(unsigned long address)
{
	pte_t *pte;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (!pte)
		return NULL;
	if (level != PG_LEVEL_4K)
		return NULL;
	if (!pte_hidden(*pte))
		return NULL;

	return pte;
}

arch/x86/mm/kmemcheck/pte.h (deleted)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H
#define ARCH__X86__MM__KMEMCHECK__PTE_H

#include <linux/mm.h>

#include <asm/pgtable.h>

pte_t *kmemcheck_pte_lookup(unsigned long address);

#endif

arch/x86/mm/kmemcheck/selftest.c (deleted)

// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/kernel.h>

#include "opcode.h"
#include "selftest.h"

struct selftest_opcode {
	unsigned int expected_size;
	const uint8_t *insn;
	const char *desc;
};
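
/*
 * Each entry pairs a raw instruction encoding with the memory-operand size
 * (in bytes) that kmemcheck_opcode_decode() is expected to report for it.
 */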
static const struct selftest_opcode selftest_opcodes[] = {
	/* REP MOVS */
	{1, "\xf3\xa4",			"rep movsb <mem8>, <mem8>"},
	{4, "\xf3\xa5",			"rep movsl <mem32>, <mem32>"},

	/* MOVZX / MOVZXD */
	{1, "\x66\x0f\xb6\x51\xf8",	"movzwq <mem8>, <reg16>"},
	{1, "\x0f\xb6\x51\xf8",		"movzwq <mem8>, <reg32>"},

	/* MOVSX / MOVSXD */
	{1, "\x66\x0f\xbe\x51\xf8",	"movswq <mem8>, <reg16>"},
	{1, "\x0f\xbe\x51\xf8",		"movswq <mem8>, <reg32>"},

#ifdef CONFIG_X86_64
	/* MOVZX / MOVZXD */
	{1, "\x49\x0f\xb6\x51\xf8",	"movzbq <mem8>, <reg64>"},
	{2, "\x49\x0f\xb7\x51\xf8",	"movzbq <mem16>, <reg64>"},

	/* MOVSX / MOVSXD */
	{1, "\x49\x0f\xbe\x51\xf8",	"movsbq <mem8>, <reg64>"},
	{2, "\x49\x0f\xbf\x51\xf8",	"movsbq <mem16>, <reg64>"},
	{4, "\x49\x63\x51\xf8",		"movslq <mem32>, <reg64>"},
#endif
};

static bool selftest_opcode_one(const struct selftest_opcode *op)
{
	unsigned size;

	kmemcheck_opcode_decode(op->insn, &size);

	if (size == op->expected_size)
		return true;

	printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n",
		op->desc, op->expected_size, size);
	return false;
}

static bool selftest_opcodes_all(void)
{
	bool pass = true;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i)
		pass = pass && selftest_opcode_one(&selftest_opcodes[i]);

	return pass;
}

bool kmemcheck_selftest(void)
{
	bool pass = true;

	pass = pass && selftest_opcodes_all();

	return pass;
}

arch/x86/mm/kmemcheck/selftest.h (deleted)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H
#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H

bool kmemcheck_selftest(void);

#endif

arch/x86/mm/kmemcheck/shadow.c (deleted)

#include <linux/kmemcheck.h>
#include <linux/export.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "pte.h"
#include "shadow.h"

/*
 * Return the shadow address for the given address. Returns NULL if the
 * address is not tracked.
 *
 * We need to be extremely careful not to follow any invalid pointers,
 * because this function can be called for *any* possible address.
 */
void *kmemcheck_shadow_lookup(unsigned long address)
{
	pte_t *pte;
	struct page *page;

	if (!virt_addr_valid(address))
		return NULL;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return NULL;

	page = virt_to_page(address);
	if (!page->shadow)
		return NULL;

	return page->shadow + (address & (PAGE_SIZE - 1));
}
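
/*
 * Set the shadow state for n bytes starting at address, one page at a time:
 * the head fragment up to the first page boundary, then any whole pages,
 * then the tail fragment. Untracked pages are simply skipped.
 */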
static void mark_shadow(void *address, unsigned int n,
	enum kmemcheck_shadow status)
{
	unsigned long addr = (unsigned long) address;
	unsigned long last_addr = addr + n - 1;
	unsigned long page = addr & PAGE_MASK;
	unsigned long last_page = last_addr & PAGE_MASK;
	unsigned int first_n;
	void *shadow;

	/* If the memory range crosses a page boundary, stop there. */
	if (page == last_page)
		first_n = n;
	else
		first_n = page + PAGE_SIZE - addr;

	shadow = kmemcheck_shadow_lookup(addr);
	if (shadow)
		memset(shadow, status, first_n);

	addr += first_n;
	n -= first_n;

	/* Do full-page memset()s. */
	while (n >= PAGE_SIZE) {
		shadow = kmemcheck_shadow_lookup(addr);
		if (shadow)
			memset(shadow, status, PAGE_SIZE);

		addr += PAGE_SIZE;
		n -= PAGE_SIZE;
	}

	/* Do the remaining page, if any. */
	if (n > 0) {
		shadow = kmemcheck_shadow_lookup(addr);
		if (shadow)
			memset(shadow, status, n);
	}
}

void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
}

void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
}

/*
 * Fill the shadow memory of the given address such that the memory at that
 * address is marked as being initialized.
 */
void kmemcheck_mark_initialized(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
}
EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);

void kmemcheck_mark_freed(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
}

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
}

void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
}

void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
}

enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
{
#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
	uint8_t *x;
	unsigned int i;

	x = shadow;

	/*
	 * Make sure _some_ bytes are initialized. Gcc frequently generates
	 * code to access neighboring bytes.
	 */
	for (i = 0; i < size; ++i) {
		if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
			return x[i];
	}

	return x[0];
#else
	return kmemcheck_shadow_test_all(shadow, size);
#endif
}

enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, unsigned int size)
{
	uint8_t *x;
	unsigned int i;

	x = shadow;

	/* All bytes must be initialized. */
	for (i = 0; i < size; ++i) {
		if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
			return x[i];
	}

	return x[0];
}

void kmemcheck_shadow_set(void *shadow, unsigned int size)
{
	uint8_t *x;
	unsigned int i;

	x = shadow;
	for (i = 0; i < size; ++i)
		x[i] = KMEMCHECK_SHADOW_INITIALIZED;
}

arch/x86/mm/kmemcheck/shadow.h (deleted)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H
#define ARCH__X86__MM__KMEMCHECK__SHADOW_H

enum kmemcheck_shadow {
	KMEMCHECK_SHADOW_UNALLOCATED,
	KMEMCHECK_SHADOW_UNINITIALIZED,
	KMEMCHECK_SHADOW_INITIALIZED,
	KMEMCHECK_SHADOW_FREED,
};

void *kmemcheck_shadow_lookup(unsigned long address);

enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow,
	unsigned int size);
void kmemcheck_shadow_set(void *shadow, unsigned int size);

#endif

include/linux/interrupt.h
@@ -594,21 +594,6 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
 		__tasklet_hi_schedule(t);
 }
 
-extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
-
-/*
- * This version avoids touching any other tasklets. Needed for kmemcheck
- * in order not to take any page faults while enqueueing this tasklet;
- * consider VERY carefully whether you really need this or
- * tasklet_hi_schedule()...
- */
-static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
-{
-	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
-		__tasklet_hi_schedule_first(t);
-}
-
 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 {
 	atomic_inc(&t->count);

include/linux/kmemcheck.h (deleted)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
									\
		if (!ptr)						\
			break;						\
									\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)

#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)

#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
	unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define kmemcheck_annotate_bitfield(ptr, name)	\
	do {					\
	} while (0)
#define kmemcheck_annotate_variable(var)	\
	do {					\
	} while (0)

#endif /* CONFIG_KMEMCHECK */

#endif /* LINUX_KMEMCHECK_H */

kernel/softirq.c
@@ -486,16 +486,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
-void __tasklet_hi_schedule_first(struct tasklet_struct *t)
-{
-	lockdep_assert_irqs_disabled();
-
-	t->next = __this_cpu_read(tasklet_hi_vec.head);
-	__this_cpu_write(tasklet_hi_vec.head, t);
-	__raise_softirq_irqoff(HI_SOFTIRQ);
-}
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-
 static __latent_entropy void tasklet_action(struct softirq_action *a)
 {
 	struct tasklet_struct *list;

kernel/sysctl.c
@@ -30,7 +30,6 @@
 #include <linux/proc_fs.h>
 #include <linux/security.h>
 #include <linux/ctype.h>
-#include <linux/kmemcheck.h>
 #include <linux/kmemleak.h>
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -1173,15 +1172,6 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one_thousand,
 	},
-#endif
-#ifdef CONFIG_KMEMCHECK
-	{
-		.procname	= "kmemcheck",
-		.data		= &kmemcheck_enabled,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
 #endif
 	{
 		.procname	= "panic_on_warn",

lib/Kconfig.debug
@@ -504,7 +504,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
+	depends on DEBUG_KERNEL && SLAB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -516,7 +516,7 @@ config DEBUG_SLAB_LEAK
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
-	depends on SLUB && SLUB_DEBUG && !KMEMCHECK
+	depends on SLUB && SLUB_DEBUG
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
@@ -730,8 +730,6 @@ config DEBUG_STACKOVERFLOW
 	  If in doubt, say "N".
 
-source "lib/Kconfig.kmemcheck"
-
 source "lib/Kconfig.kasan"
 
 endmenu # "Memory Debugging"

lib/Kconfig.kmemcheck (deleted)

config HAVE_ARCH_KMEMCHECK
	bool

if HAVE_ARCH_KMEMCHECK

menuconfig KMEMCHECK
	bool "kmemcheck: trap use of uninitialized memory"
	depends on DEBUG_KERNEL
	depends on !X86_USE_3DNOW
	depends on SLUB || SLAB
	depends on !CC_OPTIMIZE_FOR_SIZE
	depends on !FUNCTION_TRACER
	select FRAME_POINTER
	select STACKTRACE
	default n
	help
	  This option enables tracing of dynamically allocated kernel memory
	  to see if memory is used before it has been given an initial value.
	  Be aware that this requires half of your memory for bookkeeping and
	  will insert extra code at *every* read and write to tracked memory,
	  thus slowing down the kernel code (but user code is unaffected).

	  The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
	  or enable kmemcheck at boot-time. If the kernel is started with
	  kmemcheck=0, the large memory and CPU overhead is not incurred.

choice
	prompt "kmemcheck: default mode at boot"
	depends on KMEMCHECK
	default KMEMCHECK_ONESHOT_BY_DEFAULT
	help
	  This option controls the default behaviour of kmemcheck when the
	  kernel boots and no kmemcheck= parameter is given.

config KMEMCHECK_DISABLED_BY_DEFAULT
	bool "disabled"
	depends on KMEMCHECK

config KMEMCHECK_ENABLED_BY_DEFAULT
	bool "enabled"
	depends on KMEMCHECK

config KMEMCHECK_ONESHOT_BY_DEFAULT
	bool "one-shot"
	depends on KMEMCHECK
	help
	  In one-shot mode, only the first error detected is reported before
	  kmemcheck is disabled.

endchoice

config KMEMCHECK_QUEUE_SIZE
	int "kmemcheck: error queue size"
	depends on KMEMCHECK
	default 64
	help
	  Select the maximum number of errors to store in the queue. Since
	  errors can occur virtually anywhere and in any context, we need a
	  temporary storage area which is guaranteed not to generate any
	  other faults. The queue will be emptied as soon as a tasklet may
	  be scheduled. If the queue is full, new error reports will be
	  lost.

config KMEMCHECK_SHADOW_COPY_SHIFT
	int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
	depends on KMEMCHECK
	range 2 8
	default 5
	help
	  Select the number of shadow bytes to save along with each entry of
	  the queue. These bytes indicate what parts of an allocation are
	  initialized, uninitialized, etc. and will be displayed when an
	  error is detected to help the debugging of a particular problem.

config KMEMCHECK_PARTIAL_OK
	bool "kmemcheck: allow partially uninitialized memory"
	depends on KMEMCHECK
	default y
	help
	  This option works around certain GCC optimizations that produce
	  32-bit reads from 16-bit variables where the upper 16 bits are
	  thrown away afterwards. This may of course also hide some real
	  bugs.

config KMEMCHECK_BITOPS_OK
	bool "kmemcheck: allow bit-field manipulation"
	depends on KMEMCHECK
	default n
	help
	  This option silences warnings that would be generated for bit-field
	  accesses where not all the bits are initialized at the same time.
	  This may also hide some real bugs.

endif

mm/Kconfig.debug
@@ -11,7 +11,6 @@ config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL
 	depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
-	depends on !KMEMCHECK
 	select PAGE_EXTENSION
 	select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	---help---

mm/Makefile
@@ -17,7 +17,6 @@ KCOV_INSTRUMENT_slub.o := n
 KCOV_INSTRUMENT_page_alloc.o := n
 KCOV_INSTRUMENT_debug-pagealloc.o := n
 KCOV_INSTRUMENT_kmemleak.o := n
-KCOV_INSTRUMENT_kmemcheck.o := n
 KCOV_INSTRUMENT_memcontrol.o := n
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
@@ -70,7 +69,6 @@ obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += page_poison.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
-obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
 obj-$(CONFIG_KASAN)	+= kasan/
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o

mm/kmemcheck.c (deleted)

// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>

void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags, order);
	if (!shadow) {
		if (printk_ratelimit())
			pr_err("kmemcheck: failed to allocate shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}

void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;
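
	/*
	 * Make the tracked pages ordinarily accessible again (present in
	 * the page tables) before the shadow pages are torn down.
	 */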
	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);
	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	if (unlikely(!object)) /* Skip object if allocation failed */
		return;

	/*
	 * Has already been memset(), which initializes the shadow for us
	 * as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}

void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU))
		kmemcheck_mark_freed(object, size);
}

void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}

mm/slub.c
@@ -1371,7 +1371,7 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x)
 	 * So in order to make the debug calls that expect irqs to be
 	 * disabled we need to disable interrupts temporarily.
 	 */
-#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
+#ifdef CONFIG_LOCKDEP
 	{
 		unsigned long flags;
@@ -1399,8 +1399,7 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
  * Compiler cannot detect this function can be removed if slab_free_hook()
  * evaluates to nothing. Thus, catch all relevant config debug options here.
  */
-#if defined(CONFIG_KMEMCHECK) ||		\
-	defined(CONFIG_LOCKDEP)	||		\
+#if defined(CONFIG_LOCKDEP)	||		\
 	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
 	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
 	defined(CONFIG_KASAN)

scripts/kernel-doc
@@ -2182,8 +2182,6 @@ sub dump_struct($$) {
 	# strip comments:
 	$members =~ s/\/\*.*?\*\///gos;
 	$nested =~ s/\/\*.*?\*\///gos;
-	# strip kmemcheck_bitfield_{begin,end}.*;
-	$members =~ s/kmemcheck_bitfield_.*?;//gos;
 	# strip attributes
 	$members =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
 	$members =~ s/__aligned\s*\([^;]*\)//gos;

tools/lib/lockdep/uinclude/linux/kmemcheck.h (deleted)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_
#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

#endif