Commit 75f296d9 authored by Levin, Alexander (Sasha Levin), committed by Linus Torvalds

kmemcheck: stop using GFP_NOTRACK and SLAB_NOTRACK

Convert all allocation sites that used a NOTRACK flag (__GFP_NOTRACK, __GFP_NOTRACK_FALSE_POSITIVE, or SLAB_NOTRACK) to stop using it.

Link: http://lkml.kernel.org/r/20171007030159.22241-3-alexander.levin@verizon.com
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Hansen <devtimhansen@gmail.com>
Cc: Vegard Nossum <vegardno@ifi.uio.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 49502766
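
The change is mechanical throughout: each call site below simply drops the kmemcheck NOTRACK flag from its GFP mask or slab-flags argument and is otherwise untouched. As a sketch of the pattern, using the PGALLOC_GFP definition from the first hunk as the representative example:

	/* before: page-table pages opted out of kmemcheck tracking */
	#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)

	/* after: flag dropped; the allocation itself (zeroed GFP_KERNEL
	 * pages) is unchanged */
	#define PGALLOC_GFP	(GFP_KERNEL | __GFP_ZERO)

The same one-line substitution covers SLAB_NOTRACK in kmem_cache_create() flag arguments and __GFP_NOTRACK_FALSE_POSITIVE at the two call sites that used it.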
arch/arm/include/asm/pgalloc.h
@@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
-#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_ZERO)
 
 static inline void clean_pte_table(pte_t *pte)
 {
arch/arm64/include/asm/pgalloc.h
@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()		do { } while (0)
 
-#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_ZERO)
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
arch/powerpc/include/asm/pgalloc.h
@@ -18,7 +18,7 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
 }
 #endif /* MODULE */
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
 
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
arch/sh/kernel/dwarf.c
@@ -1172,11 +1172,11 @@ static int __init dwarf_unwinder_init(void)
 	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
 			sizeof(struct dwarf_frame), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
 
 	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
 			sizeof(struct dwarf_reg), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
 
 	dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
 					dwarf_frame_cachep);
arch/sh/kernel/process.c
@@ -59,7 +59,7 @@ void arch_task_cache_init(void)
 	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
 					       __alignof__(union thread_xstate),
-					       SLAB_PANIC | SLAB_NOTRACK, NULL);
+					       SLAB_PANIC, NULL);
 }
 
 #ifdef CONFIG_SH_FPU_EMU
arch/sparc/mm/init_64.c
@@ -2927,7 +2927,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 			    unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	pte_t *pte = NULL;
 
 	if (page)
@@ -2939,7 +2939,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
 			unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
arch/unicore32/include/asm/pgalloc.h
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)			get_pgd_slow(mm)
 #define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
 
-#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_ZERO)
 
 /*
  * Allocate one PTE table.
arch/x86/kernel/espfix_64.c
@@ -57,7 +57,7 @@
 # error "Need more virtual address space for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
arch/x86/mm/init.c
@@ -92,8 +92,7 @@ __ref void *alloc_low_pages(unsigned int num)
 		unsigned int order;
 
 		order = get_order((unsigned long)num << PAGE_SHIFT);
-		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
-						__GFP_ZERO, order);
+		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
 	}
 
 	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
arch/x86/mm/init_64.c
@@ -184,7 +184,7 @@ static __ref void *spp_getpage(void)
 	void *ptr;
 
 	if (after_bootmem)
-		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
+		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
 	else
 		ptr = alloc_bootmem_pages(PAGE_SIZE);
 
arch/x86/mm/pageattr.c
@@ -753,7 +753,7 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
 
 	if (!debug_pagealloc_enabled())
 		spin_unlock(&cpa_lock);
-	base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
+	base = alloc_pages(GFP_KERNEL, 0);
 	if (!debug_pagealloc_enabled())
 		spin_lock(&cpa_lock);
 	if (!base)
@@ -904,7 +904,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 
 static int alloc_pte_page(pmd_t *pmd)
 {
-	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
 	if (!pte)
 		return -1;
@@ -914,7 +914,7 @@ static int alloc_pte_page(pmd_t *pmd)
 
 static int alloc_pmd_page(pud_t *pud)
 {
-	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
 	if (!pmd)
 		return -1;
@@ -1120,7 +1120,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 	pgd_entry = cpa->pgd + pgd_index(addr);
 
 	if (pgd_none(*pgd_entry)) {
-		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
 		if (!p4d)
 			return -1;
@@ -1132,7 +1132,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 	 */
 	p4d = p4d_offset(pgd_entry, addr);
 	if (p4d_none(*p4d)) {
-		pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
 		if (!pud)
 			return -1;
arch/x86/mm/pgtable.c
@@ -7,7 +7,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
-#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
 
 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
arch/x86/platform/efi/efi_64.c
@@ -207,7 +207,7 @@ int __init efi_alloc_page_tables(void)
 	if (efi_enabled(EFI_OLD_MEMMAP))
 		return 0;
 
-	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
+	gfp_mask = GFP_KERNEL | __GFP_ZERO;
 	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
 	if (!efi_pgd)
 		return -ENOMEM;
crypto/xor.c
@@ -122,12 +122,7 @@ calibrate_xor_blocks(void)
 		goto out;
 	}
 
-	/*
-	 * Note: Since the memory is not actually used for _anything_ but to
-	 * test the XOR speed, we don't really want kmemcheck to warn about
-	 * reading uninitialized bytes here.
-	 */
-	b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
+	b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
 	if (!b1) {
 		printk(KERN_WARNING "xor: Yikes! No memory available.\n");
 		return -ENOMEM;
include/linux/thread_info.h
@@ -44,10 +44,9 @@ enum {
 #endif
 
 #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
-# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
-				 __GFP_ZERO)
+# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)
 #else
-# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
+# define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT)
 #endif
 
 /*
init/do_mounts.c
@@ -380,8 +380,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
 
 void __init mount_block_root(char *name, int flags)
 {
-	struct page *page = alloc_page(GFP_KERNEL |
-				       __GFP_NOTRACK_FALSE_POSITIVE);
+	struct page *page = alloc_page(GFP_KERNEL);
 	char *fs_names = page_address(page);
 	char *p;
 #ifdef CONFIG_BLOCK
kernel/fork.c
@@ -469,7 +469,7 @@ void __init fork_init(void)
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep = kmem_cache_create("task_struct",
 			arch_task_struct_size, align,
-			SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
+			SLAB_PANIC|SLAB_ACCOUNT, NULL);
 #endif
 
 	/* do the arch specific task caches init */
@@ -2205,18 +2205,18 @@ void __init proc_caches_init(void)
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
-			SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
+			SLAB_ACCOUNT, sighand_ctor);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
 			NULL);
 	files_cachep = kmem_cache_create("files_cache",
 			sizeof(struct files_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
 			NULL);
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
 			NULL);
 	/*
 	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
@@ -2227,7 +2227,7 @@ void __init proc_caches_init(void)
 	 */
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
 			NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
 	mmap_init();
kernel/signal.c
@@ -1036,8 +1036,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	else
 		override_rlimit = 0;
 
-	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
-		override_rlimit);
+	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
 	if (q) {
 		list_add_tail(&q->list, &pending->list);
 		switch ((unsigned long) info) {
mm/kmemcheck.c
@@ -18,7 +18,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 	 * With kmemcheck enabled, we need to allocate a memory area for the
 	 * shadow bits as well.
 	 */
-	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
+	shadow = alloc_pages_node(node, flags, order);
 	if (!shadow) {
 		if (printk_ratelimit())
 			pr_err("kmemcheck: failed to allocate shadow bitmap\n");
mm/slab.c
@@ -1410,7 +1410,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	flags |= cachep->allocflags;
 
-	page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
+	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page) {
 		slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
mm/slab.h
@@ -141,10 +141,10 @@ static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
 #if defined(CONFIG_SLAB)
 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
 			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
-			  SLAB_NOTRACK | SLAB_ACCOUNT)
+			  SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
+			  SLAB_TEMPORARY | SLAB_ACCOUNT)
 #else
 #define SLAB_CACHE_FLAGS (0)
 #endif
@@ -163,7 +163,6 @@ static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
 			      SLAB_NOLEAKTRACE | \
 			      SLAB_RECLAIM_ACCOUNT | \
 			      SLAB_TEMPORARY | \
-			      SLAB_NOTRACK | \
 			      SLAB_ACCOUNT)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
mm/slab_common.c
@@ -44,7 +44,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
 		SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-			 SLAB_NOTRACK | SLAB_ACCOUNT)
+			 SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
mm/slub.c
@@ -1436,8 +1436,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	struct page *page;
 	int order = oo_order(oo);
 
-	flags |= __GFP_NOTRACK;
-
 	if (node == NUMA_NO_NODE)
 		page = alloc_pages(flags, order);
 	else
@@ -3774,7 +3772,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	struct page *page;
 	void *ptr = NULL;
 
-	flags |= __GFP_COMP | __GFP_NOTRACK;
+	flags |= __GFP_COMP;
 	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		ptr = page_address(page);