Commit 3ee2d747 authored by Muchun Song, committed by Andrew Morton

mm: kfence: fix PG_slab and memcg_data clearing

PG_slab and memcg_data are not reset when KFENCE fails to initialize the
kfence pool at runtime, which results in a "Bad page state" report when
the kfence pool is freed back to the buddy allocator.  The check for a
compound head page seems unnecessary, since we already guarantee this
when allocating the kfence pool.  Remove the check to simplify the code.

Link: https://lkml.kernel.org/r/20230320030059.20189-1-songmuchun@bytedance.com
Fixes: 0ce20dd8 ("mm: add Kernel Electric-Fence infrastructure")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: SeongJae Park <sjpark@amazon.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e900ba10
...@@ -561,10 +561,6 @@ static unsigned long kfence_init_pool(void) ...@@ -561,10 +561,6 @@ static unsigned long kfence_init_pool(void)
if (!i || (i % 2)) if (!i || (i % 2))
continue; continue;
/* Verify we do not have a compound head page. */
if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
return addr;
__folio_set_slab(slab_folio(slab)); __folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg | slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
...@@ -597,12 +593,26 @@ static unsigned long kfence_init_pool(void) ...@@ -597,12 +593,26 @@ static unsigned long kfence_init_pool(void)
/* Protect the right redzone. */ /* Protect the right redzone. */
if (unlikely(!kfence_protect(addr + PAGE_SIZE))) if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
return addr; goto reset_slab;
addr += 2 * PAGE_SIZE; addr += 2 * PAGE_SIZE;
} }
return 0; return 0;
reset_slab:
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
struct slab *slab = page_slab(&pages[i]);
if (!i || (i % 2))
continue;
#ifdef CONFIG_MEMCG
slab->memcg_data = 0;
#endif
__folio_clear_slab(slab_folio(slab));
}
return addr;
} }
static bool __init kfence_init_pool_early(void) static bool __init kfence_init_pool_early(void)
...@@ -632,16 +642,6 @@ static bool __init kfence_init_pool_early(void) ...@@ -632,16 +642,6 @@ static bool __init kfence_init_pool_early(void)
* fails for the first page, and therefore expect addr==__kfence_pool in * fails for the first page, and therefore expect addr==__kfence_pool in
* most failure cases. * most failure cases.
*/ */
for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
struct slab *slab = virt_to_slab(p);
if (!slab)
continue;
#ifdef CONFIG_MEMCG
slab->memcg_data = 0;
#endif
__folio_clear_slab(slab_folio(slab));
}
memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
__kfence_pool = NULL; __kfence_pool = NULL;
return false; return false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment