Commit 1ea9d333 authored by Linus Torvalds

Merge tag 'mm-stable-2022-12-17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more mm updates from Andrew Morton:

 - A few late-breaking minor fixups

 - Two minor feature patches which were awkwardly dependent on mm-nonmm.
   I need to set up a new branch to handle such things.

* tag 'mm-stable-2022-12-17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  MAINTAINERS: zram: zsmalloc: Add an additional co-maintainer
  mm/kmemleak: use %pK to display kernel pointers in backtrace
  mm: use stack_depot for recording kmemleak's backtrace
  maple_tree: update copyright dates for test code
  maple_tree: fix mas_find_rev() comment
  mm/gup_test: free memory allocated via kvcalloc() using kvfree()
parents 4f292c4d 8b777594
CREDITS
@@ -1439,6 +1439,10 @@ N: Justin Guyett
 E: jguyett@andrew.cmu.edu
 D: via-rhine net driver hacking
 
+N: Nitin Gupta
+E: ngupta@vflare.org
+D: zsmalloc memory allocator and zram block device driver
+
 N: Danny ter Haar
 E: dth@cistron.nl
 D: /proc/cpuinfo, reboot on panic , kernel pre-patch tester ;)
MAINTAINERS
@@ -23055,8 +23055,7 @@ F: drivers/media/pci/zoran/
 
 ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER
 M: Minchan Kim <minchan@kernel.org>
-M: Nitin Gupta <ngupta@vflare.org>
-R: Sergey Senozhatsky <senozhatsky@chromium.org>
+M: Sergey Senozhatsky <senozhatsky@chromium.org>
 L: linux-kernel@vger.kernel.org
 S: Maintained
 F: Documentation/admin-guide/blockdev/zram.rst
@@ -23069,8 +23068,7 @@ F: drivers/tty/serial/zs.*
 
 ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
 M: Minchan Kim <minchan@kernel.org>
-M: Nitin Gupta <ngupta@vflare.org>
-R: Sergey Senozhatsky <senozhatsky@chromium.org>
+M: Sergey Senozhatsky <senozhatsky@chromium.org>
 L: linux-mm@kvack.org
 S: Maintained
 F: Documentation/mm/zsmalloc.rst
lib/Kconfig.debug
@@ -728,6 +728,7 @@ config DEBUG_KMEMLEAK
         select STACKTRACE if STACKTRACE_SUPPORT
         select KALLSYMS
         select CRC32
+        select STACKDEPOT
         help
           Say Y here if you want to enable the memory leak
           detector. The memory allocation/freeing is traced in a way
lib/maple_tree.c
@@ -6062,7 +6062,7 @@ void *mas_find_rev(struct ma_state *mas, unsigned long min)
         if (mas->index < min)
                 return NULL;
 
-        /* Retries on dead nodes handled by mas_next_entry */
+        /* Retries on dead nodes handled by mas_prev_entry */
         return mas_prev_entry(mas, min);
 }
 EXPORT_SYMBOL_GPL(mas_find_rev);
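For readers who don't know the API whose comment is fixed here: mas_find_rev() is the reverse-search counterpart of mas_find(), returning entries from mas->index downward until the search passes the given minimum. Below is a minimal kernel-style sketch of a reverse walk; the tree and helper names (demo_tree, demo_reverse_walk) are made up for illustration and are not part of this merge.

    #include <linux/maple_tree.h>
    #include <linux/rcupdate.h>
    #include <linux/printk.h>

    /* Hypothetical tree used only for this illustration. */
    static DEFINE_MTREE(demo_tree);

    /* Visit every entry stored at indices 0..1000, highest index first. */
    static void demo_reverse_walk(void)
    {
            void *entry;
            MA_STATE(mas, &demo_tree, 1000, 1000);

            rcu_read_lock();
            /*
             * The first call returns the entry at or below mas->index; each
             * further call returns the next entry below that, until the
             * search drops past the minimum index (0 here).
             */
            while ((entry = mas_find_rev(&mas, 0)) != NULL)
                    pr_info("entry %p at index %lu\n", entry, mas.index);
            rcu_read_unlock();
    }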
mm/gup_test.c
@@ -214,7 +214,7 @@ static inline void pin_longterm_test_stop(void)
                 if (pin_longterm_test_nr_pages)
                         unpin_user_pages(pin_longterm_test_pages,
                                          pin_longterm_test_nr_pages);
-                kfree(pin_longterm_test_pages);
+                kvfree(pin_longterm_test_pages);
                 pin_longterm_test_pages = NULL;
                 pin_longterm_test_nr_pages = 0;
         }
@@ -255,7 +255,7 @@ static inline int pin_longterm_test_start(unsigned long arg)
         fast = !!(args.flags & PIN_LONGTERM_TEST_FLAG_USE_FAST);
 
         if (!fast && mmap_read_lock_killable(current->mm)) {
-                kfree(pages);
+                kvfree(pages);
                 return -EINTR;
         }
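The gup_test fix above is the usual kvmalloc-family pairing: the buffer in question was allocated with kvcalloc(), which may fall back to vmalloc() for large requests, so it must be released with kvfree() rather than kfree(). A small self-contained sketch of the pattern follows; the helper names and the page-array use case are illustrative only, not code from this merge.

    #include <linux/slab.h>         /* kvcalloc(), kvfree() */
    #include <linux/errno.h>
    #include <linux/mm_types.h>     /* struct page */

    static struct page **page_array;

    static int demo_alloc_page_array(unsigned long nr_pages)
    {
            /* kvcalloc() tries kmalloc() first, then falls back to vmalloc(). */
            page_array = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
            return page_array ? 0 : -ENOMEM;
    }

    static void demo_free_page_array(void)
    {
            /* kvfree() handles both kmalloc()- and vmalloc()-backed memory. */
            kvfree(page_array);
            page_array = NULL;
    }

Passing a vmalloc()-backed pointer to kfree() is invalid and can corrupt the slab allocator, which is what the two hunks above avoid when the test buffer is large enough to trigger the vmalloc() fallback.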
mm/kmemleak.c
@@ -79,6 +79,7 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/memblock.h>
@@ -159,8 +160,7 @@ struct kmemleak_object {
         u32 checksum;
         /* memory ranges to be scanned inside an object (empty for all) */
         struct hlist_head area_list;
-        unsigned long trace[MAX_TRACE];
-        unsigned int trace_len;
+        depot_stack_handle_t trace_handle;
         unsigned long jiffies;          /* creation timestamp */
         pid_t pid;                      /* pid of the current task */
         char comm[TASK_COMM_LEN];       /* executable name */
@@ -346,19 +346,22 @@ static void print_unreferenced(struct seq_file *seq,
                                struct kmemleak_object *object)
 {
         int i;
+        unsigned long *entries;
+        unsigned int nr_entries;
         unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 
+        nr_entries = stack_depot_fetch(object->trace_handle, &entries);
         warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
-                           object->pointer, object->size);
+                          object->pointer, object->size);
         warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
-                           object->comm, object->pid, object->jiffies,
-                           msecs_age / 1000, msecs_age % 1000);
+                          object->comm, object->pid, object->jiffies,
+                          msecs_age / 1000, msecs_age % 1000);
         hex_dump_object(seq, object);
         warn_or_seq_printf(seq, "  backtrace:\n");
 
-        for (i = 0; i < object->trace_len; i++) {
-                void *ptr = (void *)object->trace[i];
-                warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
+        for (i = 0; i < nr_entries; i++) {
+                void *ptr = (void *)entries[i];
+                warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
         }
 }
@@ -370,15 +373,16 @@ static void print_unreferenced(struct seq_file *seq,
 static void dump_object_info(struct kmemleak_object *object)
 {
         pr_notice("Object 0x%08lx (size %zu):\n",
-                  object->pointer, object->size);
+                object->pointer, object->size);
         pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
-                  object->comm, object->pid, object->jiffies);
+                object->comm, object->pid, object->jiffies);
         pr_notice("  min_count = %d\n", object->min_count);
         pr_notice("  count = %d\n", object->count);
         pr_notice("  flags = 0x%x\n", object->flags);
         pr_notice("  checksum = %u\n", object->checksum);
         pr_notice("  backtrace:\n");
-        stack_trace_print(object->trace, object->trace_len, 4);
+        if (object->trace_handle)
+                stack_depot_print(object->trace_handle);
 }
 
 /*
@@ -591,12 +595,18 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
         return object;
 }
 
-/*
- * Save stack trace to the given array of MAX_TRACE size.
- */
-static int __save_stack_trace(unsigned long *trace)
+static noinline depot_stack_handle_t set_track_prepare(void)
 {
-        return stack_trace_save(trace, MAX_TRACE, 2);
+        depot_stack_handle_t trace_handle;
+        unsigned long entries[MAX_TRACE];
+        unsigned int nr_entries;
+
+        if (!kmemleak_initialized)
+                return 0;
+        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
+        trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+
+        return trace_handle;
 }
 
 /*
@@ -653,7 +663,7 @@ static void __create_object(unsigned long ptr, size_t size,
         }
 
         /* kernel backtrace */
-        object->trace_len = __save_stack_trace(object->trace);
+        object->trace_handle = set_track_prepare();
 
         raw_spin_lock_irqsave(&kmemleak_lock, flags);
@@ -692,7 +702,6 @@ static void __create_object(unsigned long ptr, size_t size,
         rb_link_node(&object->rb_node, rb_parent, link);
         rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
                                           &object_tree_root);
-
         list_add_tail_rcu(&object->object_list, &object_list);
 out:
         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
@@ -1091,7 +1100,7 @@ void __ref kmemleak_update_trace(const void *ptr)
         }
 
         raw_spin_lock_irqsave(&object->lock, flags);
-        object->trace_len = __save_stack_trace(object->trace);
+        object->trace_handle = set_track_prepare();
         raw_spin_unlock_irqrestore(&object->lock, flags);
 
         put_object(object);
@@ -2084,6 +2093,7 @@ void __init kmemleak_init(void)
         if (kmemleak_error)
                 return;
 
+        stack_depot_init();
         jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
         jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
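The kmemleak hunks above follow the standard stack depot pattern: capture a backtrace with stack_trace_save(), deduplicate and store it with stack_depot_save(), and keep only the returned depot_stack_handle_t per object instead of a MAX_TRACE-sized array; the frames are recovered later with stack_depot_fetch() or printed with stack_depot_print(). Here is a minimal sketch of that pattern outside kmemleak, assuming the depot has already been set up (via stack_depot_init(), as kmemleak_init() now does, or CONFIG_STACKDEPOT_ALWAYS_INIT); the demo_* helper names are made up for illustration.

    #include <linux/kernel.h>
    #include <linux/gfp.h>
    #include <linux/stacktrace.h>
    #include <linux/stackdepot.h>

    /* Capture the current call chain and return a compact depot handle. */
    static depot_stack_handle_t demo_record_backtrace(void)
    {
            unsigned long entries[16];
            unsigned int nr_entries;

            /* Skip two frames: this helper and its immediate caller. */
            nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

            /* GFP_NOWAIT keeps this usable from contexts that cannot sleep. */
            return stack_depot_save(entries, nr_entries, GFP_NOWAIT);
    }

    /* Expand a handle back into frames and print them. */
    static void demo_print_backtrace(depot_stack_handle_t handle)
    {
            unsigned long *entries;
            unsigned int nr_entries;

            if (!handle)
                    return;
            nr_entries = stack_depot_fetch(handle, &entries);
            stack_trace_print(entries, nr_entries, 4);
    }

Identical backtraces are stored only once in the depot, which is why the new select STACKDEPOT plus a 4-byte handle per kmemleak_object can replace the old per-object trace array.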
tools/testing/radix-tree/maple.c
 // SPDX-License-Identifier: GPL-2.0+
 /*
- * maple_tree.c: Userspace shim for maple tree test-suite
- * Copyright (c) 2018 Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * maple_tree.c: Userspace testing for maple tree test-suite
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
  *
  * Any tests that require internal knowledge of the tree or threads and other
  * difficult to handle in kernel tests.