Commit df442a4e authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "21 patches.

  Subsystems affected by this patch series: MAINTAINERS, mailmap, and mm
  (mlock, pagecache, damon, slub, memcg, hugetlb, and pagecache)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  mm: bdi: initialize bdi_min_ratio when bdi is unregistered
  hugetlbfs: fix issue of preallocation of gigantic pages can't work
  mm/memcg: relocate mod_objcg_mlstate(), get_obj_stock() and put_obj_stock()
  mm/slub: fix endianness bug for alloc/free_traces attributes
  selftests/damon: split test cases
  selftests/damon: test debugfs file reads/writes with huge count
  selftests/damon: test wrong DAMOS condition ranges input
  selftests/damon: test DAMON enabling with empty target_ids case
  selftests/damon: skip test if DAMON is running
  mm/damon/vaddr-test: remove unnecessary variables
  mm/damon/vaddr-test: split a test function having >1024 bytes frame size
  mm/damon/vaddr: remove an unnecessary warning message
  mm/damon/core: remove unnecessary error messages
  mm/damon/dbgfs: remove an unnecessary error message
  mm/damon/core: use better timer mechanisms selection threshold
  mm/damon/core: fix fake load reports due to uninterruptible sleeps
  timers: implement usleep_idle_range()
  filemap: remove PageHWPoison check from next_uptodate_page()
  mailmap: update email address for Guo Ren
  MAINTAINERS: update kdump maintainers
  ...
parents 6f513529 3c376dfa
...@@ -126,6 +126,8 @@ Greg Kroah-Hartman <gregkh@suse.de> ...@@ -126,6 +126,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com> Greg Kroah-Hartman <greg@kroah.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com> Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com> Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br> Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi> Gustavo Padovan <padovan@profusion.mobi>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org> Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
......
...@@ -10279,9 +10279,9 @@ F: lib/Kconfig.kcsan ...@@ -10279,9 +10279,9 @@ F: lib/Kconfig.kcsan
F: scripts/Makefile.kcsan F: scripts/Makefile.kcsan
KDUMP KDUMP
M: Dave Young <dyoung@redhat.com>
M: Baoquan He <bhe@redhat.com> M: Baoquan He <bhe@redhat.com>
R: Vivek Goyal <vgoyal@redhat.com> R: Vivek Goyal <vgoyal@redhat.com>
R: Dave Young <dyoung@redhat.com>
L: kexec@lists.infradead.org L: kexec@lists.infradead.org
S: Maintained S: Maintained
W: http://lse.sourceforge.net/kdump/ W: http://lse.sourceforge.net/kdump/
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*/ */
#include <linux/math.h> #include <linux/math.h>
#include <linux/sched.h>
extern unsigned long loops_per_jiffy; extern unsigned long loops_per_jiffy;
...@@ -58,7 +59,18 @@ void calibrate_delay(void); ...@@ -58,7 +59,18 @@ void calibrate_delay(void);
void __attribute__((weak)) calibration_delay_done(void); void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs); void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs); unsigned long msleep_interruptible(unsigned int msecs);
void usleep_range(unsigned long min, unsigned long max); void usleep_range_state(unsigned long min, unsigned long max,
unsigned int state);
static inline void usleep_range(unsigned long min, unsigned long max)
{
usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
}
static inline void usleep_idle_range(unsigned long min, unsigned long max)
{
usleep_range_state(min, max, TASK_IDLE);
}
static inline void ssleep(unsigned int seconds) static inline void ssleep(unsigned int seconds)
{ {
......
...@@ -66,10 +66,17 @@ struct rlimit64 { ...@@ -66,10 +66,17 @@ struct rlimit64 {
#define _STK_LIM (8*1024*1024) #define _STK_LIM (8*1024*1024)
/* /*
* GPG2 wants 64kB of mlocked memory, to make sure pass phrases * Limit the amount of locked memory by some sane default:
* and other sensitive information are never written to disk. * root can always increase this limit if needed.
*
* The main use-cases are (1) preventing sensitive memory
* from being swapped; (2) real-time operations; (3) via
* IOURING_REGISTER_BUFFERS.
*
* The first two don't need much. The latter will take as
* much as it can get. 8MB is a reasonably sane default.
*/ */
#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024) #define MLOCK_LIMIT (8*1024*1024)
/* /*
* Due to binary compatibility, the actual resource numbers * Due to binary compatibility, the actual resource numbers
......
...@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs) ...@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
EXPORT_SYMBOL(msleep_interruptible); EXPORT_SYMBOL(msleep_interruptible);
/** /**
* usleep_range - Sleep for an approximate time * usleep_range_state - Sleep for an approximate time in a given state
* @min: Minimum time in usecs to sleep * @min: Minimum time in usecs to sleep
* @max: Maximum time in usecs to sleep * @max: Maximum time in usecs to sleep
* @state: State of the current task that will be while sleeping
* *
* In non-atomic context where the exact wakeup time is flexible, use * In non-atomic context where the exact wakeup time is flexible, use
* usleep_range() instead of udelay(). The sleep improves responsiveness * usleep_range_state() instead of udelay(). The sleep improves responsiveness
* by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
* power usage by allowing hrtimers to take advantage of an already- * power usage by allowing hrtimers to take advantage of an already-
* scheduled interrupt instead of scheduling a new one just for this sleep. * scheduled interrupt instead of scheduling a new one just for this sleep.
*/ */
void __sched usleep_range(unsigned long min, unsigned long max) void __sched usleep_range_state(unsigned long min, unsigned long max,
unsigned int state)
{ {
ktime_t exp = ktime_add_us(ktime_get(), min); ktime_t exp = ktime_add_us(ktime_get(), min);
u64 delta = (u64)(max - min) * NSEC_PER_USEC; u64 delta = (u64)(max - min) * NSEC_PER_USEC;
for (;;) { for (;;) {
__set_current_state(TASK_UNINTERRUPTIBLE); __set_current_state(state);
/* Do not return before the requested sleep time has elapsed */ /* Do not return before the requested sleep time has elapsed */
if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS)) if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
break; break;
} }
} }
EXPORT_SYMBOL(usleep_range); EXPORT_SYMBOL(usleep_range_state);
...@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi) ...@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
wb_shutdown(&bdi->wb); wb_shutdown(&bdi->wb);
cgwb_bdi_unregister(bdi); cgwb_bdi_unregister(bdi);
/*
* If this BDI's min ratio has been set, use bdi_set_min_ratio() to
* update the global bdi_min_ratio.
*/
if (bdi->min_ratio)
bdi_set_min_ratio(bdi, 0);
if (bdi->dev) { if (bdi->dev) {
bdi_debug_unregister(bdi); bdi_debug_unregister(bdi);
device_unregister(bdi->dev); device_unregister(bdi->dev);
......
...@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx, ...@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
for (i = 0; i < nr_ids; i++) { for (i = 0; i < nr_ids; i++) {
t = damon_new_target(ids[i]); t = damon_new_target(ids[i]);
if (!t) { if (!t) {
pr_err("Failed to alloc damon_target\n");
/* The caller should do cleanup of the ids itself */ /* The caller should do cleanup of the ids itself */
damon_for_each_target_safe(t, next, ctx) damon_for_each_target_safe(t, next, ctx)
damon_destroy_target(t); damon_destroy_target(t);
...@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, ...@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long primitive_upd_int, unsigned long aggr_int, unsigned long primitive_upd_int,
unsigned long min_nr_reg, unsigned long max_nr_reg) unsigned long min_nr_reg, unsigned long max_nr_reg)
{ {
if (min_nr_reg < 3) { if (min_nr_reg < 3)
pr_err("min_nr_regions (%lu) must be at least 3\n",
min_nr_reg);
return -EINVAL; return -EINVAL;
} if (min_nr_reg > max_nr_reg)
if (min_nr_reg > max_nr_reg) {
pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
min_nr_reg, max_nr_reg);
return -EINVAL; return -EINVAL;
}
ctx->sample_interval = sample_int; ctx->sample_interval = sample_int;
ctx->aggr_interval = aggr_int; ctx->aggr_interval = aggr_int;
...@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme) ...@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
static void kdamond_usleep(unsigned long usecs) static void kdamond_usleep(unsigned long usecs)
{ {
if (usecs > 100 * 1000) /* See Documentation/timers/timers-howto.rst for the thresholds */
schedule_timeout_interruptible(usecs_to_jiffies(usecs)); if (usecs > 20 * USEC_PER_MSEC)
schedule_timeout_idle(usecs_to_jiffies(usecs));
else else
usleep_range(usecs, usecs + 1); usleep_idle_range(usecs, usecs + 1);
} }
/* Returns negative error code if it's not activated but should return */ /* Returns negative error code if it's not activated but should return */
...@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data) ...@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
ctx->callback.after_sampling(ctx)) ctx->callback.after_sampling(ctx))
done = true; done = true;
usleep_range(ctx->sample_interval, ctx->sample_interval + 1); kdamond_usleep(ctx->sample_interval);
if (ctx->primitive.check_accesses) if (ctx->primitive.check_accesses)
max_nr_accesses = ctx->primitive.check_accesses(ctx); max_nr_accesses = ctx->primitive.check_accesses(ctx);
......
...@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len, ...@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
&wmarks.low, &parsed); &wmarks.low, &parsed);
if (ret != 18) if (ret != 18)
break; break;
if (!damos_action_valid(action)) { if (!damos_action_valid(action))
pr_err("wrong action %d\n", action);
goto fail; goto fail;
}
pos += parsed; pos += parsed;
scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a, scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
......
...@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test, ...@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
struct damon_addr_range *three_regions, struct damon_addr_range *three_regions,
unsigned long *expected, int nr_expected) unsigned long *expected, int nr_expected)
{ {
struct damon_ctx *ctx = damon_new_ctx();
struct damon_target *t; struct damon_target *t;
struct damon_region *r; struct damon_region *r;
int i; int i;
...@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test, ...@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
r = damon_new_region(regions[i * 2], regions[i * 2 + 1]); r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
damon_add_region(r, t); damon_add_region(r, t);
} }
damon_add_target(ctx, t);
damon_va_apply_three_regions(t, three_regions); damon_va_apply_three_regions(t, three_regions);
...@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test, ...@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]); KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]); KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
} }
damon_destroy_ctx(ctx);
} }
/* /*
...@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test) ...@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
new_three_regions, expected, ARRAY_SIZE(expected)); new_three_regions, expected, ARRAY_SIZE(expected));
} }
static void damon_test_split_evenly(struct kunit *test) static void damon_test_split_evenly_fail(struct kunit *test,
unsigned long start, unsigned long end, unsigned int nr_pieces)
{ {
struct damon_ctx *c = damon_new_ctx(); struct damon_target *t = damon_new_target(42);
struct damon_target *t; struct damon_region *r = damon_new_region(start, end);
struct damon_region *r;
unsigned long i;
KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-EINVAL);
t = damon_new_target(42);
r = damon_new_region(0, 100);
KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
damon_add_region(r, t); damon_add_region(r, t);
KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0); KUNIT_EXPECT_EQ(test,
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u); damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
i = 0;
damon_for_each_region(r, t) { damon_for_each_region(r, t) {
KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10); KUNIT_EXPECT_EQ(test, r->ar.start, start);
KUNIT_EXPECT_EQ(test, r->ar.end, i * 10); KUNIT_EXPECT_EQ(test, r->ar.end, end);
} }
damon_free_target(t); damon_free_target(t);
}
static void damon_test_split_evenly_succ(struct kunit *test,
unsigned long start, unsigned long end, unsigned int nr_pieces)
{
struct damon_target *t = damon_new_target(42);
struct damon_region *r = damon_new_region(start, end);
unsigned long expected_width = (end - start) / nr_pieces;
unsigned long i = 0;
t = damon_new_target(42);
r = damon_new_region(5, 59);
damon_add_region(r, t); damon_add_region(r, t);
KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0); KUNIT_EXPECT_EQ(test,
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u); damon_va_evenly_split_region(t, r, nr_pieces), 0);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
i = 0;
damon_for_each_region(r, t) { damon_for_each_region(r, t) {
if (i == 4) if (i == nr_pieces - 1)
break; break;
KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++); KUNIT_EXPECT_EQ(test,
KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i); r->ar.start, start + i++ * expected_width);
KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
} }
KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i); KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
KUNIT_EXPECT_EQ(test, r->ar.end, 59ul); KUNIT_EXPECT_EQ(test, r->ar.end, end);
damon_free_target(t); damon_free_target(t);
}
t = damon_new_target(42); static void damon_test_split_evenly(struct kunit *test)
r = damon_new_region(5, 6); {
damon_add_region(r, t); KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL); -EINVAL);
KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
damon_for_each_region(r, t) { damon_test_split_evenly_fail(test, 0, 100, 0);
KUNIT_EXPECT_EQ(test, r->ar.start, 5ul); damon_test_split_evenly_succ(test, 0, 100, 10);
KUNIT_EXPECT_EQ(test, r->ar.end, 6ul); damon_test_split_evenly_succ(test, 5, 59, 5);
} damon_test_split_evenly_fail(test, 5, 6, 2);
damon_free_target(t);
damon_destroy_ctx(c);
} }
static struct kunit_case damon_test_cases[] = { static struct kunit_case damon_test_cases[] = {
......
...@@ -627,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t, ...@@ -627,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
case DAMOS_STAT: case DAMOS_STAT:
return 0; return 0;
default: default:
pr_warn("Wrong action %d\n", scheme->action);
return -EINVAL; return -EINVAL;
} }
......
...@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page, ...@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
goto skip; goto skip;
if (!PageUptodate(page) || PageReadahead(page)) if (!PageUptodate(page) || PageReadahead(page))
goto skip; goto skip;
if (PageHWPoison(page))
goto skip;
if (!trylock_page(page)) if (!trylock_page(page))
goto skip; goto skip;
if (page->mapping != mapping) if (page->mapping != mapping)
......
...@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) ...@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
struct huge_bootmem_page *m = NULL; /* initialize for clang */ struct huge_bootmem_page *m = NULL; /* initialize for clang */
int nr_nodes, node; int nr_nodes, node;
if (nid >= nr_online_nodes) if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
return 0; return 0;
/* do node specific alloc */ /* do node specific alloc */
if (nid != NUMA_NO_NODE) { if (nid != NUMA_NO_NODE) {
......
...@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) ...@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
rcu_read_unlock(); rcu_read_unlock();
} }
/*
* mod_objcg_mlstate() may be called with irq enabled, so
* mod_memcg_lruvec_state() should be used.
*/
static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
struct pglist_data *pgdat,
enum node_stat_item idx, int nr)
{
struct mem_cgroup *memcg;
struct lruvec *lruvec;
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mod_memcg_lruvec_state(lruvec, idx, nr);
rcu_read_unlock();
}
/** /**
* __count_memcg_events - account VM events in a cgroup * __count_memcg_events - account VM events in a cgroup
* @memcg: the memory cgroup * @memcg: the memory cgroup
...@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, ...@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
} }
#endif #endif
/*
* Most kmem_cache_alloc() calls are from user context. The irq disable/enable
* sequence used in this case to access content from object stock is slow.
* To optimize for user context access, there are now two object stocks for
* task context and interrupt context access respectively.
*
* The task context object stock can be accessed by disabling preemption only
* which is cheap in non-preempt kernel. The interrupt context object stock
* can only be accessed after disabling interrupt. User context code can
* access interrupt object stock, but not vice versa.
*/
static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
{
struct memcg_stock_pcp *stock;
if (likely(in_task())) {
*pflags = 0UL;
preempt_disable();
stock = this_cpu_ptr(&memcg_stock);
return &stock->task_obj;
}
local_irq_save(*pflags);
stock = this_cpu_ptr(&memcg_stock);
return &stock->irq_obj;
}
static inline void put_obj_stock(unsigned long flags)
{
if (likely(in_task()))
preempt_enable();
else
local_irq_restore(flags);
}
/** /**
* consume_stock: Try to consume stocked charge on this cpu. * consume_stock: Try to consume stocked charge on this cpu.
* @memcg: memcg to consume from. * @memcg: memcg to consume from.
...@@ -2816,6 +2763,59 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) ...@@ -2816,6 +2763,59 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
*/ */
#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
/*
* Most kmem_cache_alloc() calls are from user context. The irq disable/enable
* sequence used in this case to access content from object stock is slow.
* To optimize for user context access, there are now two object stocks for
* task context and interrupt context access respectively.
*
* The task context object stock can be accessed by disabling preemption only
* which is cheap in non-preempt kernel. The interrupt context object stock
* can only be accessed after disabling interrupt. User context code can
* access interrupt object stock, but not vice versa.
*/
static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
{
struct memcg_stock_pcp *stock;
if (likely(in_task())) {
*pflags = 0UL;
preempt_disable();
stock = this_cpu_ptr(&memcg_stock);
return &stock->task_obj;
}
local_irq_save(*pflags);
stock = this_cpu_ptr(&memcg_stock);
return &stock->irq_obj;
}
static inline void put_obj_stock(unsigned long flags)
{
if (likely(in_task()))
preempt_enable();
else
local_irq_restore(flags);
}
/*
* mod_objcg_mlstate() may be called with irq enabled, so
* mod_memcg_lruvec_state() should be used.
*/
static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
struct pglist_data *pgdat,
enum node_stat_item idx, int nr)
{
struct mem_cgroup *memcg;
struct lruvec *lruvec;
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
lruvec = mem_cgroup_lruvec(memcg, pgdat);
mod_memcg_lruvec_state(lruvec, idx, nr);
rcu_read_unlock();
}
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
gfp_t gfp, bool new_page) gfp_t gfp, bool new_page)
{ {
......
...@@ -5081,6 +5081,7 @@ struct loc_track { ...@@ -5081,6 +5081,7 @@ struct loc_track {
unsigned long max; unsigned long max;
unsigned long count; unsigned long count;
struct location *loc; struct location *loc;
loff_t idx;
}; };
static struct dentry *slab_debugfs_root; static struct dentry *slab_debugfs_root;
...@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init); ...@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
static int slab_debugfs_show(struct seq_file *seq, void *v) static int slab_debugfs_show(struct seq_file *seq, void *v)
{ {
struct location *l;
unsigned int idx = *(unsigned int *)v;
struct loc_track *t = seq->private; struct loc_track *t = seq->private;
struct location *l;
unsigned long idx;
idx = (unsigned long) t->idx;
if (idx < t->count) { if (idx < t->count) {
l = &t->loc[idx]; l = &t->loc[idx];
...@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) ...@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
{ {
struct loc_track *t = seq->private; struct loc_track *t = seq->private;
v = ppos; t->idx = ++(*ppos);
++*ppos;
if (*ppos <= t->count) if (*ppos <= t->count)
return v; return ppos;
return NULL; return NULL;
} }
static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
{ {
struct loc_track *t = seq->private;
t->idx = *ppos;
return ppos; return ppos;
} }
......
# SPDX-License-Identifier: GPL-2.0-only
huge_count_read_write
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
# Makefile for damon selftests # Makefile for damon selftests
TEST_FILES = _chk_dependency.sh TEST_GEN_FILES += huge_count_read_write
TEST_PROGS = debugfs_attrs.sh
TEST_FILES = _chk_dependency.sh _debugfs_common.sh
TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
include ../lib.mk include ../lib.mk
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

# Common helpers for DAMON debugfs selftests.

# test_write_result <file> <content> <orig_content> <expect_reason> <expected>
# Write $content to $file and verify the write exits with status $expected.
# On mismatch, explain why, restore the original content, and fail the test.
test_write_result() {
	file=$1
	content=$2
	orig_content=$3
	expect_reason=$4
	expected=$5

	echo "$content" > "$file"
	if [ $? -ne "$expected" ]
	then
		echo "writing $content to $file doesn't return $expected"
		echo "expected because: $expect_reason"
		echo "$orig_content" > "$file"
		exit 1
	fi
}

# Expect the write to succeed (exit status 0).
test_write_succ() {
	test_write_result "$1" "$2" "$3" "$4" 0
}

# Expect the write to fail (exit status 1).
test_write_fail() {
	test_write_result "$1" "$2" "$3" "$4" 1
}

# test_content <file> <orig_content> <expected> <expect_reason>
# Read $file and verify its content is exactly $expected; restore the
# original content and fail the test otherwise.
test_content() {
	file=$1
	orig_content=$2
	expected=$3
	expect_reason=$4

	content=$(cat "$file")
	if [ "$content" != "$expected" ]
	then
		echo "reading $file expected $expected but $content"
		echo "expected because: $expect_reason"
		echo "$orig_content" > "$file"
		exit 1
	fi
}

source ./_chk_dependency.sh

# Skip if DAMON monitoring is already running: the tests below would
# disturb (and be disturbed by) an ongoing monitoring session.
# NOTE: the command substitution is quoted; unquoted it would be subject
# to word splitting and would break `[` if the file read came back empty.
damon_onoff="$DBGFS/monitor_on"
if [ "$(cat "$damon_onoff")" = "on" ]
then
	echo "monitoring is on"
	exit $ksft_skip
fi
#!/bin/bash #!/bin/bash
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
test_write_result() { source _debugfs_common.sh
file=$1
content=$2
orig_content=$3
expect_reason=$4
expected=$5
echo "$content" > "$file"
if [ $? -ne "$expected" ]
then
echo "writing $content to $file doesn't return $expected"
echo "expected because: $expect_reason"
echo "$orig_content" > "$file"
exit 1
fi
}
test_write_succ() {
test_write_result "$1" "$2" "$3" "$4" 0
}
test_write_fail() {
test_write_result "$1" "$2" "$3" "$4" 1
}
test_content() {
file=$1
orig_content=$2
expected=$3
expect_reason=$4
content=$(cat "$file")
if [ "$content" != "$expected" ]
then
echo "reading $file expected $expected but $content"
echo "expected because: $expect_reason"
echo "$orig_content" > "$file"
exit 1
fi
}
source ./_chk_dependency.sh
# Test attrs file # Test attrs file
# =============== # ===============
...@@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \ ...@@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \
"min_nr_regions > max_nr_regions" "min_nr_regions > max_nr_regions"
test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written" test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written"
echo "$orig_content" > "$file" echo "$orig_content" > "$file"
# Test schemes file
# =================
file="$DBGFS/schemes"
orig_content=$(cat "$file")
test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
"$orig_content" "valid input"
test_write_fail "$file" "1 2
3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
test_write_succ "$file" "" "$orig_content" "disabling"
echo "$orig_content" > "$file"
# Test target_ids file
# ====================
file="$DBGFS/target_ids"
orig_content=$(cat "$file")
test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
test_content "$file" "$orig_content" "1 2" "non-integer was there"
test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
test_content "$file" "$orig_content" "" "wrong input written"
test_write_succ "$file" "" "$orig_content" "empty input"
test_content "$file" "$orig_content" "" "empty input written"
echo "$orig_content" > "$file"
echo "PASS"
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

source _debugfs_common.sh

# Test empty targets case
# =======================

# With no target ids configured, turning monitoring on must be rejected.
orig_target_ids=$(cat "$DBGFS/target_ids")
echo "" > "$DBGFS/target_ids"
orig_monitor_on=$(cat "$DBGFS/monitor_on")
# Fix: pass the saved value "$orig_monitor_on", not the literal string
# "orig_monitor_on", so a test failure restores the real original state.
test_write_fail "$DBGFS/monitor_on" "on" "$orig_monitor_on" "empty target ids"
echo "$orig_target_ids" > "$DBGFS/target_ids"
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

source _debugfs_common.sh

# Test huge count read write
# ==========================

# Clear the kernel log so only WARNINGs triggered by this test are seen.
dmesg -C

# Poke every DAMON debugfs file with absurdly large read/write counts.
for file in "$DBGFS/"*
do
	./huge_count_read_write "$file"
done

# Fail (and dump the log) if the kernel emitted any WARNING.
if dmesg | grep -q WARNING
then
	dmesg
	exit 1
fi
exit 0
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

source _debugfs_common.sh

# Test schemes file
# =================

# Exercise the DAMON debugfs 'schemes' interface: valid single-line input
# must be accepted, malformed or out-of-range input must be rejected, and
# the original content is restored at the end.
file="$DBGFS/schemes"
orig_content=$(cat "$file")

# A well-formed 18-field scheme line should be accepted.
test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
	"$orig_content" "valid input"
# Input split across multiple lines must be rejected.
test_write_fail "$file" "1 2
3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
# An empty write disables all schemes and should succeed.
test_write_succ "$file" "" "$orig_content" "disabling"
# Condition ranges with min > max must be rejected.
test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \
	"$orig_content" "wrong condition ranges"
echo "$orig_content" > "$file"
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0

source _debugfs_common.sh

# Test target_ids file
# ====================

# Exercise the DAMON debugfs 'target_ids' interface.  Writes always
# succeed; the file silently drops everything from the first non-integer
# token on, so validity is checked by reading the content back.
file="$DBGFS/target_ids"
orig_content=$(cat "$file")

test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
# Only the ids before the non-integer token ("1 2") should remain.
test_content "$file" "$orig_content" "1 2" "non-integer was there"
test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
# A leading non-integer drops everything: the file reads back empty.
test_content "$file" "$orig_content" "" "wrong input written"
test_write_succ "$file" "" "$orig_content" "empty input"
test_content "$file" "$orig_content" "" "empty input written"
echo "$orig_content" > "$file"
// SPDX-License-Identifier: GPL-2.0
/*
* Author: SeongJae Park <sj@kernel.org>
*/
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
/*
 * Open @file and issue one write() and one read() with a huge (~4GB)
 * count.  The calls themselves are expected to fail; what is under test
 * is the kernel-side handling (the wrapper script checks dmesg for
 * WARNINGs afterwards), so the syscall return values are deliberately
 * discarded and only errno is reported via perror().
 */
void write_read_with_huge_count(char *file)
{
	int filedesc = open(file, O_RDWR);
	char buf[25];

	printf("%s %s\n", __func__, file);
	if (filedesc < 0) {
		fprintf(stderr, "failed opening %s\n", file);
		exit(1);
	}

	/* (void) casts: results intentionally ignored (see above); this
	 * also avoids the unused-but-set variable the original kept. */
	(void)write(filedesc, "", 0xfffffffful);
	perror("after write: ");
	(void)read(filedesc, buf, 0xfffffffful);
	perror("after read: ");

	close(filedesc);
}
/*
 * Entry point: expects exactly one argument, the DAMON debugfs file to
 * exercise with huge read/write counts.  Exits with status 1 on misuse.
 */
int main(int argc, char *argv[])
{
	if (argc != 2) {
		fprintf(stderr, "Usage: %s <file>\n", argv[0]);
		return 1;	/* equivalent to exit(1) from main */
	}

	write_read_with_huge_count(argv[1]);
	return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment