Commit 4adca1cb authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew Morton)

Merge misc fixes from Andrew Morton:
 "Six fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  drivers/rtc/rtc-s5m.c: terminate s5m_rtc_id array with empty element
  printk: add dummy routine for when CONFIG_PRINTK=n
  mm/vmscan: fix highidx argument type
  memcg: remove extra newlines from memcg oom kill log
  x86, build: replace Perl script with Shell script
  mm: page_alloc: embed OOM killing naturally into allocation slowpath
parents c976a67b 45cd15e6
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo
 suffix-$(CONFIG_KERNEL_LZ4) := lz4
 RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
-        perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+        $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
...
#!/usr/bin/perl
#
# Calculate the amount of space needed to run the kernel, including room for
# the .bss and .brk sections.
#
# Usage:
# objdump -h a.out | perl calc_run_size.pl
use strict;

my $mem_size = 0;
my $file_offset = 0;

my $sections=" *[0-9]+ \.(?:bss|brk) +";
while (<>) {
        if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
                my $size = hex($1);
                my $offset = hex($2);
                $mem_size += $size;
                if ($file_offset == 0) {
                        $file_offset = $offset;
                } elsif ($file_offset != $offset) {
                        # BFD linker shows the same file offset in ELF.
                        # Gold linker shows them as consecutive.
                        next if ($file_offset + $mem_size == $offset + $size);

                        printf STDERR "file_offset: 0x%lx\n", $file_offset;
                        printf STDERR "mem_size: 0x%lx\n", $mem_size;
                        printf STDERR "offset: 0x%lx\n", $offset;
                        printf STDERR "size: 0x%lx\n", $size;

                        die ".bss and .brk are non-contiguous\n";
                }
        }
}

if ($file_offset == 0) {
        die "Never found .bss or .brk file offset\n";
}
printf("%d\n", $mem_size + $file_offset);
#!/bin/sh
#
# Calculate the amount of space needed to run the kernel, including room for
# the .bss and .brk sections.
#
# Usage:
# objdump -h a.out | sh calc_run_size.sh

# One whitespace-delimited hex field from an "objdump -h" section line.
NUM='\([0-9a-fA-F]*[ \t]*\)'

# For each .bss/.brk line, keep the size (1st hex field) and file offset (4th).
OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
if [ -z "$OUT" ] ; then
        echo "Never found .bss or .brk file offset" >&2
        exit 1
fi

# $OUT now holds "sizeA offsetA sizeB offsetB"; peel the fields off in order
# and convert them from hex to decimal.
OUT=$(echo ${OUT# })
sizeA=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
offsetA=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
sizeB=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
offsetB=$(printf "%d" 0x${OUT%% *})

run_size=$(( $offsetA + $sizeA + $sizeB ))

# BFD linker shows the same file offset in ELF.
if [ "$offsetA" -ne "$offsetB" ] ; then
        # Gold linker shows them as consecutive.
        endB=$(( $offsetB + $sizeB ))
        if [ "$endB" != "$run_size" ] ; then
                printf "sizeA: 0x%x\n" $sizeA >&2
                printf "offsetA: 0x%x\n" $offsetA >&2
                printf "sizeB: 0x%x\n" $sizeB >&2
                printf "offsetB: 0x%x\n" $offsetB >&2
                echo ".bss and .brk are non-contiguous" >&2
                exit 1
        fi
fi

printf "%d\n" $run_size
exit 0
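
For reference, here is the same calculation written out as a small standalone C program. The .bss/.brk sizes and offsets below are made-up example values, not taken from any real vmlinux; the point is only to show the arithmetic and the BFD/gold contiguity check that both scripts implement.

/*
 * Worked example (invented numbers) of the run-size calculation:
 * run size = file offset of the first of .bss/.brk plus the combined
 * memory size of both, after checking that the two sections are
 * laid out contiguously.
 */
#include <stdio.h>

struct section { const char *name; unsigned long size, offset; };

int main(void)
{
        /* Example values in the style of "objdump -h vmlinux" output. */
        struct section bss = { ".bss", 0x170000, 0xd2f000 };
        struct section brk = { ".brk", 0x02d000, 0xe9f000 };

        unsigned long mem_size    = bss.size + brk.size;
        unsigned long file_offset = bss.offset;

        /*
         * BFD reports both sections at the same file offset; gold reports
         * consecutive offsets. Anything else means they are not contiguous.
         */
        if (brk.offset != file_offset &&
            file_offset + mem_size != brk.offset + brk.size) {
                fprintf(stderr, ".bss and .brk are non-contiguous\n");
                return 1;
        }

        printf("run size: %lu\n", mem_size + file_offset);
        return 0;
}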
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
 static const struct platform_device_id s5m_rtc_id[] = {
         { "s5m-rtc",            S5M8767X },
         { "s2mps14-rtc",        S2MPS14X },
+        { },
 };
 static struct platform_driver s5m_rtc_driver = {
...
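
The added { } entry is the zero-filled sentinel that tells table walkers where the array ends; without it, a lookup can run past the last real entry. As a rough userspace sketch of the idea (the struct and function names below are simplified stand-ins, not the kernel's real platform_device_id machinery):

/*
 * Illustrative sketch, not kernel code: a lookup over an ID table stops at
 * the empty terminating entry, so the "{ }," sentinel is what keeps the
 * walk inside the array.
 */
#include <stdio.h>
#include <string.h>

struct id_entry {                     /* simplified stand-in for struct platform_device_id */
        const char   *name;
        unsigned long driver_data;
};

static const struct id_entry rtc_id_table[] = {
        { "s5m-rtc",     1 },
        { "s2mps14-rtc", 2 },
        { },                          /* zero-filled sentinel: lookup stops here */
};

static const struct id_entry *match_id(const struct id_entry *id, const char *devname)
{
        for (; id->name; id++)        /* relies on the terminating empty entry */
                if (strcmp(id->name, devname) == 0)
                        return id;
        return NULL;
}

int main(void)
{
        const struct id_entry *id = match_id(rtc_id_table, "s2mps14-rtc");

        printf("%s -> %lu\n", id ? id->name : "no match",
               id ? id->driver_data : 0UL);
        return 0;
}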
@@ -85,11 +85,6 @@ static inline void oom_killer_enable(void)
         oom_killer_disabled = false;
 }
-static inline bool oom_gfp_allowed(gfp_t gfp_mask)
-{
-        return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
-}
-
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 static inline bool task_will_free_mem(struct task_struct *task)
...
@@ -10,9 +10,6 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
-extern char *log_buf_addr_get(void);
-extern u32 log_buf_len_get(void);
-
 static inline int printk_get_level(const char *buffer)
 {
         if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -163,6 +160,8 @@ extern int kptr_restrict;
 extern void wake_up_klogd(void);
+char *log_buf_addr_get(void);
+u32 log_buf_len_get(void);
 void log_buf_kexec_setup(void);
 void __init setup_log_buf(int early);
 void dump_stack_set_arch_desc(const char *fmt, ...);
@@ -198,6 +197,16 @@ static inline void wake_up_klogd(void)
 {
 }
+static inline char *log_buf_addr_get(void)
+{
+        return NULL;
+}
+
+static inline u32 log_buf_len_get(void)
+{
+        return 0;
+}
+
 static inline void log_buf_kexec_setup(void)
 {
 }
...
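
The pattern here: when CONFIG_PRINTK is off, the header supplies static inline no-op versions of log_buf_addr_get()/log_buf_len_get() so that call sites build without sprinkling #ifdefs. A minimal userspace sketch of that pattern (compile as-is for the stub branch; passing -DCONFIG_PRINTK selects the real declarations, whose definitions are not part of this sketch):

/*
 * Sketch of the "compiled-out feature gets inline stubs" pattern; an
 * illustration, not the kernel header itself.
 */
#include <stdint.h>
#include <stdio.h>

#ifdef CONFIG_PRINTK
/* Real declarations; the actual definitions would live elsewhere. */
char *log_buf_addr_get(void);
uint32_t log_buf_len_get(void);
#else
/* Feature compiled out: harmless stubs keep every call site building. */
static inline char *log_buf_addr_get(void) { return NULL; }
static inline uint32_t log_buf_len_get(void) { return 0; }
#endif

int main(void)
{
        /* No #ifdef needed at the call site. */
        printf("log buffer at %p, length %u\n",
               (void *)log_buf_addr_get(), (unsigned)log_buf_len_get());
        return 0;
}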
@@ -1477,9 +1477,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
         pr_info("Task in ");
         pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
-        pr_info(" killed as a result of limit of ");
+        pr_cont(" killed as a result of limit of ");
         pr_cont_cgroup_path(memcg->css.cgroup);
-        pr_info("\n");
+        pr_cont("\n");
         rcu_read_unlock();
...
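
The reason for the change: each pr_info() starts a new log record with its own line and prefix, while pr_cont() appends to the record already being assembled, so using pr_info() for the middle and end of this message split it across lines. A rough userspace analogue, with stand-in functions and a made-up cgroup path:

/*
 * Userspace analogue only: pr_info_demo() models "start a fresh log record",
 * pr_cont_demo() models "continue the current one". The paths are invented.
 */
#include <stdarg.h>
#include <stdio.h>

static void pr_info_demo(const char *fmt, ...)   /* new record: newline + prefix first */
{
        va_list ap;
        printf("\n[ info ] ");
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

static void pr_cont_demo(const char *fmt, ...)   /* continue the current record */
{
        va_list ap;
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

int main(void)
{
        /* Broken variant: the message is split across three records. */
        pr_info_demo("Task in ");
        pr_cont_demo("/user.slice");
        pr_info_demo(" killed as a result of limit of ");
        pr_cont_demo("/user.slice/memhog");

        /* Fixed variant: one continuous line. */
        pr_info_demo("Task in ");
        pr_cont_demo("/user.slice");
        pr_cont_demo(" killed as a result of limit of ");
        pr_cont_demo("/user.slice/memhog");
        printf("\n");
        return 0;
}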
@@ -2332,12 +2332,21 @@ static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         struct zonelist *zonelist, enum zone_type high_zoneidx,
         nodemask_t *nodemask, struct zone *preferred_zone,
-        int classzone_idx, int migratetype)
+        int classzone_idx, int migratetype, unsigned long *did_some_progress)
 {
         struct page *page;
-        /* Acquire the per-zone oom lock for each zone */
+        *did_some_progress = 0;
+
+        if (oom_killer_disabled)
+                return NULL;
+
+        /*
+         * Acquire the per-zone oom lock for each zone.  If that
+         * fails, somebody else is making progress for us.
+         */
         if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
+                *did_some_progress = 1;
                 schedule_timeout_uninterruptible(1);
                 return NULL;
         }
@@ -2363,12 +2372,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                 goto out;
         if (!(gfp_mask & __GFP_NOFAIL)) {
+                /* Coredumps can quickly deplete all memory reserves */
+                if (current->flags & PF_DUMPCORE)
+                        goto out;
                 /* The OOM killer will not help higher order allocs */
                 if (order > PAGE_ALLOC_COSTLY_ORDER)
                         goto out;
                 /* The OOM killer does not needlessly kill tasks for lowmem */
                 if (high_zoneidx < ZONE_NORMAL)
                         goto out;
+                /* The OOM killer does not compensate for light reclaim */
+                if (!(gfp_mask & __GFP_FS))
+                        goto out;
                 /*
                  * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                  * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
@@ -2381,7 +2396,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         }
         /* Exhausted what can be done so it's blamo time */
         out_of_memory(zonelist, gfp_mask, order, nodemask, false);
+        *did_some_progress = 1;
 out:
         oom_zonelist_unlock(zonelist, gfp_mask);
         return page;
@@ -2658,7 +2673,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
             (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                 goto nopage;
-restart:
+retry:
         if (!(gfp_mask & __GFP_NO_KSWAPD))
                 wake_all_kswapds(order, zonelist, high_zoneidx,
                                  preferred_zone, nodemask);
@@ -2681,7 +2696,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                 classzone_idx = zonelist_zone_idx(preferred_zoneref);
         }
-rebalance:
         /* This is the last chance, in general, before the goto nopage. */
         page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                         high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2788,54 +2802,28 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         if (page)
                 goto got_pg;
-        /*
-         * If we failed to make any progress reclaiming, then we are
-         * running out of options and have to consider going OOM
-         */
-        if (!did_some_progress) {
-                if (oom_gfp_allowed(gfp_mask)) {
-                        if (oom_killer_disabled)
-                                goto nopage;
-                        /* Coredumps can quickly deplete all memory reserves */
-                        if ((current->flags & PF_DUMPCORE) &&
-                            !(gfp_mask & __GFP_NOFAIL))
-                                goto nopage;
-                        page = __alloc_pages_may_oom(gfp_mask, order,
-                                        zonelist, high_zoneidx,
-                                        nodemask, preferred_zone,
-                                        classzone_idx, migratetype);
-                        if (page)
-                                goto got_pg;
-                        if (!(gfp_mask & __GFP_NOFAIL)) {
-                                /*
-                                 * The oom killer is not called for high-order
-                                 * allocations that may fail, so if no progress
-                                 * is being made, there are no other options and
-                                 * retrying is unlikely to help.
-                                 */
-                                if (order > PAGE_ALLOC_COSTLY_ORDER)
-                                        goto nopage;
-                                /*
-                                 * The oom killer is not called for lowmem
-                                 * allocations to prevent needlessly killing
-                                 * innocent tasks.
-                                 */
-                                if (high_zoneidx < ZONE_NORMAL)
-                                        goto nopage;
-                        }
-                        goto restart;
-                }
-        }
         /* Check if we should retry the allocation */
         pages_reclaimed += did_some_progress;
         if (should_alloc_retry(gfp_mask, order, did_some_progress,
                                                 pages_reclaimed)) {
+                /*
+                 * If we fail to make progress by freeing individual
+                 * pages, but the allocation wants us to keep going,
+                 * start OOM killing tasks.
+                 */
+                if (!did_some_progress) {
+                        page = __alloc_pages_may_oom(gfp_mask, order, zonelist,
+                                                high_zoneidx, nodemask,
+                                                preferred_zone, classzone_idx,
+                                                migratetype, &did_some_progress);
+                        if (page)
+                                goto got_pg;
+                        if (!did_some_progress)
+                                goto nopage;
+                }
                 /* Wait for some write requests to complete then retry */
                 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
-                goto rebalance;
+                goto retry;
         } else {
                 /*
                  * High-order allocations do not necessarily loop after
...
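
Taken together, the page_alloc changes fold OOM killing into the ordinary retry loop: when should_alloc_retry() wants to keep going but reclaim made no progress, the slowpath calls __alloc_pages_may_oom(), which now reports what it did through the same did_some_progress counter reclaim uses. A deliberately simplified, self-contained model of that control flow follows; the helpers and numbers are invented for illustration and are nothing like the real allocator internals.

/*
 * Simplified model of the new slowpath shape: OOM killing is just another
 * way of making progress inside the one retry loop.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long try_reclaim(void)          /* stand-in for direct reclaim */
{
        static int budget = 2;                  /* pretend reclaim frees pages twice, then stalls */
        return budget-- > 0 ? 32 : 0;
}

/* Stand-in for __alloc_pages_may_oom(): no page handed back directly, but
 * progress is reported through the same counter reclaim uses. */
static bool try_oom_kill(unsigned long *did_some_progress)
{
        *did_some_progress = 1;
        return false;
}

static bool alloc_slowpath(void)
{
        unsigned long pages_reclaimed = 0;

        for (;;) {                              /* the renamed "retry:" loop */
                unsigned long did_some_progress = try_reclaim();

                if (pages_reclaimed + did_some_progress > 64)
                        return true;            /* model for "allocation now succeeds" */

                if (!did_some_progress) {
                        /* Reclaim is stuck: fall back to the OOM killer as
                         * part of the same retry decision. */
                        if (try_oom_kill(&did_some_progress))
                                return true;    /* got a page straight back */
                        if (!did_some_progress)
                                return false;   /* nopage: nothing left to try */
                }

                pages_reclaimed += did_some_progress;
        }
}

int main(void)
{
        printf("allocation %s\n", alloc_slowpath() ? "succeeded" : "failed");
        return 0;
}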
@@ -2656,7 +2656,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
          * should make reasonable progress.
          */
         for_each_zone_zonelist_nodemask(zone, z, zonelist,
-                                        gfp_mask, nodemask) {
+                                        gfp_zone(gfp_mask), nodemask) {
                 if (zone_idx(zone) > ZONE_NORMAL)
                         continue;
...
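
The highidx argument of for_each_zone_zonelist_nodemask() is a zone index, not a gfp mask; passing the raw mask compiles silently because both are plain integers, and gfp_zone() is the conversion the iterator actually expects. A userspace sketch of that bug class, with hypothetical flag values and a stand-in for gfp_zone():

/*
 * Illustration only: a flags bitmask accepted where a small index is
 * expected, versus the converted value the loop really wants.
 */
#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

#define FAKE_GFP_FS 0x80u                 /* hypothetical flag values */
#define FAKE_GFP_IO 0x40u

/* Stand-in for gfp_zone(): here every mask simply maps to ZONE_NORMAL. */
static enum zone_type fake_gfp_zone(unsigned int gfp_mask)
{
        (void)gfp_mask;
        return ZONE_NORMAL;
}

static void walk_zones(unsigned int highidx)  /* stand-in for the zonelist iterator */
{
        unsigned int i;

        for (i = 0; i <= highidx && i < MAX_NR_ZONES; i++)
                printf("  visiting zone %u\n", i);
        if (highidx >= MAX_NR_ZONES)
                printf("  (highidx %u is a flag mask, not a zone index)\n", highidx);
}

int main(void)
{
        unsigned int gfp_mask = FAKE_GFP_FS | FAKE_GFP_IO;

        printf("buggy call, passing the mask itself:\n");
        walk_zones(gfp_mask);                 /* compiles, but 0xc0 is no zone index */

        printf("fixed call, passing gfp_zone(mask):\n");
        walk_zones(fake_gfp_zone(gfp_mask));
        return 0;
}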