Commit f8fbd8c4 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "A few late-breaking fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/memblock.c: fix NULL dereference error
  MAINTAINERS: update cgroup's document path
  slub: drop bogus inline for fixup_red_left()
  powerpc/fsl_rio: fix a missing error code
  mm: initialise per_cpu_nodestats for all online pgdats at boot
  mm/memblock: fix a typo in a comment
  mm: disable CONFIG_MEMORY_HOTPLUG when KASAN is enabled
parents 84e39eeb e47608ab
MAINTAINERS
@@ -3219,7 +3219,7 @@ M:	Johannes Weiner <hannes@cmpxchg.org>
 L:	cgroups@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
 S:	Maintained
-F:	Documentation/cgroups/
+F:	Documentation/cgroup*
 F:	include/linux/cgroup*
 F:	kernel/cgroup*

@@ -3230,7 +3230,7 @@ W:	http://www.bullopensource.org/cpuset/
 W:	http://oss.sgi.com/projects/cpusets/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
 S:	Maintained
-F:	Documentation/cgroups/cpusets.txt
+F:	Documentation/cgroup-v1/cpusets.txt
 F:	include/linux/cpuset.h
 F:	kernel/cpuset.c
...
arch/powerpc/sysdev/fsl_rio.c
@@ -491,6 +491,7 @@ int fsl_rio_setup(struct platform_device *dev)
 	rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0);
 	if (!rmu_node) {
 		dev_err(&dev->dev, "No valid fsl,srio-rmu-handle property\n");
+		rc = -ENOENT;
 		goto err_rmu;
 	}
 	rc = of_address_to_resource(rmu_node, 0, &rmu_regs);
...
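
The fsl_rio_setup() hunk above fixes a classic goto-cleanup bug: when the phandle lookup fails, the code jumps to the error label without setting the return variable, so the caller can be handed a stale success value instead of an error. Below is a minimal standalone sketch of the pattern, not the kernel code itself; load_config() and find_node() are made-up stand-ins introduced only for illustration.

#include <errno.h>
#include <stdio.h>

/* Hypothetical lookup that can fail, standing in for of_parse_phandle(). */
static void *find_node(int exists)
{
	return exists ? (void *)0x1 : NULL;
}

/*
 * Mirrors the shape of fsl_rio_setup(): without the explicit assignment
 * before the goto, rc would still hold 0 and the caller would be told the
 * setup succeeded even though the lookup failed.
 */
static int load_config(int node_exists)
{
	int rc = 0;
	void *node;

	node = find_node(node_exists);
	if (!node) {
		rc = -ENOENT;	/* the fix: record the failure before jumping */
		goto err;
	}

	/* ... use the node ... */
	return 0;
err:
	/* cleanup shared with other failure paths would go here */
	return rc;
}

int main(void)
{
	printf("present: %d, missing: %d\n", load_config(1), load_config(0));
	return 0;
}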
mm/Kconfig
@@ -187,6 +187,7 @@ config MEMORY_HOTPLUG
 	bool "Allow for memory hot-add"
 	depends on SPARSEMEM || X86_64_ACPI_NUMA
 	depends on ARCH_ENABLE_MEMORY_HOTPLUG
+	depends on !KASAN

 config MEMORY_HOTPLUG_SPARSE
 	def_bool y
...
mm/memblock.c
@@ -482,7 +482,7 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
  * @flags: flags of the new region
  *
  * Insert new memblock region [@base,@base+@size) into @type at @idx.
- * @type must already have extra room to accomodate the new region.
+ * @type must already have extra room to accommodate the new region.
  */
 static void __init_memblock memblock_insert_region(struct memblock_type *type,
 						   int idx, phys_addr_t base,
@@ -544,7 +544,7 @@ int __init_memblock memblock_add_range(struct memblock_type *type,
 	/*
 	 * The following is executed twice. Once with %false @insert and
 	 * then with %true. The first counts the number of regions needed
-	 * to accomodate the new area. The second actually inserts them.
+	 * to accommodate the new area. The second actually inserts them.
 	 */
 	base = obase;
 	nr_new = 0;
@@ -994,7 +994,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 	if (*idx == (u64)ULLONG_MAX) {
 		idx_a = type_a->cnt - 1;
-		idx_b = type_b->cnt;
+		if (type_b != NULL)
+			idx_b = type_b->cnt;
+		else
+			idx_b = 0;
 	}

 	for (; idx_a >= 0; idx_a--) {
...
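
The last memblock hunk makes __next_mem_range_rev() tolerate a missing second region list: previously type_b->cnt was read unconditionally, so a NULL type_b crashed the iterator as soon as its indices were initialised. Below is a minimal standalone sketch of the guarded initialisation; struct region_list and iter_init_rev() are simplified stand-ins invented here, not the real memblock types.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct memblock_type: only the region count. */
struct region_list {
	long cnt;
};

/*
 * Initialise reverse-iteration indices over a primary list and an optional
 * secondary list. As in the fixed branch, the secondary index is read from
 * list_b only when list_b is non-NULL.
 */
static void iter_init_rev(const struct region_list *list_a,
			  const struct region_list *list_b,
			  long *idx_a, long *idx_b)
{
	*idx_a = list_a->cnt - 1;
	if (list_b != NULL)
		*idx_b = list_b->cnt;
	else
		*idx_b = 0;	/* nothing to walk on the secondary side */
}

int main(void)
{
	struct region_list mem = { .cnt = 4 };
	long a, b;

	iter_init_rev(&mem, NULL, &a, &b);	/* no secondary list, no crash */
	printf("idx_a=%ld idx_b=%ld\n", a, b);
	return 0;
}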
mm/page_alloc.c
@@ -5257,11 +5257,6 @@ static void __meminit setup_zone_pageset(struct zone *zone)
 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
 	for_each_possible_cpu(cpu)
 		zone_pageset_init(zone, cpu);
-
-	if (!zone->zone_pgdat->per_cpu_nodestats) {
-		zone->zone_pgdat->per_cpu_nodestats =
-			alloc_percpu(struct per_cpu_nodestat);
-	}
 }

 /*
@@ -5270,10 +5265,15 @@ static void __meminit setup_zone_pageset(struct zone *zone)
  */
 void __init setup_per_cpu_pageset(void)
 {
+	struct pglist_data *pgdat;
 	struct zone *zone;

 	for_each_populated_zone(zone)
 		setup_zone_pageset(zone);
+
+	for_each_online_pgdat(pgdat)
+		pgdat->per_cpu_nodestats =
+			alloc_percpu(struct per_cpu_nodestat);
 }

 static noinline __ref
...
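
The page_alloc hunks move the per_cpu_nodestats allocation out of setup_zone_pageset() and into a loop over every online pgdat, so a node's per-CPU statistics exist at boot even when none of its zones are populated. Below is a toy standalone sketch of that direction, assuming the intent is to cover online but memoryless nodes; struct node, struct node_stats and setup_node_stats() are invented for illustration and are not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Toy model of a NUMA node: it may be online without owning any memory. */
struct node_stats { long counters[4]; };

struct node {
	int online;
	int has_populated_zone;
	struct node_stats *stats;
};

#define NR_NODES 3

/*
 * Walk every online node and allocate its stats directly, instead of
 * allocating them as a side effect of setting up populated zones (which
 * would leave an online-but-memoryless node with a NULL pointer).
 */
static void setup_node_stats(struct node nodes[], int nr)
{
	for (int i = 0; i < nr; i++)
		if (nodes[i].online)
			nodes[i].stats = calloc(1, sizeof(*nodes[i].stats));
}

int main(void)
{
	struct node nodes[NR_NODES] = {
		{ .online = 1, .has_populated_zone = 1 },
		{ .online = 1, .has_populated_zone = 0 },	/* memoryless */
		{ .online = 0 },
	};

	setup_node_stats(nodes, NR_NODES);
	for (int i = 0; i < NR_NODES; i++)
		printf("node %d: stats %s\n", i,
		       nodes[i].stats ? "allocated" : "missing");
	return 0;
}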
mm/slub.c
@@ -124,7 +124,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }

-inline void *fixup_red_left(struct kmem_cache *s, void *p)
+void *fixup_red_left(struct kmem_cache *s, void *p)
 {
 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
 		p += s->red_left_pad;
...