Commit 68845a55 authored by Linus Torvalds

Merge branch 'akpm' into master (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "Subsystems affected by this patch series: mm/pagemap, mm/shmem,
  mm/hotfixes, mm/memcg, mm/hugetlb, mailmap, squashfs, scripts,
  io-mapping, MAINTAINERS, and gdb"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  scripts/gdb: fix lx-symbols 'gdb.error' while loading modules
  MAINTAINERS: add KCOV section
  io-mapping: indicate mapping failure
  scripts/decode_stacktrace: strip basepath from all paths
  squashfs: fix length field overlap check in metadata reading
  mailmap: add entry for Mike Rapoport
  khugepaged: fix null-pointer dereference due to race
  mm/hugetlb: avoid hardcoding while checking if cma is enabled
  mm: memcg/slab: fix memory leak at non-root kmem_cache destroy
  mm/memcg: fix refcount error while moving and swapping
  mm/memcontrol: fix OOPS inside mem_cgroup_get_nr_swap_pages()
  mm: initialize return of vm_insert_pages
  vfs/xattr: mm/shmem: kernfs: release simple xattr entry in a right way
  mm/mmap.c: close race between munmap() and expand_upwards()/downwards()
parents c953d60b 7359608a
@@ -198,6 +198,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
+Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
+Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com>
 Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
 Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com>
 Mitesh shah <mshah@teja.com>
...
@@ -9306,6 +9306,17 @@ F: Documentation/kbuild/kconfig*
 F: scripts/Kconfig.include
 F: scripts/kconfig/
 
+KCOV
+R: Dmitry Vyukov <dvyukov@google.com>
+R: Andrey Konovalov <andreyknvl@google.com>
+L: kasan-dev@googlegroups.com
+S: Maintained
+F: Documentation/dev-tools/kcov.rst
+F: include/linux/kcov.h
+F: include/uapi/linux/kcov.h
+F: kernel/kcov.c
+F: scripts/Makefile.kcov
+
 KCSAN
 M: Marco Elver <elver@google.com>
 R: Dmitry Vyukov <dvyukov@google.com>
...
@@ -175,7 +175,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
 		/* Extract the length of the metadata block */
 		data = page_address(bvec->bv_page) + bvec->bv_offset;
 		length = data[offset];
-		if (offset <= bvec->bv_len - 1) {
+		if (offset < bvec->bv_len - 1) {
 			length |= data[offset + 1] << 8;
 		} else {
 			if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
...
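The squashfs change tightens an off-by-one: the two-byte little-endian length field may straddle a bio segment boundary, and data[offset + 1] lies inside the current bvec only when offset + 1 <= bvec->bv_len - 1. A minimal userspace sketch of the corrected bounds check (illustrative only, not the kernel code):

#include <stdio.h>

/* Illustrative only: mirrors the squashfs bounds check. The second byte of
 * the length field, data[offset + 1], is inside the segment only when
 * offset + 1 <= seg_len - 1, i.e. offset < seg_len - 1. */
static int second_byte_in_segment(size_t offset, size_t seg_len)
{
	return offset < seg_len - 1;
}

int main(void)
{
	printf("%d\n", second_byte_in_segment(4094, 4096)); /* 1: both bytes in this segment */
	printf("%d\n", second_byte_in_segment(4095, 4096)); /* 0: second byte is in the next segment */
	return 0;
}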
@@ -107,9 +107,12 @@ io_mapping_init_wc(struct io_mapping *iomap,
 		   resource_size_t base,
 		   unsigned long size)
 {
+	iomap->iomem = ioremap_wc(base, size);
+	if (!iomap->iomem)
+		return NULL;
+
 	iomap->base = base;
 	iomap->size = size;
-	iomap->iomem = ioremap_wc(base, size);
 #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
 	iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
 #elif defined(pgprot_writecombine)
...
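With ioremap_wc() attempted first and NULL returned on failure, callers of io_mapping_init_wc() can now detect a failed write-combining mapping instead of carrying a struct with a NULL iomem pointer. A hedged caller sketch; the 'priv' structure and the chosen error code are hypothetical:

	/* Hypothetical caller: 'priv' and the error code are made up. */
	if (!io_mapping_init_wc(&priv->iomap, base, size))
		return -EIO;	/* mapping failed, priv->iomap must not be used */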
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/spinlock.h>
+#include <linux/mm.h>
 #include <uapi/linux/xattr.h>
 
 struct inode;
@@ -94,7 +95,7 @@ static inline void simple_xattrs_free(struct simple_xattrs *xattrs)
 
 	list_for_each_entry_safe(xattr, node, &xattrs->head, list) {
 		kfree(xattr->name);
-		kfree(xattr);
+		kvfree(xattr);
 	}
 }
 
...
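The kfree() -> kvfree() switch here (and in shmem_initxattrs() further down) follows from simple xattr entries being allocated with kvmalloc(), which may fall back to vmalloc for large values; <linux/mm.h> is included for kvfree(). A rough kernel-style sketch of the pairing rule, with hypothetical names:

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical demo type, not the kernel's simple_xattr. */
struct demo_xattr {
	char *name;		/* kmalloc()ed separately */
	size_t size;
	char value[];		/* the entry itself comes from kvmalloc() */
};

static void demo_xattr_free(struct demo_xattr *xattr)
{
	kfree(xattr->name);	/* kmalloc() pairs with kfree() */
	kvfree(xattr);		/* kvmalloc() may be vmalloc-backed: use kvfree() */
}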
@@ -45,7 +45,10 @@ int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
+#ifdef CONFIG_CMA
 static struct cma *hugetlb_cma[MAX_NUMNODES];
+#endif
+static unsigned long hugetlb_cma_size __initdata;
 
 /*
  * Minimum page order among possible hugepage sizes, set to a proper value
@@ -1235,9 +1238,10 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 	 * If the page isn't allocated using the cma allocator,
 	 * cma_release() returns false.
 	 */
-	if (IS_ENABLED(CONFIG_CMA) &&
-	    cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+#ifdef CONFIG_CMA
+	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
 		return;
+#endif
 
 	free_contig_range(page_to_pfn(page), 1 << order);
 }
@@ -1248,7 +1252,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	unsigned long nr_pages = 1UL << huge_page_order(h);
 
-	if (IS_ENABLED(CONFIG_CMA)) {
+#ifdef CONFIG_CMA
+	{
 		struct page *page;
 		int node;
 
@@ -1262,6 +1267,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 				return page;
 		}
 	}
+#endif
 
 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
 }
@@ -2571,7 +2577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	for (i = 0; i < h->max_huge_pages; ++i) {
 		if (hstate_is_gigantic(h)) {
-			if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) {
+			if (hugetlb_cma_size) {
 				pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
 				break;
 			}
@@ -5654,7 +5660,6 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
 }
 
 #ifdef CONFIG_CMA
-static unsigned long hugetlb_cma_size __initdata;
 static bool cma_reserve_called __initdata;
 
 static int __init cmdline_parse_hugetlb_cma(char *p)
...
@@ -958,6 +958,9 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 		return SCAN_ADDRESS_RANGE;
 	if (!hugepage_vma_check(vma, vma->vm_flags))
 		return SCAN_VMA_CHECK;
+	/* Anon VMA expected */
+	if (!vma->anon_vma || vma->vm_ops)
+		return SCAN_VMA_CHECK;
 	return 0;
 }
 
...
@@ -5669,7 +5669,6 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.to))
 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
 		css_put_many(&mc.to->css, mc.moved_swap);
 
 		mc.moved_swap = 0;
@@ -5860,7 +5859,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 			ent = target.ent;
 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
 				mc.precharge--;
-				/* we fixup refcnts and charges later. */
+				mem_cgroup_id_get_many(mc.to, 1);
+				/* we fixup other refcnts and charges later. */
 				mc.moved_swap++;
 			}
 			break;
@@ -7186,6 +7186,13 @@ static struct cftype memsw_files[] = {
 	{ },	/* terminate */
 };
 
+/*
+ * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
+ * instead of a core_initcall(), this could mean cgroup_memory_noswap still
+ * remains set to false even when memcg is disabled via "cgroup_disable=memory"
+ * boot parameter. This may result in premature OOPS inside
+ * mem_cgroup_get_nr_swap_pages() function in corner cases.
+ */
 static int __init mem_cgroup_swap_init(void)
 {
 	/* No memory control -> no swap control */
@@ -7200,6 +7207,6 @@ static int __init mem_cgroup_swap_init(void)
 
 	return 0;
 }
-subsys_initcall(mem_cgroup_swap_init);
+core_initcall(mem_cgroup_swap_init);
 
 #endif /* CONFIG_MEMCG_SWAP */
@@ -1601,7 +1601,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
 #else
 	unsigned long idx = 0, pgcount = *num;
-	int err;
+	int err = -EINVAL;
 
 	for (; idx < pgcount; ++idx) {
 		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
...
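The vm_insert_pages() change matters for the !pte_index fallback path: when *num is 0 the loop body never runs and the old 'int err;' was returned uninitialized. A tiny standalone illustration of the pattern (hypothetical function, not the kernel one):

#include <errno.h>

/* With count == 0 the loop never executes, so err must start at a
 * defined error value instead of being returned uninitialized. */
static int apply_all(const int *results, unsigned long count)
{
	unsigned long idx = 0;
	int err = -EINVAL;	/* previously: int err; */

	for (; idx < count; ++idx) {
		err = results[idx];
		if (err)
			break;
	}
	return err;
}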
@@ -2620,7 +2620,7 @@ static void unmap_region(struct mm_struct *mm,
  * Create a list of vma's touched by the unmap, removing them from the mm's
  * vma list as we go..
  */
-static void
+static bool
 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct vm_area_struct *prev, unsigned long end)
 {
@@ -2645,6 +2645,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* Kill the cache */
 	vmacache_invalidate(mm);
+
+	/*
+	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
+	 * VM_GROWSUP VMA. Such VMAs can change their size under
+	 * down_read(mmap_lock) and collide with the VMA we are about to unmap.
+	 */
+	if (vma && (vma->vm_flags & VM_GROWSDOWN))
+		return false;
+	if (prev && (prev->vm_flags & VM_GROWSUP))
+		return false;
+	return true;
 }
 
 /*
@@ -2825,7 +2836,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	}
 
 	/* Detach vmas from rbtree */
-	detach_vmas_to_be_unmapped(mm, vma, prev, end);
+	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
+		downgrade = false;
 
 	if (downgrade)
 		mmap_write_downgrade(mm);
...
@@ -3178,7 +3178,7 @@ static int shmem_initxattrs(struct inode *inode,
 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
 					  GFP_KERNEL);
 		if (!new_xattr->name) {
-			kfree(new_xattr);
+			kvfree(new_xattr);
 			return -ENOMEM;
 		}
 
...
@@ -326,6 +326,14 @@ int slab_unmergeable(struct kmem_cache *s)
 	if (s->refcount < 0)
 		return 1;
 
+#ifdef CONFIG_MEMCG_KMEM
+	/*
+	 * Skip the dying kmem_cache.
+	 */
+	if (s->memcg_params.dying)
+		return 1;
+#endif
+
 	return 0;
 }
@@ -886,12 +894,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 	return 0;
 }
 
-static void flush_memcg_workqueue(struct kmem_cache *s)
+static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
 {
 	spin_lock_irq(&memcg_kmem_wq_lock);
 	s->memcg_params.dying = true;
 	spin_unlock_irq(&memcg_kmem_wq_lock);
+}
 
+static void flush_memcg_workqueue(struct kmem_cache *s)
+{
 	/*
 	 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
 	 * sure all registered rcu callbacks have been invoked.
@@ -923,10 +934,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
 {
 	return 0;
 }
-
-static inline void flush_memcg_workqueue(struct kmem_cache *s)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 
 void slab_kmem_cache_release(struct kmem_cache *s)
@@ -944,8 +951,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 
 	if (unlikely(!s))
 		return;
 
-	flush_memcg_workqueue(s);
-
 	get_online_cpus();
 	get_online_mems();
 
@@ -955,6 +960,22 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (s->refcount)
 		goto out_unlock;
 
+#ifdef CONFIG_MEMCG_KMEM
+	memcg_set_kmem_cache_dying(s);
+
+	mutex_unlock(&slab_mutex);
+
+	put_online_mems();
+	put_online_cpus();
+
+	flush_memcg_workqueue(s);
+
+	get_online_cpus();
+	get_online_mems();
+
+	mutex_lock(&slab_mutex);
+#endif
+
 	err = shutdown_memcg_caches(s);
 	if (!err)
 		err = shutdown_cache(s);
...
@@ -87,8 +87,8 @@ parse_symbol() {
 		return
 	fi
 
-	# Strip out the base of the path
-	code=${code#$basepath/}
+	# Strip out the base of the path on each line
+	code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code")
 
 	# In the case of inlines, move everything to same line
 	code=${code//$'\n'/' '}
...
@@ -96,7 +96,7 @@ lx-symbols command."""
             return ""
         attrs = sect_attrs['attrs']
         section_name_to_address = {
-            attrs[n]['name'].string(): attrs[n]['address']
+            attrs[n]['battr']['attr']['name'].string(): attrs[n]['address']
             for n in range(int(sect_attrs['nsections']))}
         args = []
         for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
...