Commit ad6ec09d authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "7 patches.

  Subsystems affected by this patch series: lib, ocfs2, and mm (slub,
  migration, and memcg)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/memcg: fix NULL pointer dereference in memcg_slab_free_hook()
  slub: fix unreclaimable slab stat for bulk free
  mm/migrate: fix NR_ISOLATED corruption on 64-bit
  mm: memcontrol: fix blocking rstat function called from atomic cgroup1 thresholding code
  ocfs2: issue zeroout to EOF blocks
  ocfs2: fix zero out valid data
  lib/test_string.c: move string selftest in the Runtime Testing menu
parents 764a5bc8 121dffe2
@@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
 	}
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ *      is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+					 u64 start, u64 len)
+{
+	int ret;
+	u64 start_block, end_block, nr_blocks;
+	u64 p_block, offset;
+	u32 cluster, p_cluster, nr_clusters;
+	struct super_block *sb = inode->i_sb;
+	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+	if (start + len < end)
+		end = start + len;
+
+	start_block = ocfs2_blocks_for_bytes(sb, start);
+	end_block = ocfs2_blocks_for_bytes(sb, end);
+	nr_blocks = end_block - start_block;
+	if (!nr_blocks)
+		return 0;
+
+	cluster = ocfs2_bytes_to_clusters(sb, start);
+	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+				 &nr_clusters, NULL);
+	if (ret)
+		return ret;
+	if (!p_cluster)
+		return 0;
+
+	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 static int ocfs2_zero_partial_clusters(struct inode *inode,
 				       u64 start, u64 len)
 {
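The alignment arithmetic above is easier to check with concrete numbers. Below is a minimal userspace sketch, assuming a hypothetical 4 KiB block / 1 MiB cluster geometry (ocfs2 reads the real shift counts from the superblock); the two helpers reimplement only the round-up behaviour of ocfs2_blocks_for_bytes() and ocfs2_align_bytes_to_clusters():

#include <stdio.h>
#include <stdint.h>

/* Hypothetical geometry for illustration; ocfs2 takes the real
 * shift counts from the superblock. */
#define BLOCK_BITS   12			/* 4 KiB blocks */
#define CLUSTER_BITS 20			/* 1 MiB clusters */

/* Round-up behaviour of ocfs2_blocks_for_bytes(). */
static uint64_t blocks_for_bytes(uint64_t bytes)
{
	return (bytes + (1ULL << BLOCK_BITS) - 1) >> BLOCK_BITS;
}

/* Round-up behaviour of ocfs2_align_bytes_to_clusters(). */
static uint64_t align_bytes_to_clusters(uint64_t bytes)
{
	uint64_t mask = (1ULL << CLUSTER_BITS) - 1;
	return (bytes + mask) & ~mask;
}

int main(void)
{
	uint64_t start = 0x100234;	/* unaligned offset in cluster 1 */
	uint64_t len   = 0x300000;	/* reaches past that cluster */

	uint64_t end = align_bytes_to_clusters(start);
	if (start + len < end)
		end = start + len;	/* shorter range wins */

	uint64_t start_block = blocks_for_bytes(start);
	uint64_t end_block   = blocks_for_bytes(end);

	/* Prints blocks 257..512: the zeroout is capped at the end of
	 * the cluster containing "start", never the next one. */
	printf("zeroout blocks [%llu, %llu), %llu blocks\n",
	       (unsigned long long)start_block,
	       (unsigned long long)end_block,
	       (unsigned long long)(end_block - start_block));
	return 0;
}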
@@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	unsigned int csize = osb->s_clustersize;
 	handle_t *handle;
+	loff_t isize = i_size_read(inode);
 
 	/*
 	 * The "start" and "end" values are NOT necessarily part of
@@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
 		goto out;
 
+	/* No page cache for EOF blocks, issue zero out to disk. */
+	if (end > isize) {
+		/*
+		 * zeroout eof blocks in last cluster starting from
+		 * "isize" even "start" > "isize" because it is
+		 * complicated to zeroout just at "start" as "start"
+		 * may be not aligned with block size, buffer write
+		 * would be required to do that, but out of eof buffer
+		 * write is not supported.
+		 */
+		ret = ocfs2_zeroout_partial_cluster(inode, isize,
+						    end - isize);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+		if (start >= isize)
+			goto out;
+		end = isize;
+	}
 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
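The new branch splits the requested range at i_size: the tail beyond EOF has no page cache, so it goes straight to the block layer, and the journalled page-cache pass is then clamped to end at i_size. A toy model with made-up offsets:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up offsets: zero [4096, 20480) in a file whose i_size
	 * is 8192, mirroring the branch above. */
	uint64_t start = 4096, end = 20480, isize = 8192;

	if (end > isize) {
		/* Tail past EOF has no page cache: block-layer zeroout,
		 * starting at isize for the reason the comment gives. */
		printf("zeroout on disk: [%llu, %llu)\n",
		       (unsigned long long)isize,
		       (unsigned long long)end);
		if (start >= isize)
			return 0;	/* nothing left below EOF */
		end = isize;		/* clamp the page-cache pass */
	}
	printf("page-cache zero: [%llu, %llu)\n",
	       (unsigned long long)start,
	       (unsigned long long)end);
	return 0;
}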
@@ -1855,45 +1915,6 @@ int ocfs2_remove_inode_range(struct inode *inode,
 	return ret;
 }
 
-/*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- *      is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
-					 u64 start, u64 len)
-{
-	int ret;
-	u64 start_block, end_block, nr_blocks;
-	u64 p_block, offset;
-	u32 cluster, p_cluster, nr_clusters;
-	struct super_block *sb = inode->i_sb;
-	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
-	if (start + len < end)
-		end = start + len;
-
-	start_block = ocfs2_blocks_for_bytes(sb, start);
-	end_block = ocfs2_blocks_for_bytes(sb, end);
-	nr_blocks = end_block - start_block;
-	if (!nr_blocks)
-		return 0;
-
-	cluster = ocfs2_bytes_to_clusters(sb, start);
-	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
-				 &nr_clusters, NULL);
-	if (ret)
-		return ret;
-	if (!p_cluster)
-		return 0;
-
-	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
-	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
-	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		goto out_inode_unlock;
 	}
 
-	orig_isize = i_size_read(inode);
 	switch (sr->l_whence) {
 	case 0: /*SEEK_SET*/
 		break;
@@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		sr->l_start += f_pos;
 		break;
 	case 2: /*SEEK_END*/
-		sr->l_start += orig_isize;
+		sr->l_start += i_size_read(inode);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		ret = -EINVAL;
 	}
 
+	orig_isize = i_size_read(inode);
 	/* zeroout eof blocks in the cluster. */
 	if (!ret && change_size && orig_isize < size) {
 		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
...
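Taken together, the three __ocfs2_change_file_space() hunks change when i_size is sampled: SEEK_END now reads it at the point of use, and orig_isize is captured only immediately before the EOF-zeroout check, after the space change has been applied. This closes the window in which a racing extension of the file (for example a completing append direct-io) could leave the zeroout working from a stale size and clearing freshly written data.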
@@ -683,9 +683,6 @@ config PARMAN
 config OBJAGG
 	tristate "objagg" if COMPILE_TEST
 
-config STRING_SELFTEST
-	tristate "Test string functions"
-
 endmenu
 
 config GENERIC_IOREMAP
...
@@ -2180,6 +2180,9 @@ config ASYNC_RAID6_TEST
 config TEST_HEXDUMP
 	tristate "Test functions located in the hexdump module at runtime"
 
+config STRING_SELFTEST
+	tristate "Test string functions at runtime"
+
 config TEST_STRING_HELPERS
 	tristate "Test functions located in the string_helpers module at runtime"
...
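With this move, STRING_SELFTEST sits in the Runtime Testing menu next to the other test_* options instead of in the top-level library menu. As before, it can be built as a module (CONFIG_STRING_SELFTEST=m) so the string selftests run at module load time rather than on every boot.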
@@ -3574,7 +3574,8 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 	unsigned long val;
 
 	if (mem_cgroup_is_root(memcg)) {
-		cgroup_rstat_flush(memcg->css.cgroup);
+		/* mem_cgroup_threshold() calls here from irqsafe context */
+		cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
 			memcg_page_state(memcg, NR_ANON_MAPPED);
 		if (swap)
...
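cgroup_rstat_flush() may block, but mem_cgroup_usage() is reached from mem_cgroup_threshold() with interrupts disabled, so flushing there could sleep in atomic context. cgroup_rstat_flush_irqsafe() performs the same flush without re-enabling interrupts or yielding, which makes it the appropriate variant for this call site.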
@@ -2068,7 +2068,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	LIST_HEAD(migratepages);
 	new_page_t *new;
 	bool compound;
-	unsigned int nr_pages = thp_nr_pages(page);
+	int nr_pages = thp_nr_pages(page);
 
 	/*
 	 * PTE mapped THP or HugeTLB page can't reach here so the page could
...
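The one-word type change matters because migrate_misplaced_page() later passes -nr_pages to mod_node_page_state(), whose delta parameter is a long. With an unsigned int, the negation wraps modulo 2^32 before it is widened, so on 64-bit the isolated-page counter is incremented by about 4 billion instead of decremented by nr_pages. A standalone sketch of just the conversion (the plain counter stands in for the node stat):

#include <stdio.h>

/* Model of the NR_ISOLATED bug: mod_node_page_state() takes a long
 * delta, and the call site passes -nr_pages. */
static long counter;

static void mod_state(long delta)
{
	counter += delta;
}

int main(void)
{
	unsigned int nr_pages_old = 512;	/* type before the fix */
	int nr_pages_new = 512;			/* type after the fix */

	/* -nr_pages_old wraps to 4294966784 as an unsigned int, then
	 * converts to a large positive long on 64-bit targets. */
	mod_state(-nr_pages_old);
	printf("unsigned int delta: counter = %ld\n", counter);

	counter = 0;
	mod_state(-nr_pages_new);	/* sign-extends to -512, as intended */
	printf("int delta:          counter = %ld\n", counter);
	return 0;
}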
@@ -346,7 +346,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
 			continue;
 
 		page = virt_to_head_page(p[i]);
-		objcgs = page_objcgs(page);
+		objcgs = page_objcgs_check(page);
 		if (!objcgs)
 			continue;
...
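page->memcg_data is a tagged word that only sometimes carries an obj_cgroup vector; page_objcgs() trusts that the tag is right (asserting only in debug builds), while page_objcgs_check() verifies it and returns NULL otherwise, letting the existing !objcgs bailout above skip mis-tagged pages instead of dereferencing garbage. A userspace sketch of the pattern, with illustrative tag values rather than the kernel's actual MEMCG_DATA_* encoding:

#include <stdio.h>
#include <stdint.h>

/* Illustrative tag values, not the kernel's MEMCG_DATA_* encoding. */
#define TAG_OBJCGS 0x1UL	/* word carries an objcgs vector */
#define TAG_KMEM   0x2UL	/* word carries something else */
#define TAG_MASK   0x3UL

struct objcg { const char *what; };

/* Like page_objcgs(): trusts the tag unconditionally. */
static struct objcg **get_objcgs(uintptr_t memcg_data)
{
	return (struct objcg **)(memcg_data & ~TAG_MASK);
}

/* Like page_objcgs_check(): verifies the tag and refuses mismatches. */
static struct objcg **get_objcgs_check(uintptr_t memcg_data)
{
	if (!memcg_data || !(memcg_data & TAG_OBJCGS))
		return NULL;
	return (struct objcg **)(memcg_data & ~TAG_MASK);
}

int main(void)
{
	static struct objcg other = { "not an objcgs vector" };
	uintptr_t mistagged = (uintptr_t)&other | TAG_KMEM;

	/* Unchecked: non-NULL garbage the caller would dereference. */
	printf("unchecked: %p\n", (void *)get_objcgs(mistagged));
	/* Checked: NULL, so the !objcgs bailout skips the page. */
	printf("checked:   %p\n", (void *)get_objcgs_check(mistagged));
	return 0;
}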
@@ -3236,6 +3236,16 @@ struct detached_freelist {
 	struct kmem_cache *s;
 };
 
+static inline void free_nonslab_page(struct page *page)
+{
+	unsigned int order = compound_order(page);
+
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
+	kfree_hook(page_address(page));
+	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
+	__free_pages(page, order);
+}
+
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
@@ -3272,9 +3282,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!PageSlab(page))) {
-			BUG_ON(!PageCompound(page));
-			kfree_hook(object);
-			__free_pages(page, compound_order(page));
+			free_nonslab_page(page);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
@@ -4250,13 +4258,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
-		unsigned int order = compound_order(page);
-
-		BUG_ON(!PageCompound(page));
-		kfree_hook(object);
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-				      -(PAGE_SIZE << order));
-		__free_pages(page, order);
+		free_nonslab_page(page);
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
...
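The slub change is a consolidation with a behavioural fix: kfree() already decremented NR_SLAB_UNRECLAIMABLE_B when freeing a big (non-slab) kmalloc page, but the bulk-free path in build_detached_freelist() did not, leaving the stat permanently inflated after kfree_bulk(). Routing both callers through free_nonslab_page() makes the accounting identical on both paths, and the BUG_ON() becomes a VM_BUG_ON_PAGE() that also dumps the offending page in debug builds.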