Commit 2c26fdd7 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: revert gfp mask fix

My patch, memcg-fix-gfp_mask-of-callers-of-charge.patch, changed the gfp_mask
of callers of charge to GFP_HIGHUSER_MOVABLE in order to show what will
happen at memory reclaim.

But in recent discussion it was NACKed because it looks ugly.

This patch reverts it and adds some cleanup of the gfp_mask used by callers
of charge.  There is no behavior change, but the revert needs review before
it generates conflicting hunks deep in the patch queue.

This patch also adds an explanation of the meaning of the gfp_mask passed
to the charge functions in memcontrol.h.
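
For illustration only, the rule boils down to the two call patterns below
(taken from the callers touched by this patch): a page-cache path that
receives an arbitrary gfp_mask keeps just the reclaim-behaviour bits, while
a path charging a freshly allocated page simply passes GFP_KERNEL.

	/* Page-cache caller: strip placement bits, keep reclaim behaviour. */
	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		goto out;

	/* Anonymous-fault caller: the charge itself may reclaim and block,
	 * so plain GFP_KERNEL is enough. */
	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		goto oom_free_page;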
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 88700756
include/linux/memcontrol.h
@@ -26,6 +26,16 @@ struct page;
 struct mm_struct;
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't
+ * alloc memory but reclaims memory from all available zones. So, "where I want
+ * memory from" bits of gfp_mask has no meaning. So any bits of that field is
+ * available but adding a rule is better. charge functions' gfp_mask should
+ * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous
+ * codes.
+ * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.)
+ */
 extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
mm/filemap.c
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	VM_BUG_ON(!PageLocked(page));
 	error = mem_cgroup_cache_charge(page, current->mm,
-					gfp_mask & ~__GFP_HIGHMEM);
+					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
 		goto out;
mm/memcontrol.c
@@ -1248,7 +1248,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	unlock_page_cgroup(pc);
 	if (mem) {
-		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+		ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
 		css_put(&mem->css);
 	}
 	*ptr = mem;
@@ -1378,7 +1378,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 			break;
 		progress = try_to_free_mem_cgroup_pages(memcg,
-						GFP_HIGHUSER_MOVABLE, false);
+						GFP_KERNEL, false);
 		if (!progress) retry_count--;
 	}
 	return ret;
@@ -1418,7 +1418,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 			break;
 		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-		try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+		try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 		if (curusage >= oldusage)
 			retry_count--;
@@ -1464,7 +1464,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 		}
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
-		ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
 		if (ret == -ENOMEM)
 			break;
@@ -1550,7 +1550,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
 			goto out;
 		}
 		progress = try_to_free_mem_cgroup_pages(mem,
-						GFP_HIGHUSER_MOVABLE, false);
+						GFP_KERNEL, false);
 		if (!progress) {
 			nr_retries--;
 			/* maybe some writeback is necessary */
mm/memory.c
@@ -2000,7 +2000,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 	/*
@@ -2431,8 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
-	if (mem_cgroup_try_charge_swapin(mm, page,
-				GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
+	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
 		ret = VM_FAULT_OOM;
 		unlock_page(page);
 		goto out;
@@ -2524,7 +2523,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto oom;
 	__SetPageUptodate(page);
-	if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 		goto oom_free_page;
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2615,8 +2614,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
-		if (mem_cgroup_newpage_charge(page,
-					mm, GFP_HIGHUSER_MOVABLE)) {
+		if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 			ret = VM_FAULT_OOM;
 			page_cache_release(page);
 			goto out;
mm/shmem.c
@@ -932,8 +932,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 	 * Charge page using GFP_HIGHUSER_MOVABLE while we can wait.
 	 * charged back to the user(not to caller) when swap account is used.
 	 */
-	error = mem_cgroup_cache_charge_swapin(page,
-			current->mm, GFP_HIGHUSER_MOVABLE, true);
+	error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL,
+					true);
 	if (error)
 		goto out;
 	error = radix_tree_preload(GFP_KERNEL);
@@ -1275,7 +1275,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 		 * charge against this swap cache here.
 		 */
 		if (mem_cgroup_cache_charge_swapin(swappage,
-					current->mm, gfp, false)) {
+					current->mm, gfp & GFP_RECLAIM_MASK, false)) {
 			page_cache_release(swappage);
 			error = -ENOMEM;
 			goto failed;
@@ -1393,7 +1393,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 		/* Precharge page while we can wait, compensate after */
 		error = mem_cgroup_cache_charge(filepage, current->mm,
-						GFP_HIGHUSER_MOVABLE);
+						GFP_KERNEL);
 		if (error) {
 			page_cache_release(filepage);
 			shmem_unacct_blocks(info->flags, 1);
mm/swapfile.c
@@ -698,8 +698,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_t *pte;
 	int ret = 1;
-	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
-					GFP_HIGHUSER_MOVABLE, &ptr))
+	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
 		ret = -ENOMEM;
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);